Diffstat (limited to 'neuralnetworks/aidl/vts/functional/GeneratedTestHarness.cpp')
-rw-r--r--  neuralnetworks/aidl/vts/functional/GeneratedTestHarness.cpp  539
1 file changed, 344 insertions(+), 195 deletions(-)
diff --git a/neuralnetworks/aidl/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/aidl/vts/functional/GeneratedTestHarness.cpp
index ac5b96a8a4..dcf8451919 100644
--- a/neuralnetworks/aidl/vts/functional/GeneratedTestHarness.cpp
+++ b/neuralnetworks/aidl/vts/functional/GeneratedTestHarness.cpp
@@ -20,7 +20,6 @@
#include <aidl/android/hardware/neuralnetworks/RequestMemoryPool.h>
#include <android-base/logging.h>
#include <android/binder_auto_utils.h>
-#include <android/sync.h>
#include <gtest/gtest.h>
#include <algorithm>
@@ -30,7 +29,6 @@
#include <numeric>
#include <vector>
-#include <MemoryUtils.h>
#include <android/binder_status.h>
#include <nnapi/Result.h>
#include <nnapi/SharedMemory.h>
@@ -43,6 +41,10 @@
#include "Utils.h"
#include "VtsHalNeuralnetworks.h"
+#ifdef __ANDROID__
+#include <android/sync.h>
+#endif // __ANDROID__
+
namespace aidl::android::hardware::neuralnetworks::vts::functional {
namespace nn = ::android::nn;
@@ -58,25 +60,66 @@ struct TestConfig {
bool measureTiming;
OutputType outputType;
MemoryType memoryType;
+ bool reusable;
// `reportSkipping` indicates if a test should print an info message in case
// it is skipped. The field is set to true by default and is set to false in
// quantization coupling tests to suppress skipping a test
bool reportSkipping;
- TestConfig(Executor executor, bool measureTiming, OutputType outputType, MemoryType memoryType)
+ // `useConfig` indicates if a test should use execute*WithConfig functions for the execution.
+ bool useConfig;
+ TestConfig(Executor executor, bool measureTiming, OutputType outputType, MemoryType memoryType,
+ bool reusable)
: executor(executor),
measureTiming(measureTiming),
outputType(outputType),
memoryType(memoryType),
- reportSkipping(true) {}
+ reusable(reusable),
+ reportSkipping(true),
+ useConfig(false) {}
TestConfig(Executor executor, bool measureTiming, OutputType outputType, MemoryType memoryType,
- bool reportSkipping)
+ bool reusable, bool reportSkipping)
: executor(executor),
measureTiming(measureTiming),
outputType(outputType),
memoryType(memoryType),
- reportSkipping(reportSkipping) {}
+ reusable(reusable),
+ reportSkipping(reportSkipping),
+ useConfig(false) {}
+ TestConfig(Executor executor, bool measureTiming, OutputType outputType, MemoryType memoryType,
+ bool reusable, bool reportSkipping, bool useConfig)
+ : executor(executor),
+ measureTiming(measureTiming),
+ outputType(outputType),
+ memoryType(memoryType),
+ reusable(reusable),
+ reportSkipping(reportSkipping),
+ useConfig(useConfig) {}
};
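
For comparison, a hypothetical simplification (not part of this change) would replace the three telescoping constructors with default member initializers, letting brace initialization cover every call site:

    // Hypothetical alternative, not in this CL: default member initializers
    // in place of the three telescoping constructors above.
    struct TestConfig {
        Executor executor;
        bool measureTiming;
        OutputType outputType;
        MemoryType memoryType;
        bool reusable;
        bool reportSkipping = true;
        bool useConfig = false;
    };
    // A call site then reads:
    //   TestConfig{executor, measureTiming, outputType, memoryType, /*reusable=*/false}
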
+std::string toString(OutputType type) {
+ switch (type) {
+ case OutputType::FULLY_SPECIFIED:
+ return "FULLY_SPECIFIED";
+ case OutputType::UNSPECIFIED:
+ return "UNSPECIFIED";
+ case OutputType::INSUFFICIENT:
+ return "INSUFFICIENT";
+ case OutputType::MISSED_DEADLINE:
+ return "MISSED_DEADLINE";
+ }
+}
+
+std::string toString(const TestConfig& config) {
+ std::stringstream ss;
+ ss << "TestConfig{.executor=" << toString(config.executor)
+ << ", .measureTiming=" << (config.measureTiming ? "true" : "false")
+ << ", .outputType=" << toString(config.outputType)
+ << ", .memoryType=" << toString(config.memoryType)
+ << ", .reusable=" << (config.reusable ? "true" : "false")
+ << ", .useConfig=" << (config.useConfig ? "true" : "false") << "}";
+ return ss.str();
+}
+
enum class IOType { INPUT, OUTPUT };
class DeviceMemoryAllocator {
@@ -240,10 +283,14 @@ void copyTestBuffers(const std::vector<const TestBuffer*>& buffers, uint8_t* out
} // namespace
void waitForSyncFence(int syncFd) {
- constexpr int kInfiniteTimeout = -1;
ASSERT_GT(syncFd, 0);
+#ifdef __ANDROID__
+ constexpr int kInfiniteTimeout = -1;
int r = sync_wait(syncFd, kInfiniteTimeout);
ASSERT_GE(r, 0);
+#else // __ANDROID__
+ LOG(FATAL) << "waitForSyncFence not supported on host";
+#endif // __ANDROID__
}
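
For reference, the blocking wait above can also be expressed with plain poll(2), which is what libsync's sync_wait() performs internally on a sync_file descriptor; a sketch for illustration only, not part of this change:

    // Illustrative poll(2) equivalent of sync_wait(); timeoutMs = -1 blocks forever.
    #include <errno.h>
    #include <poll.h>
    static int pollSyncFence(int syncFd, int timeoutMs) {
        pollfd fds{syncFd, POLLIN, 0};
        int ret;
        do {
            ret = poll(&fds, 1, timeoutMs);
        } while (ret == -1 && (errno == EINTR || errno == EAGAIN));
        if (ret == 0) {
            errno = ETIME;   // fence not signaled within the timeout
            return -1;
        }
        if (ret > 0 && (fds.revents & (POLLERR | POLLNVAL))) {
            errno = EINVAL;  // invalid or errored descriptor
            return -1;
        }
        return ret > 0 ? 0 : -1;  // 0 once the fence has signaled
    }
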
Model createModel(const TestModel& testModel) {
@@ -558,209 +605,269 @@ void EvaluatePreparedModel(const std::shared_ptr<IDevice>& device,
loopTimeoutDurationNs = 1 * kMillisecond;
}
- ErrorStatus executionStatus;
- std::vector<OutputShape> outputShapes;
- Timing timing = kNoTiming;
- switch (testConfig.executor) {
- case Executor::SYNC: {
- SCOPED_TRACE("synchronous");
-
- ExecutionResult executionResult;
- // execute
- const auto ret = preparedModel->executeSynchronously(request, testConfig.measureTiming,
- kNoDeadline, loopTimeoutDurationNs,
- &executionResult);
- ASSERT_TRUE(ret.isOk() || ret.getExceptionCode() == EX_SERVICE_SPECIFIC)
- << ret.getDescription();
- if (ret.isOk()) {
- executionStatus = executionResult.outputSufficientSize
- ? ErrorStatus::NONE
- : ErrorStatus::OUTPUT_INSUFFICIENT_SIZE;
- outputShapes = std::move(executionResult.outputShapes);
- timing = executionResult.timing;
- } else {
- executionStatus = static_cast<ErrorStatus>(ret.getServiceSpecificError());
- }
- break;
- }
- case Executor::BURST: {
- SCOPED_TRACE("burst");
-
- // create burst
- std::shared_ptr<IBurst> burst;
- auto ret = preparedModel->configureExecutionBurst(&burst);
- ASSERT_TRUE(ret.isOk()) << ret.getDescription();
- ASSERT_NE(nullptr, burst.get());
-
- // associate a unique slot with each memory pool
- int64_t currentSlot = 0;
- std::vector<int64_t> slots;
- slots.reserve(request.pools.size());
- for (const auto& pool : request.pools) {
- if (pool.getTag() == RequestMemoryPool::Tag::pool) {
- slots.push_back(currentSlot++);
+ std::shared_ptr<IExecution> execution;
+ if (testConfig.reusable) {
+ const auto ret = preparedModel->createReusableExecution(
+ request, {testConfig.measureTiming, loopTimeoutDurationNs, {}, {}}, &execution);
+ ASSERT_TRUE(ret.isOk()) << static_cast<nn::ErrorStatus>(ret.getServiceSpecificError());
+ ASSERT_NE(nullptr, execution.get());
+ }
+
+ const auto executeAndCheckResults = [&preparedModel, &execution, &testConfig, &testModel,
+ &context, &request, loopTimeoutDurationNs, skipped]() {
+ ErrorStatus executionStatus;
+ std::vector<OutputShape> outputShapes;
+ Timing timing = kNoTiming;
+ switch (testConfig.executor) {
+ case Executor::SYNC: {
+ SCOPED_TRACE("synchronous");
+
+ ExecutionResult executionResult;
+ // execute
+ ::ndk::ScopedAStatus ret;
+ if (testConfig.reusable) {
+ ret = execution->executeSynchronously(kNoDeadline, &executionResult);
+ } else if (testConfig.useConfig) {
+ ret = preparedModel->executeSynchronouslyWithConfig(
+ request, {testConfig.measureTiming, loopTimeoutDurationNs, {}, {}},
+ kNoDeadline, &executionResult);
} else {
- EXPECT_EQ(pool.getTag(), RequestMemoryPool::Tag::token);
- slots.push_back(-1);
+ ret = preparedModel->executeSynchronously(request, testConfig.measureTiming,
+ kNoDeadline, loopTimeoutDurationNs,
+ &executionResult);
}
+ ASSERT_TRUE(ret.isOk() || ret.getExceptionCode() == EX_SERVICE_SPECIFIC)
+ << ret.getDescription();
+ if (ret.isOk()) {
+ executionStatus = executionResult.outputSufficientSize
+ ? ErrorStatus::NONE
+ : ErrorStatus::OUTPUT_INSUFFICIENT_SIZE;
+ outputShapes = std::move(executionResult.outputShapes);
+ timing = executionResult.timing;
+ } else {
+ executionStatus = static_cast<ErrorStatus>(ret.getServiceSpecificError());
+ }
+ break;
}
+ case Executor::BURST: {
+ SCOPED_TRACE("burst");
- ExecutionResult executionResult;
- // execute
- ret = burst->executeSynchronously(request, slots, testConfig.measureTiming, kNoDeadline,
- loopTimeoutDurationNs, &executionResult);
- ASSERT_TRUE(ret.isOk() || ret.getExceptionCode() == EX_SERVICE_SPECIFIC)
- << ret.getDescription();
- if (ret.isOk()) {
- executionStatus = executionResult.outputSufficientSize
- ? ErrorStatus::NONE
- : ErrorStatus::OUTPUT_INSUFFICIENT_SIZE;
- outputShapes = std::move(executionResult.outputShapes);
- timing = executionResult.timing;
- } else {
- executionStatus = static_cast<ErrorStatus>(ret.getServiceSpecificError());
- }
-
- // Mark each slot as unused after the execution. This is unnecessary because the burst
- // is freed after this scope ends, but this is here to test the functionality.
- for (int64_t slot : slots) {
- ret = burst->releaseMemoryResource(slot);
+ // create burst
+ std::shared_ptr<IBurst> burst;
+ auto ret = preparedModel->configureExecutionBurst(&burst);
ASSERT_TRUE(ret.isOk()) << ret.getDescription();
- }
+ ASSERT_NE(nullptr, burst.get());
+
+ // associate a unique slot with each memory pool
+ constexpr int64_t kIgnoreSlot = -1;
+ int64_t currentSlot = 0;
+ std::vector<int64_t> slots;
+ slots.reserve(request.pools.size());
+ for (const auto& pool : request.pools) {
+ if (pool.getTag() == RequestMemoryPool::Tag::pool) {
+ slots.push_back(currentSlot++);
+ } else {
+ EXPECT_EQ(pool.getTag(), RequestMemoryPool::Tag::token);
+ slots.push_back(kIgnoreSlot);
+ }
+ }
- break;
- }
- case Executor::FENCED: {
- SCOPED_TRACE("fenced");
- ErrorStatus result = ErrorStatus::NONE;
- FencedExecutionResult executionResult;
- auto ret = preparedModel->executeFenced(request, {}, testConfig.measureTiming,
- kNoDeadline, loopTimeoutDurationNs, kNoDuration,
- &executionResult);
- ASSERT_TRUE(ret.isOk() || ret.getExceptionCode() == EX_SERVICE_SPECIFIC)
- << ret.getDescription();
- if (!ret.isOk()) {
- result = static_cast<ErrorStatus>(ret.getServiceSpecificError());
- executionStatus = result;
- } else if (executionResult.syncFence.get() != -1) {
- std::vector<ndk::ScopedFileDescriptor> waitFor;
- auto dupFd = dup(executionResult.syncFence.get());
- ASSERT_NE(dupFd, -1);
- waitFor.emplace_back(dupFd);
- // If a sync fence is returned, try start another run waiting for the sync fence.
- ret = preparedModel->executeFenced(request, waitFor, testConfig.measureTiming,
- kNoDeadline, loopTimeoutDurationNs, kNoDuration,
- &executionResult);
- ASSERT_TRUE(ret.isOk());
- waitForSyncFence(executionResult.syncFence.get());
+ ExecutionResult executionResult;
+ // execute
+ if (testConfig.useConfig) {
+ ret = burst->executeSynchronouslyWithConfig(
+ request, slots,
+ {testConfig.measureTiming, loopTimeoutDurationNs, {}, {}}, kNoDeadline,
+ &executionResult);
+ } else {
+ ret = burst->executeSynchronously(request, slots, testConfig.measureTiming,
+ kNoDeadline, loopTimeoutDurationNs,
+ &executionResult);
+ }
+ ASSERT_TRUE(ret.isOk() || ret.getExceptionCode() == EX_SERVICE_SPECIFIC)
+ << ret.getDescription();
+ if (ret.isOk()) {
+ executionStatus = executionResult.outputSufficientSize
+ ? ErrorStatus::NONE
+ : ErrorStatus::OUTPUT_INSUFFICIENT_SIZE;
+ outputShapes = std::move(executionResult.outputShapes);
+ timing = executionResult.timing;
+ } else {
+ executionStatus = static_cast<ErrorStatus>(ret.getServiceSpecificError());
+ }
+
+ // Mark each slot as unused after the execution. This is unnecessary because the
+ // burst is freed after this scope ends, but this is here to test the functionality.
+ for (int64_t slot : slots) {
+ if (slot != kIgnoreSlot) {
+ ret = burst->releaseMemoryResource(slot);
+ ASSERT_TRUE(ret.isOk()) << ret.getDescription();
+ }
+ }
+
+ break;
}
- if (result == ErrorStatus::NONE) {
- ASSERT_NE(executionResult.callback, nullptr);
- Timing timingFenced;
- auto ret = executionResult.callback->getExecutionInfo(&timing, &timingFenced,
- &executionStatus);
- ASSERT_TRUE(ret.isOk());
+ case Executor::FENCED: {
+ SCOPED_TRACE("fenced");
+ ErrorStatus result = ErrorStatus::NONE;
+ FencedExecutionResult executionResult;
+ ::ndk::ScopedAStatus ret;
+ if (testConfig.reusable) {
+ ret = execution->executeFenced({}, kNoDeadline, kNoDuration, &executionResult);
+ } else if (testConfig.useConfig) {
+ ret = preparedModel->executeFencedWithConfig(
+ request, {}, {testConfig.measureTiming, loopTimeoutDurationNs, {}, {}},
+ kNoDeadline, kNoDuration, &executionResult);
+ } else {
+ ret = preparedModel->executeFenced(request, {}, testConfig.measureTiming,
+ kNoDeadline, loopTimeoutDurationNs,
+ kNoDuration, &executionResult);
+ }
+ ASSERT_TRUE(ret.isOk() || ret.getExceptionCode() == EX_SERVICE_SPECIFIC)
+ << ret.getDescription();
+ if (!ret.isOk()) {
+ result = static_cast<ErrorStatus>(ret.getServiceSpecificError());
+ executionStatus = result;
+ } else if (executionResult.syncFence.get() != -1) {
+ std::vector<ndk::ScopedFileDescriptor> waitFor;
+ auto dupFd = dup(executionResult.syncFence.get());
+ ASSERT_NE(dupFd, -1);
+ waitFor.emplace_back(dupFd);
+                // If a sync fence is returned, try to start another run waiting
+                // for the sync fence.
+ if (testConfig.reusable) {
+ ret = execution->executeFenced(waitFor, kNoDeadline, kNoDuration,
+ &executionResult);
+ } else if (testConfig.useConfig) {
+ ret = preparedModel->executeFencedWithConfig(
+ request, waitFor,
+ {testConfig.measureTiming, loopTimeoutDurationNs, {}, {}},
+ kNoDeadline, kNoDuration, &executionResult);
+ } else {
+ ret = preparedModel->executeFenced(
+ request, waitFor, testConfig.measureTiming, kNoDeadline,
+ loopTimeoutDurationNs, kNoDuration, &executionResult);
+ }
+ ASSERT_TRUE(ret.isOk());
+ waitForSyncFence(executionResult.syncFence.get());
+ }
+ if (result == ErrorStatus::NONE) {
+ ASSERT_NE(executionResult.callback, nullptr);
+ Timing timingFenced;
+ auto ret = executionResult.callback->getExecutionInfo(&timing, &timingFenced,
+ &executionStatus);
+ ASSERT_TRUE(ret.isOk());
+ }
+ break;
+ }
+ default: {
+ FAIL() << "Unsupported execution mode for AIDL interface.";
}
- break;
- }
- default: {
- FAIL() << "Unsupported execution mode for AIDL interface.";
- }
- }
-
- if (testConfig.outputType != OutputType::FULLY_SPECIFIED &&
- executionStatus == ErrorStatus::GENERAL_FAILURE) {
- if (skipped != nullptr) {
- *skipped = true;
- }
- if (!testConfig.reportSkipping) {
- return;
- }
- LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
- "execute model that it does not support.";
- std::cout << "[ ] Early termination of test because vendor service cannot "
- "execute model that it does not support."
- << std::endl;
- GTEST_SKIP();
- }
- if (!testConfig.measureTiming) {
- EXPECT_EQ(timing, kNoTiming);
- } else {
- if (timing.timeOnDeviceNs != -1 && timing.timeInDriverNs != -1) {
- EXPECT_LE(timing.timeOnDeviceNs, timing.timeInDriverNs);
}
- }
- switch (testConfig.outputType) {
- case OutputType::FULLY_SPECIFIED:
- if (testConfig.executor == Executor::FENCED && hasZeroSizedOutput(testModel)) {
- // Executor::FENCED does not support zero-sized output.
- ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, executionStatus);
- return;
+ if (testConfig.outputType != OutputType::FULLY_SPECIFIED &&
+ executionStatus == ErrorStatus::GENERAL_FAILURE) {
+ if (skipped != nullptr) {
+ *skipped = true;
}
- // If the model output operands are fully specified, outputShapes must be either
- // either empty, or have the same number of elements as the number of outputs.
- ASSERT_EQ(ErrorStatus::NONE, executionStatus);
- ASSERT_TRUE(outputShapes.size() == 0 ||
- outputShapes.size() == testModel.main.outputIndexes.size());
- break;
- case OutputType::UNSPECIFIED:
- if (testConfig.executor == Executor::FENCED) {
- // For Executor::FENCED, the output shape must be fully specified.
- ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, executionStatus);
+ if (!testConfig.reportSkipping) {
return;
}
- // If the model output operands are not fully specified, outputShapes must have
- // the same number of elements as the number of outputs.
- ASSERT_EQ(ErrorStatus::NONE, executionStatus);
- ASSERT_EQ(outputShapes.size(), testModel.main.outputIndexes.size());
- break;
- case OutputType::INSUFFICIENT:
- if (testConfig.executor == Executor::FENCED) {
- // For Executor::FENCED, the output shape must be fully specified.
- ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, executionStatus);
- return;
+ LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
+ "execute model that it does not support.";
+ std::cout << "[ ] Early termination of test because vendor service cannot "
+ "execute model that it does not support."
+ << std::endl;
+ GTEST_SKIP();
+ }
+ if (!testConfig.measureTiming) {
+ EXPECT_EQ(timing, kNoTiming);
+ } else {
+ if (timing.timeOnDeviceNs != -1 && timing.timeInDriverNs != -1) {
+ EXPECT_LE(timing.timeOnDeviceNs, timing.timeInDriverNs);
}
- ASSERT_EQ(ErrorStatus::OUTPUT_INSUFFICIENT_SIZE, executionStatus);
- ASSERT_EQ(outputShapes.size(), testModel.main.outputIndexes.size());
- // Check that all returned output dimensions are at least as fully specified as the
- // union of the information about the corresponding operand in the model and in the
- // request. In this test, all model outputs have known rank with all dimensions
- // unspecified, and no dimensional information is provided in the request.
- for (uint32_t i = 0; i < outputShapes.size(); i++) {
- ASSERT_EQ(outputShapes[i].isSufficient, i != kInsufficientOutputIndex);
- const auto& actual = outputShapes[i].dimensions;
- const auto& golden =
- testModel.main.operands[testModel.main.outputIndexes[i]].dimensions;
- ASSERT_EQ(actual.size(), golden.size());
- for (uint32_t j = 0; j < actual.size(); j++) {
- if (actual[j] == 0) continue;
- EXPECT_EQ(actual[j], golden[j]) << "index: " << j;
+ }
+
+ switch (testConfig.outputType) {
+ case OutputType::FULLY_SPECIFIED:
+ if (testConfig.executor == Executor::FENCED && hasZeroSizedOutput(testModel)) {
+ // Executor::FENCED does not support zero-sized output.
+ ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, executionStatus);
+ return;
}
- }
- return;
- case OutputType::MISSED_DEADLINE:
- ASSERT_TRUE(executionStatus == ErrorStatus::MISSED_DEADLINE_TRANSIENT ||
- executionStatus == ErrorStatus::MISSED_DEADLINE_PERSISTENT)
- << "executionStatus = " << executionStatus;
- return;
- }
+            // If the model output operands are fully specified, outputShapes must be
+            // either empty, or have the same number of elements as the number of outputs.
+ ASSERT_EQ(ErrorStatus::NONE, executionStatus);
+ ASSERT_TRUE(outputShapes.size() == 0 ||
+ outputShapes.size() == testModel.main.outputIndexes.size());
+ break;
+ case OutputType::UNSPECIFIED:
+ if (testConfig.executor == Executor::FENCED) {
+ // For Executor::FENCED, the output shape must be fully specified.
+ ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, executionStatus);
+ return;
+ }
+ // If the model output operands are not fully specified, outputShapes must have
+ // the same number of elements as the number of outputs.
+ ASSERT_EQ(ErrorStatus::NONE, executionStatus);
+ ASSERT_EQ(outputShapes.size(), testModel.main.outputIndexes.size());
+ break;
+ case OutputType::INSUFFICIENT:
+ if (testConfig.executor == Executor::FENCED) {
+ // For Executor::FENCED, the output shape must be fully specified.
+ ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, executionStatus);
+ return;
+ }
+ ASSERT_EQ(ErrorStatus::OUTPUT_INSUFFICIENT_SIZE, executionStatus);
+ ASSERT_EQ(outputShapes.size(), testModel.main.outputIndexes.size());
+ // Check that all returned output dimensions are at least as fully specified as the
+ // union of the information about the corresponding operand in the model and in the
+ // request. In this test, all model outputs have known rank with all dimensions
+ // unspecified, and no dimensional information is provided in the request.
+ for (uint32_t i = 0; i < outputShapes.size(); i++) {
+ ASSERT_EQ(outputShapes[i].isSufficient, i != kInsufficientOutputIndex);
+ const auto& actual = outputShapes[i].dimensions;
+ const auto& golden =
+ testModel.main.operands[testModel.main.outputIndexes[i]].dimensions;
+ ASSERT_EQ(actual.size(), golden.size());
+ for (uint32_t j = 0; j < actual.size(); j++) {
+ if (actual[j] == 0) continue;
+ EXPECT_EQ(actual[j], golden[j]) << "index: " << j;
+ }
+ }
+ return;
+ case OutputType::MISSED_DEADLINE:
+ ASSERT_TRUE(executionStatus == ErrorStatus::MISSED_DEADLINE_TRANSIENT ||
+ executionStatus == ErrorStatus::MISSED_DEADLINE_PERSISTENT)
+ << "executionStatus = " << executionStatus;
+ return;
+ }
- // Go through all outputs, check returned output shapes.
- for (uint32_t i = 0; i < outputShapes.size(); i++) {
- EXPECT_TRUE(outputShapes[i].isSufficient);
- const auto& expect = testModel.main.operands[testModel.main.outputIndexes[i]].dimensions;
- const auto unsignedActual = nn::toUnsigned(outputShapes[i].dimensions);
- ASSERT_TRUE(unsignedActual.has_value());
- const std::vector<uint32_t>& actual = unsignedActual.value();
- EXPECT_EQ(expect, actual);
- }
+ // Go through all outputs, check returned output shapes.
+ for (uint32_t i = 0; i < outputShapes.size(); i++) {
+ EXPECT_TRUE(outputShapes[i].isSufficient);
+ const auto& expect =
+ testModel.main.operands[testModel.main.outputIndexes[i]].dimensions;
+ const auto unsignedActual = nn::toUnsigned(outputShapes[i].dimensions);
+ ASSERT_TRUE(unsignedActual.has_value());
+ const std::vector<uint32_t>& actual = unsignedActual.value();
+ EXPECT_EQ(expect, actual);
+ }
+
+ // Retrieve execution results.
+ const std::vector<TestBuffer> outputs = context.getOutputBuffers(testModel, request);
- // Retrieve execution results.
- const std::vector<TestBuffer> outputs = context.getOutputBuffers(testModel, request);
+ // We want "close-enough" results.
+ checkResults(testModel, outputs);
+ };
- // We want "close-enough" results.
- checkResults(testModel, outputs);
+ executeAndCheckResults();
+
+ // For reusable execution tests, run the execution twice.
+ if (testConfig.reusable) {
+ SCOPED_TRACE("Second execution");
+ executeAndCheckResults();
+ }
}
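
Distilled from the lambda above, the reusable-execution flow added by this change amounts to creating the execution object once and invoking it repeatedly against the same request. A sketch under the same setup as the harness, with preparedModel, request, and loopTimeoutDurationNs assumed in scope:

    // Sketch of the reusable-execution pattern exercised above (FL8+ drivers only).
    std::shared_ptr<IExecution> execution;
    const auto status = preparedModel->createReusableExecution(
            request, {/*measureTiming=*/false, loopTimeoutDurationNs, {}, {}}, &execution);
    ASSERT_TRUE(status.isOk());
    for (int run = 0; run < 2; ++run) {  // the harness deliberately runs it twice
        ExecutionResult result;
        // Inputs and outputs were bound at creation; only the deadline is per-call.
        ASSERT_TRUE(execution->executeSynchronously(kNoDeadline, &result).isOk());
        EXPECT_TRUE(result.outputSufficientSize);
    }
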
void EvaluatePreparedModel(const std::shared_ptr<IDevice>& device,
@@ -770,6 +877,15 @@ void EvaluatePreparedModel(const std::shared_ptr<IDevice>& device,
std::vector<bool> measureTimingList;
std::vector<Executor> executorList;
std::vector<MemoryType> memoryTypeList;
+ std::vector<bool> reusableList = {false};
+ std::vector<bool> useConfigList = {false};
+
+ int32_t deviceVersion;
+ ASSERT_TRUE(device->getInterfaceVersion(&deviceVersion).isOk());
+ if (deviceVersion >= kMinAidlLevelForFL8) {
+ reusableList.push_back(true);
+ useConfigList.push_back(true);
+ }
switch (testKind) {
case TestKind::GENERAL: {
@@ -788,7 +904,11 @@ void EvaluatePreparedModel(const std::shared_ptr<IDevice>& device,
outputTypesList = {OutputType::FULLY_SPECIFIED};
measureTimingList = {false};
executorList = {Executor::SYNC, Executor::BURST, Executor::FENCED};
+#ifdef __ANDROID__
memoryTypeList = {MemoryType::BLOB_AHWB, MemoryType::DEVICE};
+#else // __ANDROID__
+ memoryTypeList = {MemoryType::DEVICE}; // BLOB_AHWB is not supported on the host.
+#endif // __ANDROID__
} break;
case TestKind::FENCED_COMPUTE: {
outputTypesList = {OutputType::FULLY_SPECIFIED};
@@ -812,8 +932,16 @@ void EvaluatePreparedModel(const std::shared_ptr<IDevice>& device,
for (const bool measureTiming : measureTimingList) {
for (const Executor executor : executorList) {
for (const MemoryType memoryType : memoryTypeList) {
- const TestConfig testConfig(executor, measureTiming, outputType, memoryType);
- EvaluatePreparedModel(device, preparedModel, testModel, testConfig);
+ for (const bool reusable : reusableList) {
+ for (const bool useConfig : useConfigList) {
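+ // Reusable executions take their ExecutionConfig at creation time and
+ // have no burst counterpart, so skip those combinations.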
+ if ((useConfig || executor == Executor::BURST) && reusable) continue;
+ const TestConfig testConfig(executor, measureTiming, outputType,
+ memoryType, reusable,
+ /*reportSkipping=*/true, useConfig);
+ SCOPED_TRACE(toString(testConfig));
+ EvaluatePreparedModel(device, preparedModel, testModel, testConfig);
+ }
+ }
}
}
}
@@ -833,7 +961,7 @@ void EvaluatePreparedCoupledModels(const std::shared_ptr<IDevice>& device,
for (const bool measureTiming : measureTimingList) {
for (const Executor executor : executorList) {
const TestConfig testConfig(executor, measureTiming, outputType, MemoryType::ASHMEM,
- /*reportSkipping=*/false);
+ /*reusable=*/false, /*reportSkipping=*/false);
bool baseSkipped = false;
EvaluatePreparedModel(device, preparedModel, testModel, testConfig, &baseSkipped);
bool coupledSkipped = false;
@@ -871,6 +999,13 @@ void Execute(const std::shared_ptr<IDevice>& device, const TestModel& testModel,
createPreparedModel(device, model, &preparedModel);
if (preparedModel == nullptr) return;
EvaluatePreparedModel(device, preparedModel, testModel, testKind);
+ int32_t deviceVersion;
+ ASSERT_TRUE(device->getInterfaceVersion(&deviceVersion).isOk());
+ if (deviceVersion >= kMinAidlLevelForFL8) {
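+ // Additionally exercise the *WithConfig prepare path and repeat the evaluation.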
+ createPreparedModel(device, model, &preparedModel, /*reportSkipping=*/true,
+ /*useConfig=*/true);
+ EvaluatePreparedModel(device, preparedModel, testModel, testKind);
+ }
} break;
case TestKind::QUANTIZATION_COUPLING: {
ASSERT_TRUE(testModel.hasQuant8CoupledOperands());
@@ -907,6 +1042,20 @@ void GeneratedTestBase::SetUp() {
const bool deviceIsResponsive =
ndk::ScopedAStatus::fromStatus(AIBinder_ping(kDevice->asBinder().get())).isOk();
ASSERT_TRUE(deviceIsResponsive);
+ // TODO(b/201260787): We should require old drivers to report the model as
+ // unsupported instead of simply skipping the test.
+ SkipIfDriverOlderThanTestModel();
+}
+
+void GeneratedTestBase::SkipIfDriverOlderThanTestModel() {
+ int32_t deviceVersion;
+ ASSERT_TRUE(kDevice->getInterfaceVersion(&deviceVersion).isOk());
+ const int32_t modelVersion = kTestModel.getAidlVersionInt();
+ if (deviceVersion < modelVersion) {
+ GTEST_SKIP() << "Device interface version " << deviceVersion
+ << " is older than test model's minimum supported HAL version " << modelVersion
+ << ". Skipping test.";
+ }
}
std::vector<NamedModel> getNamedModels(const FilterFn& filter) {