author     Haamed Gheibi <haamed@google.com>    2022-02-09 14:35:06 -0800
committer  Haamed Gheibi <haamed@google.com>    2022-02-09 14:41:16 -0800
commit     ab52181d73b04e131fd72e32d69b5123a5d6892b (patch)
tree       0ac86b537180b6fb97716b3058dfae44af9eaac7 /neuralnetworks/aidl/vts/functional/GeneratedTestHarness.cpp
parent     f99b35c293439db0b7436b47b939eb8c7bf21b51 (diff)
parent     4d2548cfa7b86b79a516be9b60f6b666cc9af682 (diff)
Merge TP1A.220126.001
Change-Id: Ibf6bd2c20d9927fde8b2a05dde2b58bd8faea20f
Diffstat (limited to 'neuralnetworks/aidl/vts/functional/GeneratedTestHarness.cpp')
-rw-r--r--   neuralnetworks/aidl/vts/functional/GeneratedTestHarness.cpp   99
1 file changed, 80 insertions, 19 deletions
diff --git a/neuralnetworks/aidl/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/aidl/vts/functional/GeneratedTestHarness.cpp
index 2460fbad86..40f6cd1573 100644
--- a/neuralnetworks/aidl/vts/functional/GeneratedTestHarness.cpp
+++ b/neuralnetworks/aidl/vts/functional/GeneratedTestHarness.cpp
@@ -20,7 +20,6 @@
 #include <aidl/android/hardware/neuralnetworks/RequestMemoryPool.h>
 #include <android-base/logging.h>
 #include <android/binder_auto_utils.h>
-#include <android/sync.h>
 #include <gtest/gtest.h>
 
 #include <algorithm>
@@ -30,7 +29,6 @@
 #include <numeric>
 #include <vector>
 
-#include <MemoryUtils.h>
 #include <android/binder_status.h>
 #include <nnapi/Result.h>
 #include <nnapi/SharedMemory.h>
@@ -43,6 +41,10 @@
 #include "Utils.h"
 #include "VtsHalNeuralnetworks.h"
 
+#ifdef __ANDROID__
+#include <android/sync.h>
+#endif  // __ANDROID__
+
 namespace aidl::android::hardware::neuralnetworks::vts::functional {
 
 namespace nn = ::android::nn;
@@ -63,6 +65,8 @@ struct TestConfig {
     // it is skipped. The field is set to true by default and is set to false in
     // quantization coupling tests to suppress skipping a test
     bool reportSkipping;
+    // `useConfig` indicates if a test should use execute*WithConfig functions for the execution.
+    bool useConfig;
     TestConfig(Executor executor, bool measureTiming, OutputType outputType, MemoryType memoryType,
                bool reusable)
         : executor(executor),
@@ -70,7 +74,8 @@
           outputType(outputType),
           memoryType(memoryType),
           reusable(reusable),
-          reportSkipping(true) {}
+          reportSkipping(true),
+          useConfig(false) {}
     TestConfig(Executor executor, bool measureTiming, OutputType outputType, MemoryType memoryType,
                bool reusable, bool reportSkipping)
         : executor(executor),
@@ -78,7 +83,17 @@
           outputType(outputType),
           memoryType(memoryType),
           reusable(reusable),
-          reportSkipping(reportSkipping) {}
+          reportSkipping(reportSkipping),
+          useConfig(false) {}
+    TestConfig(Executor executor, bool measureTiming, OutputType outputType, MemoryType memoryType,
+               bool reusable, bool reportSkipping, bool useConfig)
+        : executor(executor),
+          measureTiming(measureTiming),
+          outputType(outputType),
+          memoryType(memoryType),
+          reusable(reusable),
+          reportSkipping(reportSkipping),
+          useConfig(useConfig) {}
 };
 
 std::string toString(OutputType type) {
@@ -100,7 +115,8 @@ std::string toString(const TestConfig& config) {
        << ", .measureTiming=" << (config.measureTiming ? "true" : "false")
       << ", .outputType=" << toString(config.outputType)
       << ", .memoryType=" << toString(config.memoryType)
-       << ", .reusable=" << (config.reusable ? "true" : "false") << "}";
+       << ", .reusable=" << (config.reusable ? "true" : "false")
+       << ", .useConfig=" << (config.useConfig ? "true" : "false") << "}";
     return ss.str();
 }
 
@@ -267,10 +283,14 @@ void copyTestBuffers(const std::vector<const TestBuffer*>& buffers, uint8_t* out
 }  // namespace
 
 void waitForSyncFence(int syncFd) {
-    constexpr int kInfiniteTimeout = -1;
     ASSERT_GT(syncFd, 0);
+#ifdef __ANDROID__
+    constexpr int kInfiniteTimeout = -1;
     int r = sync_wait(syncFd, kInfiniteTimeout);
     ASSERT_GE(r, 0);
+#else  // __ANDROID__
+    LOG(FATAL) << "waitForSyncFence not supported on host";
+#endif  // __ANDROID__
 }
 
 Model createModel(const TestModel& testModel) {
@@ -587,8 +607,8 @@ void EvaluatePreparedModel(const std::shared_ptr<IDevice>& device,
 
     std::shared_ptr<IExecution> execution;
     if (testConfig.reusable) {
-        const auto ret = preparedModel->createReusableExecution(request, testConfig.measureTiming,
-                                                                loopTimeoutDurationNs, &execution);
+        const auto ret = preparedModel->createReusableExecution(
+                request, {testConfig.measureTiming, loopTimeoutDurationNs, {}, {}}, &execution);
         ASSERT_TRUE(ret.isOk()) << static_cast<nn::ErrorStatus>(ret.getServiceSpecificError());
         ASSERT_NE(nullptr, execution.get());
     }
@@ -607,6 +627,10 @@ void EvaluatePreparedModel(const std::shared_ptr<IDevice>& device,
             ::ndk::ScopedAStatus ret;
             if (testConfig.reusable) {
                 ret = execution->executeSynchronously(kNoDeadline, &executionResult);
+            } else if (testConfig.useConfig) {
+                ret = preparedModel->executeSynchronouslyWithConfig(
+                        request, {testConfig.measureTiming, loopTimeoutDurationNs, {}, {}},
+                        kNoDeadline, &executionResult);
             } else {
                 ret = preparedModel->executeSynchronously(request, testConfig.measureTiming,
                                                           kNoDeadline, loopTimeoutDurationNs,
                                                           &executionResult);
@@ -649,9 +673,16 @@ void EvaluatePreparedModel(const std::shared_ptr<IDevice>& device,
 
             ExecutionResult executionResult;
             // execute
-            ret = burst->executeSynchronously(request, slots, testConfig.measureTiming,
-                                              kNoDeadline, loopTimeoutDurationNs,
-                                              &executionResult);
+            if (testConfig.useConfig) {
+                ret = burst->executeSynchronouslyWithConfig(
+                        request, slots,
+                        {testConfig.measureTiming, loopTimeoutDurationNs, {}, {}}, kNoDeadline,
+                        &executionResult);
+            } else {
+                ret = burst->executeSynchronously(request, slots, testConfig.measureTiming,
+                                                  kNoDeadline, loopTimeoutDurationNs,
+                                                  &executionResult);
+            }
             ASSERT_TRUE(ret.isOk() || ret.getExceptionCode() == EX_SERVICE_SPECIFIC)
                     << ret.getDescription();
             if (ret.isOk()) {
@@ -680,6 +711,10 @@ void EvaluatePreparedModel(const std::shared_ptr<IDevice>& device,
             ::ndk::ScopedAStatus ret;
             if (testConfig.reusable) {
                 ret = execution->executeFenced({}, kNoDeadline, kNoDuration, &executionResult);
+            } else if (testConfig.useConfig) {
+                ret = preparedModel->executeFencedWithConfig(
+                        request, {}, {testConfig.measureTiming, loopTimeoutDurationNs, {}, {}},
+                        kNoDeadline, kNoDuration, &executionResult);
             } else {
                 ret = preparedModel->executeFenced(request, {}, testConfig.measureTiming,
                                                    kNoDeadline, loopTimeoutDurationNs,
@@ -697,9 +732,19 @@ void EvaluatePreparedModel(const std::shared_ptr<IDevice>& device,
                 waitFor.emplace_back(dupFd);
                 // If a sync fence is returned, try start another run waiting for the sync
                 // fence.
-                ret = preparedModel->executeFenced(request, waitFor, testConfig.measureTiming,
-                                                   kNoDeadline, loopTimeoutDurationNs,
-                                                   kNoDuration, &executionResult);
+                if (testConfig.reusable) {
+                    ret = execution->executeFenced(waitFor, kNoDeadline, kNoDuration,
+                                                   &executionResult);
+                } else if (testConfig.useConfig) {
+                    ret = preparedModel->executeFencedWithConfig(
+                            request, waitFor,
+                            {testConfig.measureTiming, loopTimeoutDurationNs, {}, {}},
+                            kNoDeadline, kNoDuration, &executionResult);
+                } else {
+                    ret = preparedModel->executeFenced(
+                            request, waitFor, testConfig.measureTiming, kNoDeadline,
+                            loopTimeoutDurationNs, kNoDuration, &executionResult);
+                }
                 ASSERT_TRUE(ret.isOk());
                 waitForSyncFence(executionResult.syncFence.get());
             }
@@ -830,11 +875,13 @@ void EvaluatePreparedModel(const std::shared_ptr<IDevice>& device,
     std::vector<Executor> executorList;
     std::vector<MemoryType> memoryTypeList;
     std::vector<bool> reusableList = {false};
+    std::vector<bool> useConfigList = {false};
 
     int deviceVersion;
     ASSERT_TRUE(device->getInterfaceVersion(&deviceVersion).isOk());
     if (deviceVersion >= kMinAidlLevelForFL8) {
         reusableList.push_back(true);
+        useConfigList.push_back(true);
     }
 
     switch (testKind) {
@@ -854,7 +901,11 @@ void EvaluatePreparedModel(const std::shared_ptr<IDevice>& device,
             outputTypesList = {OutputType::FULLY_SPECIFIED};
             measureTimingList = {false};
             executorList = {Executor::SYNC, Executor::BURST, Executor::FENCED};
+#ifdef __ANDROID__
             memoryTypeList = {MemoryType::BLOB_AHWB, MemoryType::DEVICE};
+#else  // __ANDROID__
+            memoryTypeList = {MemoryType::DEVICE};  // BLOB_AHWB is not supported on the host.
+#endif  // __ANDROID__
         } break;
         case TestKind::FENCED_COMPUTE: {
             outputTypesList = {OutputType::FULLY_SPECIFIED};
@@ -879,11 +930,14 @@ void EvaluatePreparedModel(const std::shared_ptr<IDevice>& device,
             for (const Executor executor : executorList) {
                 for (const MemoryType memoryType : memoryTypeList) {
                     for (const bool reusable : reusableList) {
-                        if (executor == Executor::BURST && reusable) continue;
-                        const TestConfig testConfig(executor, measureTiming, outputType, memoryType,
-                                                    reusable);
-                        SCOPED_TRACE(toString(testConfig));
-                        EvaluatePreparedModel(device, preparedModel, testModel, testConfig);
+                        for (const bool useConfig : useConfigList) {
+                            if ((useConfig || executor == Executor::BURST) && reusable) continue;
+                            const TestConfig testConfig(executor, measureTiming, outputType,
+                                                        memoryType, reusable,
+                                                        /*reportSkipping=*/true, useConfig);
+                            SCOPED_TRACE(toString(testConfig));
+                            EvaluatePreparedModel(device, preparedModel, testModel, testConfig);
+                        }
                     }
                 }
             }
@@ -942,6 +996,13 @@ void Execute(const std::shared_ptr<IDevice>& device, const TestModel& testModel,
             createPreparedModel(device, model, &preparedModel);
             if (preparedModel == nullptr) return;
             EvaluatePreparedModel(device, preparedModel, testModel, testKind);
+            int32_t deviceVersion;
+            ASSERT_TRUE(device->getInterfaceVersion(&deviceVersion).isOk());
+            if (deviceVersion >= kMinAidlLevelForFL8) {
+                createPreparedModel(device, model, &preparedModel, /*reportSkipping*/ true,
+                                    /*useConfig*/ true);
+                EvaluatePreparedModel(device, preparedModel, testModel, testKind);
+            }
         } break;
         case TestKind::QUANTIZATION_COUPLING: {
             ASSERT_TRUE(testModel.hasQuant8CoupledOperands());
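The `waitForSyncFence()` change above follows a common pattern for making libsync-dependent test code buildable for the host: the `sync_wait()` call is only compiled on Android, and the host path fails loudly. A minimal, self-contained sketch of that pattern follows; the function name `WaitOnFence` and the plain-C error handling are illustrative stand-ins, not taken from the harness.

```cpp
// Sketch only: guard libsync usage so the same source builds for Android and for the host.
#ifdef __ANDROID__
#include <android/sync.h>  // same include the harness uses for sync_wait()
#endif  // __ANDROID__

#include <cstdio>
#include <cstdlib>

void WaitOnFence(int syncFd) {
    if (syncFd <= 0) {
        std::abort();  // mirrors ASSERT_GT(syncFd, 0) in the test harness
    }
#ifdef __ANDROID__
    constexpr int kInfiniteTimeout = -1;  // block until the fence signals
    if (sync_wait(syncFd, kInfiniteTimeout) < 0) {
        std::perror("sync_wait");
        std::abort();
    }
#else  // __ANDROID__
    // No sync framework on the host; the harness treats this path as fatal.
    std::fprintf(stderr, "sync fence waiting is not supported on the host\n");
    std::abort();
#endif  // __ANDROID__
}
```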
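The other half of the change widens the test sweep: a `useConfig` dimension is added next to `reusable`, both are only enabled when the driver reports a new enough interface version, and combinations the interface does not support are skipped. A compilable sketch of that selection logic is below, with simplified stand-in types (`SweepPoint`, the `runOneTest` callback, the `supportsFeatureLevel8` flag) in place of the real `TestConfig`, `EvaluatePreparedModel`, and version query.

```cpp
#include <functional>
#include <iostream>
#include <vector>

enum class Executor { SYNC, BURST, FENCED };

struct SweepPoint {
    Executor executor;
    bool reusable;
    bool useConfig;
};

void SweepConfigs(const std::vector<Executor>& executors, bool supportsFeatureLevel8,
                  const std::function<void(const SweepPoint&)>& runOneTest) {
    std::vector<bool> reusableList = {false};
    std::vector<bool> useConfigList = {false};
    if (supportsFeatureLevel8) {
        // Reusable executions and the *WithConfig entry points are only exercised
        // on drivers that report a new enough interface version.
        reusableList.push_back(true);
        useConfigList.push_back(true);
    }
    for (Executor executor : executors) {
        for (bool reusable : reusableList) {
            for (bool useConfig : useConfigList) {
                // Same skip rule as the diff: a reusable execution is not combined
                // with the burst executor or with the *WithConfig calls.
                if ((useConfig || executor == Executor::BURST) && reusable) continue;
                runOneTest({executor, reusable, useConfig});
            }
        }
    }
}

int main() {
    SweepConfigs({Executor::SYNC, Executor::BURST, Executor::FENCED},
                 /*supportsFeatureLevel8=*/true, [](const SweepPoint& p) {
                     std::cout << "executor=" << static_cast<int>(p.executor)
                               << " reusable=" << p.reusable
                               << " useConfig=" << p.useConfig << '\n';
                 });
    return 0;
}
```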
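Throughout the diff, the per-execution options that used to be separate arguments (`testConfig.measureTiming`, `loopTimeoutDurationNs`) are now passed as one brace-initialized config argument, e.g. `{testConfig.measureTiming, loopTimeoutDurationNs, {}, {}}`. Judging only from these call sites, the first two members carry the timing flag and the loop timeout, and the two trailing empty initializers default-construct optional list fields. The struct below is a hypothetical stand-in used purely to illustrate that mapping; it is not the actual AIDL parcelable, and the names of the last two fields are assumptions.

```cpp
#include <cstdint>
#include <vector>

// Hypothetical stand-in for the execution-config argument. Field names and the
// element types of the two trailing lists are assumptions, not taken from the AIDL.
struct ExecutionOptions {
    bool measureTiming = false;
    int64_t loopTimeoutDurationNs = 0;
    std::vector<int32_t> executionHints;     // left empty ({}) by these tests
    std::vector<int32_t> extensionPrefixes;  // left empty ({}) by these tests
};

// Mirrors the aggregate used at the call sites in the diff.
ExecutionOptions MakeOptions(bool measureTiming, int64_t loopTimeoutDurationNs) {
    return {measureTiming, loopTimeoutDurationNs, {}, {}};
}
```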