Diffstat (limited to 'neuralnetworks/aidl/vts/functional')
-rw-r--r-- | neuralnetworks/aidl/vts/functional/Android.bp | 38
-rw-r--r-- | neuralnetworks/aidl/vts/functional/AndroidTestDevice.xml (renamed from neuralnetworks/aidl/vts/functional/AndroidTest.xml) | 0
-rw-r--r-- | neuralnetworks/aidl/vts/functional/AndroidTestHost.xml | 22
-rw-r--r-- | neuralnetworks/aidl/vts/functional/CompilationCachingTests.cpp | 6
-rw-r--r-- | neuralnetworks/aidl/vts/functional/GeneratedTestHarness.cpp | 99
-rw-r--r-- | neuralnetworks/aidl/vts/functional/MemoryDomainTests.cpp | 28
-rw-r--r-- | neuralnetworks/aidl/vts/functional/Utils.cpp | 12
-rw-r--r-- | neuralnetworks/aidl/vts/functional/Utils.h | 1
-rw-r--r-- | neuralnetworks/aidl/vts/functional/ValidateModel.cpp | 29
-rw-r--r-- | neuralnetworks/aidl/vts/functional/ValidateRequest.cpp | 55
-rw-r--r-- | neuralnetworks/aidl/vts/functional/VtsHalNeuralnetworks.cpp | 45
-rw-r--r-- | neuralnetworks/aidl/vts/functional/VtsHalNeuralnetworks.h | 4
12 files changed, 291 insertions, 48 deletions
diff --git a/neuralnetworks/aidl/vts/functional/Android.bp b/neuralnetworks/aidl/vts/functional/Android.bp
index 1ed15b82a7..356cdb0956 100644
--- a/neuralnetworks/aidl/vts/functional/Android.bp
+++ b/neuralnetworks/aidl/vts/functional/Android.bp
@@ -30,6 +30,7 @@ cc_test {
         "neuralnetworks_vts_functional_defaults",
         "use_libaidlvintf_gtest_helper_static",
     ],
+    host_supported: true,
    srcs: [
        "BasicTests.cpp",
        "Callbacks.cpp",
@@ -46,18 +47,11 @@ cc_test {
    ],
    shared_libs: [
        "libbinder_ndk",
-        "libnativewindow",
-        "libvndksupport",
    ],
    static_libs: [
-        "android.hidl.allocator@1.0",
-        "android.hidl.memory@1.0",
        "libaidlcommonsupport",
-        "libgmock",
-        "libhidlmemory",
        "libneuralnetworks_common",
        "libneuralnetworks_generated_test_harness",
-        "libsync",
    ],
    whole_static_libs: [
        "neuralnetworks_generated_AIDL_V3_example",
@@ -73,6 +67,34 @@ cc_test {
    ],
    test_suites: [
        "general-tests",
-        "vts",
    ],
+    target: {
+        android: {
+            shared_libs: [
+                "libnativewindow",
+                "libvndksupport",
+            ],
+            static_libs: [
+                "libsync",
+            ],
+            test_suites: [
+                "vts",
+            ],
+            test_config: "AndroidTestDevice.xml",
+        },
+        host: {
+            shared_libs: [
+                "libtextclassifier_hash",
+            ],
+            static_libs: [
+                "neuralnetworks_canonical_sample_driver",
+                "neuralnetworks_utils_hal_adapter_aidl",
+            ],
+            exclude_static_libs: [
+                "VtsHalHidlTestUtils",
+                "libaidlvintf_gtest_helper",
+            ],
+            test_config: "AndroidTestHost.xml",
+        },
+    },
 }
diff --git a/neuralnetworks/aidl/vts/functional/AndroidTest.xml b/neuralnetworks/aidl/vts/functional/AndroidTestDevice.xml
index 384d42078f..384d42078f 100644
--- a/neuralnetworks/aidl/vts/functional/AndroidTest.xml
+++ b/neuralnetworks/aidl/vts/functional/AndroidTestDevice.xml
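Note: the `host_supported: true` plus `target.android`/`target.host` split above means one gtest binary is built for both platforms, and the sources select their platform-specific pieces at compile time. A minimal sketch of that idiom, not taken from the CL (`backendName()` is a hypothetical helper; `__ANDROID__` is the macro Clang predefines when targeting Android):

```cpp
// One test binary, two backends: device builds talk to a registered binder
// service, host builds use an in-process sample driver (see the
// VtsHalNeuralnetworks.cpp hunks later in this diff).
#include <iostream>
#include <string>

std::string backendName() {
#ifdef __ANDROID__
    return "registered binder service (VINTF)";
#else   // __ANDROID__
    return "in-process sample driver via AIDL adapter";
#endif  // __ANDROID__
}

int main() {
    std::cout << "testing against: " << backendName() << '\n';
    return 0;
}
```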
diff --git a/neuralnetworks/aidl/vts/functional/AndroidTestHost.xml b/neuralnetworks/aidl/vts/functional/AndroidTestHost.xml
new file mode 100644
index 0000000000..7372a3148c
--- /dev/null
+++ b/neuralnetworks/aidl/vts/functional/AndroidTestHost.xml
@@ -0,0 +1,22 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright (C) 2022 The Android Open Source Project
+
+     Licensed under the Apache License, Version 2.0 (the "License");
+     you may not use this file except in compliance with the License.
+     You may obtain a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+     Unless required by applicable law or agreed to in writing, software
+     distributed under the License is distributed on an "AS IS" BASIS,
+     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+     See the License for the specific language governing permissions and
+     limitations under the License.
+-->
+<configuration description="Runs VtsHalNeuralnetworksTargetTest.">
+    <test class="com.android.tradefed.testtype.HostGTest" >
+        <option name="module-name" value="VtsHalNeuralnetworksTargetTest" />
+        <option name="native-test-timeout" value="15m" />
+    </test>
+</configuration>
+
diff --git a/neuralnetworks/aidl/vts/functional/CompilationCachingTests.cpp b/neuralnetworks/aidl/vts/functional/CompilationCachingTests.cpp
index 77208aaf87..7451f7eec3 100644
--- a/neuralnetworks/aidl/vts/functional/CompilationCachingTests.cpp
+++ b/neuralnetworks/aidl/vts/functional/CompilationCachingTests.cpp
@@ -23,7 +23,6 @@
 #include <fcntl.h>
 #include <ftw.h>
 #include <gtest/gtest.h>
-#include <hidlmemory/mapping.h>
 #include <unistd.h>
 
 #include <cstdio>
@@ -34,7 +33,6 @@
 
 #include "Callbacks.h"
 #include "GeneratedTestHarness.h"
-#include "MemoryUtils.h"
 #include "TestHarness.h"
 #include "Utils.h"
 #include "VtsHalNeuralnetworks.h"
@@ -229,7 +227,11 @@ class CompilationCachingTestBase : public testing::Test {
         // Create cache directory. The cache directory and a temporary cache file is always created
         // to test the behavior of prepareModelFromCache, even when caching is not supported.
+#ifdef __ANDROID__
         char cacheDirTemp[] = "/data/local/tmp/TestCompilationCachingXXXXXX";
+#else  // __ANDROID__
+        char cacheDirTemp[] = "/tmp/TestCompilationCachingXXXXXX";
+#endif  // __ANDROID__
         char* cacheDir = mkdtemp(cacheDirTemp);
         ASSERT_NE(cacheDir, nullptr);
         mCacheDir = cacheDir;
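A standalone sketch of the cache-directory setup above: POSIX `mkdtemp()` mutates a writable template ending in `XXXXXX`, so only the path prefix needs to differ between the device and host builds. The paths match the ones in the hunk; everything else here is illustrative.

```cpp
// Create a unique scratch directory the way the test fixture now does.
#include <stdlib.h>  // mkdtemp (POSIX)
#include <cstdio>
#include <iostream>

int main() {
#ifdef __ANDROID__
    char tmpl[] = "/data/local/tmp/TestCompilationCachingXXXXXX";
#else   // __ANDROID__
    char tmpl[] = "/tmp/TestCompilationCachingXXXXXX";
#endif  // __ANDROID__
    char* dir = mkdtemp(tmpl);  // replaces XXXXXX in place; nullptr on failure
    if (dir == nullptr) {
        std::perror("mkdtemp");
        return 1;
    }
    std::cout << "cache dir: " << dir << '\n';
    return 0;
}
```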
diff --git a/neuralnetworks/aidl/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/aidl/vts/functional/GeneratedTestHarness.cpp
index 2460fbad86..40f6cd1573 100644
--- a/neuralnetworks/aidl/vts/functional/GeneratedTestHarness.cpp
+++ b/neuralnetworks/aidl/vts/functional/GeneratedTestHarness.cpp
@@ -20,7 +20,6 @@
 #include <aidl/android/hardware/neuralnetworks/RequestMemoryPool.h>
 #include <android-base/logging.h>
 #include <android/binder_auto_utils.h>
-#include <android/sync.h>
 #include <gtest/gtest.h>
 
 #include <algorithm>
@@ -30,7 +29,6 @@
 #include <numeric>
 #include <vector>
 
-#include <MemoryUtils.h>
 #include <android/binder_status.h>
 #include <nnapi/Result.h>
 #include <nnapi/SharedMemory.h>
@@ -43,6 +41,10 @@
 #include "Utils.h"
 #include "VtsHalNeuralnetworks.h"
 
+#ifdef __ANDROID__
+#include <android/sync.h>
+#endif  // __ANDROID__
+
 namespace aidl::android::hardware::neuralnetworks::vts::functional {
 
 namespace nn = ::android::nn;
@@ -63,6 +65,8 @@ struct TestConfig {
     // it is skipped. The field is set to true by default and is set to false in
     // quantization coupling tests to suppress skipping a test
     bool reportSkipping;
+    // `useConfig` indicates if a test should use execute*WithConfig functions for the execution.
+    bool useConfig;
     TestConfig(Executor executor, bool measureTiming, OutputType outputType, MemoryType memoryType,
                bool reusable)
         : executor(executor),
@@ -70,7 +74,8 @@ struct TestConfig {
           outputType(outputType),
           memoryType(memoryType),
           reusable(reusable),
-          reportSkipping(true) {}
+          reportSkipping(true),
+          useConfig(false) {}
     TestConfig(Executor executor, bool measureTiming, OutputType outputType, MemoryType memoryType,
                bool reusable, bool reportSkipping)
         : executor(executor),
@@ -78,7 +83,17 @@ struct TestConfig {
           outputType(outputType),
           memoryType(memoryType),
           reusable(reusable),
-          reportSkipping(reportSkipping) {}
+          reportSkipping(reportSkipping),
+          useConfig(false) {}
+    TestConfig(Executor executor, bool measureTiming, OutputType outputType, MemoryType memoryType,
+               bool reusable, bool reportSkipping, bool useConfig)
+        : executor(executor),
+          measureTiming(measureTiming),
+          outputType(outputType),
+          memoryType(memoryType),
+          reusable(reusable),
+          reportSkipping(reportSkipping),
+          useConfig(useConfig) {}
 };
 
 std::string toString(OutputType type) {
@@ -100,7 +115,8 @@ std::string toString(const TestConfig& config) {
        << ", .measureTiming=" << (config.measureTiming ? "true" : "false")
        << ", .outputType=" << toString(config.outputType)
        << ", .memoryType=" << toString(config.memoryType)
-       << ", .reusable=" << (config.reusable ? "true" : "false") << "}";
+       << ", .reusable=" << (config.reusable ? "true" : "false")
+       << ", .useConfig=" << (config.useConfig ? "true" : "false") << "}";
     return ss.str();
 }
 
@@ -267,10 +283,14 @@ void copyTestBuffers(const std::vector<const TestBuffer*>& buffers, uint8_t* out
 }  // namespace
 
 void waitForSyncFence(int syncFd) {
-    constexpr int kInfiniteTimeout = -1;
     ASSERT_GT(syncFd, 0);
+#ifdef __ANDROID__
+    constexpr int kInfiniteTimeout = -1;
     int r = sync_wait(syncFd, kInfiniteTimeout);
     ASSERT_GE(r, 0);
+#else  // __ANDROID__
+    LOG(FATAL) << "waitForSyncFence not supported on host";
+#endif  // __ANDROID__
 }
 
 Model createModel(const TestModel& testModel) {
@@ -587,8 +607,8 @@ void EvaluatePreparedModel(const std::shared_ptr<IDevice>& device,
 
     std::shared_ptr<IExecution> execution;
     if (testConfig.reusable) {
-        const auto ret = preparedModel->createReusableExecution(request, testConfig.measureTiming,
-                                                                loopTimeoutDurationNs, &execution);
+        const auto ret = preparedModel->createReusableExecution(
+                request, {testConfig.measureTiming, loopTimeoutDurationNs, {}, {}}, &execution);
         ASSERT_TRUE(ret.isOk()) << static_cast<nn::ErrorStatus>(ret.getServiceSpecificError());
         ASSERT_NE(nullptr, execution.get());
     }
@@ -607,6 +627,10 @@ void EvaluatePreparedModel(const std::shared_ptr<IDevice>& device,
             ::ndk::ScopedAStatus ret;
             if (testConfig.reusable) {
                 ret = execution->executeSynchronously(kNoDeadline, &executionResult);
+            } else if (testConfig.useConfig) {
+                ret = preparedModel->executeSynchronouslyWithConfig(
+                        request, {testConfig.measureTiming, loopTimeoutDurationNs, {}, {}},
+                        kNoDeadline, &executionResult);
             } else {
                 ret = preparedModel->executeSynchronously(request, testConfig.measureTiming,
                                                           kNoDeadline, loopTimeoutDurationNs,
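Why these call sites now pass `{testConfig.measureTiming, loopTimeoutDurationNs, {}, {}}`: the FL8 `*WithConfig` entry points take a single `ExecutionConfig` parcelable instead of loose arguments. A stand-in sketch of that shape follows; the real generated type lives in the NNAPI AIDL package, and the names and types of the last two fields here (hint/extension lists, passed as `{}` throughout the diff) are assumptions for illustration.

```cpp
// Mirror of the aggregate-initialization pattern used at the new call sites.
#include <cstdint>
#include <iostream>
#include <vector>

struct ExecutionConfig {                         // stand-in, not the AIDL type
    bool measureTiming = false;
    int64_t loopTimeoutDurationNs = 0;
    std::vector<int32_t> executionHints;         // assumed field; {} in the diff
    std::vector<int32_t> extensionNameToPrefix;  // assumed field; {} in the diff
};

void executeSynchronouslyWithConfig(const ExecutionConfig& config, int64_t deadlineNs) {
    std::cout << "measure=" << config.measureTiming
              << " timeoutNs=" << config.loopTimeoutDurationNs
              << " deadlineNs=" << deadlineNs << '\n';
}

int main() {
    // The exact brace shape used throughout the diff: {measure, timeout, {}, {}}.
    executeSynchronouslyWithConfig({true, 1000000, {}, {}}, /*deadlineNs=*/-1);
    return 0;
}
```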
@@ -649,9 +673,16 @@ void EvaluatePreparedModel(const std::shared_ptr<IDevice>& device,
                 ExecutionResult executionResult;
                 // execute
-                ret = burst->executeSynchronously(request, slots, testConfig.measureTiming,
-                                                  kNoDeadline, loopTimeoutDurationNs,
-                                                  &executionResult);
+                if (testConfig.useConfig) {
+                    ret = burst->executeSynchronouslyWithConfig(
+                            request, slots,
+                            {testConfig.measureTiming, loopTimeoutDurationNs, {}, {}}, kNoDeadline,
+                            &executionResult);
+                } else {
+                    ret = burst->executeSynchronously(request, slots, testConfig.measureTiming,
+                                                      kNoDeadline, loopTimeoutDurationNs,
+                                                      &executionResult);
+                }
                 ASSERT_TRUE(ret.isOk() || ret.getExceptionCode() == EX_SERVICE_SPECIFIC)
                         << ret.getDescription();
                 if (ret.isOk()) {
@@ -680,6 +711,10 @@ void EvaluatePreparedModel(const std::shared_ptr<IDevice>& device,
             ::ndk::ScopedAStatus ret;
             if (testConfig.reusable) {
                 ret = execution->executeFenced({}, kNoDeadline, kNoDuration, &executionResult);
+            } else if (testConfig.useConfig) {
+                ret = preparedModel->executeFencedWithConfig(
+                        request, {}, {testConfig.measureTiming, loopTimeoutDurationNs, {}, {}},
+                        kNoDeadline, kNoDuration, &executionResult);
             } else {
                 ret = preparedModel->executeFenced(request, {}, testConfig.measureTiming,
                                                    kNoDeadline, loopTimeoutDurationNs,
@@ -697,9 +732,19 @@ void EvaluatePreparedModel(const std::shared_ptr<IDevice>& device,
                     waitFor.emplace_back(dupFd);
                     // If a sync fence is returned, try start another run waiting for the sync
                     // fence.
-                    ret = preparedModel->executeFenced(request, waitFor, testConfig.measureTiming,
-                                                       kNoDeadline, loopTimeoutDurationNs,
-                                                       kNoDuration, &executionResult);
+                    if (testConfig.reusable) {
+                        ret = execution->executeFenced(waitFor, kNoDeadline, kNoDuration,
+                                                       &executionResult);
+                    } else if (testConfig.useConfig) {
+                        ret = preparedModel->executeFencedWithConfig(
+                                request, waitFor,
+                                {testConfig.measureTiming, loopTimeoutDurationNs, {}, {}},
+                                kNoDeadline, kNoDuration, &executionResult);
+                    } else {
+                        ret = preparedModel->executeFenced(
+                                request, waitFor, testConfig.measureTiming, kNoDeadline,
+                                loopTimeoutDurationNs, kNoDuration, &executionResult);
+                    }
                     ASSERT_TRUE(ret.isOk());
                     waitForSyncFence(executionResult.syncFence.get());
                 }
@@ -830,11 +875,13 @@ void EvaluatePreparedModel(const std::shared_ptr<IDevice>& device,
     std::vector<Executor> executorList;
     std::vector<MemoryType> memoryTypeList;
     std::vector<bool> reusableList = {false};
+    std::vector<bool> useConfigList = {false};
 
     int deviceVersion;
     ASSERT_TRUE(device->getInterfaceVersion(&deviceVersion).isOk());
     if (deviceVersion >= kMinAidlLevelForFL8) {
         reusableList.push_back(true);
+        useConfigList.push_back(true);
     }
 
     switch (testKind) {
@@ -854,7 +901,11 @@ void EvaluatePreparedModel(const std::shared_ptr<IDevice>& device,
             outputTypesList = {OutputType::FULLY_SPECIFIED};
             measureTimingList = {false};
             executorList = {Executor::SYNC, Executor::BURST, Executor::FENCED};
+#ifdef __ANDROID__
             memoryTypeList = {MemoryType::BLOB_AHWB, MemoryType::DEVICE};
+#else  // __ANDROID__
+            memoryTypeList = {MemoryType::DEVICE};  // BLOB_AHWB is not supported on the host.
+#endif  // __ANDROID__
         } break;
         case TestKind::FENCED_COMPUTE: {
             outputTypesList = {OutputType::FULLY_SPECIFIED};
             measureTimingList = {false};
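The execution hunks above all converge on the same three-way dispatch: a reusable execution object ignores per-call config (it was fixed at creation), the FL8 path bundles everything into `ExecutionConfig`, and the legacy path passes loose arguments. A condensed sketch with simplified stand-in types, not the real AIDL interfaces:

```cpp
// The dispatch order matters: reusable wins, then useConfig, then legacy.
#include <iostream>
#include <string>

struct TestConfig {
    bool reusable = false;
    bool useConfig = false;
};

std::string dispatch(const TestConfig& cfg) {
    if (cfg.reusable) return "execution->executeFenced(...)";                 // config pre-baked
    if (cfg.useConfig) return "preparedModel->executeFencedWithConfig(...)";  // FL8 bundle
    return "preparedModel->executeFenced(...)";                               // legacy loose args
}

int main() {
    std::cout << dispatch({false, true}) << '\n';
    return 0;
}
```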
@@ -879,11 +930,14 @@ void EvaluatePreparedModel(const std::shared_ptr<IDevice>& device,
     for (const Executor executor : executorList) {
         for (const MemoryType memoryType : memoryTypeList) {
             for (const bool reusable : reusableList) {
-                if (executor == Executor::BURST && reusable) continue;
-                const TestConfig testConfig(executor, measureTiming, outputType, memoryType,
-                                            reusable);
-                SCOPED_TRACE(toString(testConfig));
-                EvaluatePreparedModel(device, preparedModel, testModel, testConfig);
+                for (const bool useConfig : useConfigList) {
+                    if ((useConfig || executor == Executor::BURST) && reusable) continue;
+                    const TestConfig testConfig(executor, measureTiming, outputType,
+                                                memoryType, reusable,
+                                                /*reportSkipping=*/true, useConfig);
+                    SCOPED_TRACE(toString(testConfig));
+                    EvaluatePreparedModel(device, preparedModel, testModel, testConfig);
+                }
             }
         }
     }
@@ -942,6 +996,13 @@ void Execute(const std::shared_ptr<IDevice>& device, const TestModel& testModel,
             createPreparedModel(device, model, &preparedModel);
             if (preparedModel == nullptr) return;
             EvaluatePreparedModel(device, preparedModel, testModel, testKind);
+            int32_t deviceVersion;
+            ASSERT_TRUE(device->getInterfaceVersion(&deviceVersion).isOk());
+            if (deviceVersion >= kMinAidlLevelForFL8) {
+                createPreparedModel(device, model, &preparedModel, /*reportSkipping*/ true,
+                                    /*useConfig*/ true);
+                EvaluatePreparedModel(device, preparedModel, testModel, testKind);
+            }
         } break;
         case TestKind::QUANTIZATION_COUPLING: {
             ASSERT_TRUE(testModel.hasQuant8CoupledOperands());
diff --git a/neuralnetworks/aidl/vts/functional/MemoryDomainTests.cpp b/neuralnetworks/aidl/vts/functional/MemoryDomainTests.cpp
index b3e9c633e3..f8341b15b4 100644
--- a/neuralnetworks/aidl/vts/functional/MemoryDomainTests.cpp
+++ b/neuralnetworks/aidl/vts/functional/MemoryDomainTests.cpp
@@ -17,6 +17,7 @@
 #define LOG_TAG "neuralnetworks_aidl_hal_test"
 
 #include <aidl/android/hardware/graphics/common/PixelFormat.h>
+#include <aidl/android/hardware/neuralnetworks/IPreparedModel.h>
 #include <android-base/logging.h>
 #include <android/binder_auto_utils.h>
 #include <android/binder_interface_utils.h>
@@ -33,7 +34,6 @@
 
 #include "Callbacks.h"
 #include "GeneratedTestHarness.h"
-#include "MemoryUtils.h"
 #include "Utils.h"
 #include "VtsHalNeuralnetworks.h"
 
@@ -191,7 +191,7 @@ TestModel createSingleAddModel(const TestOperand& operand) {
 }
 
 // A placeholder invalid IPreparedModel class for MemoryDomainAllocateTest.InvalidPreparedModel
-class InvalidPreparedModel : public BnPreparedModel {
+class InvalidPreparedModel final : public IPreparedModel {
   public:
     ndk::ScopedAStatus executeSynchronously(const Request&, bool, int64_t, int64_t,
                                             ExecutionResult*) override {
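Why `InvalidPreparedModel` switches from `BnPreparedModel` to the raw `IPreparedModel` interface (and, in the next hunk, grows `asBinder()`/`isRemote()`/version stubs): deriving from the `Bn*` base normally supplies the binder plumbing, so a class that implements only the interface must fake that plumbing itself, which is exactly what a test-only "always fails" object wants. A stand-in sketch, not the generated AIDL classes:

```cpp
// An interface-only fake: no Bn* base, so the binder hooks are stubbed.
#include <iostream>
#include <memory>

struct IFoo {  // stand-in for a generated AIDL interface
    virtual ~IFoo() = default;
    virtual int doWork() = 0;
    virtual void* asBinder() = 0;  // normally provided by the Bn* base
    virtual bool isRemote() = 0;   // likewise
};

struct InvalidFoo final : IFoo {
    int doWork() override { return -1; }           // always "general failure"
    void* asBinder() override { return nullptr; }  // no real binder exists
    bool isRemote() override { return true; }      // pretend to be remote
};

int main() {
    const std::unique_ptr<IFoo> foo = std::make_unique<InvalidFoo>();
    std::cout << "doWork -> " << foo->doWork() << ", remote=" << foo->isRemote() << '\n';
    return 0;
}
```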
@@ -204,15 +204,37 @@ class InvalidPreparedModel : public BnPreparedModel {
         return ndk::ScopedAStatus::fromServiceSpecificError(
                 static_cast<int32_t>(ErrorStatus::GENERAL_FAILURE));
     }
+    ndk::ScopedAStatus executeSynchronouslyWithConfig(const Request&, const ExecutionConfig&,
+                                                      int64_t, ExecutionResult*) override {
+        return ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(ErrorStatus::GENERAL_FAILURE));
+    }
+    ndk::ScopedAStatus executeFencedWithConfig(const Request&,
+                                               const std::vector<ndk::ScopedFileDescriptor>&,
+                                               const ExecutionConfig&, int64_t, int64_t,
+                                               FencedExecutionResult*) override {
+        return ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(ErrorStatus::GENERAL_FAILURE));
+    }
     ndk::ScopedAStatus configureExecutionBurst(std::shared_ptr<IBurst>*) override {
         return ndk::ScopedAStatus::fromServiceSpecificError(
                 static_cast<int32_t>(ErrorStatus::GENERAL_FAILURE));
     }
-    ndk::ScopedAStatus createReusableExecution(const aidl_hal::Request&, bool, int64_t,
+    ndk::ScopedAStatus createReusableExecution(const aidl_hal::Request&, const ExecutionConfig&,
                                                std::shared_ptr<aidl_hal::IExecution>*) override {
         return ndk::ScopedAStatus::fromServiceSpecificError(
                 static_cast<int32_t>(ErrorStatus::GENERAL_FAILURE));
     }
+    ndk::ScopedAStatus getInterfaceVersion(int32_t* /*interfaceVersion*/) {
+        return ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(ErrorStatus::GENERAL_FAILURE));
+    }
+    ndk::ScopedAStatus getInterfaceHash(std::string* /*interfaceHash*/) {
+        return ndk::ScopedAStatus::fromServiceSpecificError(
+                static_cast<int32_t>(ErrorStatus::GENERAL_FAILURE));
+    }
+    ndk::SpAIBinder asBinder() override { return ::ndk::SpAIBinder{}; }
+    bool isRemote() override { return true; }
 };
 
 template <typename... Args>
diff --git a/neuralnetworks/aidl/vts/functional/Utils.cpp b/neuralnetworks/aidl/vts/functional/Utils.cpp
index efd5bca517..1bc76f2cb4 100644
--- a/neuralnetworks/aidl/vts/functional/Utils.cpp
+++ b/neuralnetworks/aidl/vts/functional/Utils.cpp
@@ -21,18 +21,20 @@
 #include <aidl/android/hardware/neuralnetworks/OperandType.h>
 #include <android-base/logging.h>
 #include <android/binder_status.h>
-#include <android/hardware_buffer.h>
 #include <sys/mman.h>
 
 #include <iostream>
 #include <limits>
 #include <numeric>
 
-#include <MemoryUtils.h>
 #include <nnapi/SharedMemory.h>
 #include <nnapi/hal/aidl/Conversions.h>
 #include <nnapi/hal/aidl/Utils.h>
 
+#ifdef __ANDROID__
+#include <android/hardware_buffer.h>
+#endif  // __ANDROID__
+
 namespace aidl::android::hardware::neuralnetworks {
 
 using test_helper::TestBuffer;
@@ -140,7 +142,8 @@ std::unique_ptr<TestBlobAHWB> TestBlobAHWB::create(uint32_t size) {
     return ahwb->mIsValid ? std::move(ahwb) : nullptr;
 }
 
-void TestBlobAHWB::initialize(uint32_t size) {
+void TestBlobAHWB::initialize([[maybe_unused]] uint32_t size) {
+#ifdef __ANDROID__
     mIsValid = false;
     ASSERT_GT(size, 0);
     const auto usage = AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN | AHARDWAREBUFFER_USAGE_CPU_WRITE_OFTEN;
@@ -164,6 +167,9 @@ void TestBlobAHWB::initialize(uint32_t size) {
 
     mAidlMemory = utils::convert(mMemory).value();
     mIsValid = true;
+#else  // __ANDROID__
+    LOG(FATAL) << "TestBlobAHWB::initialize not supported on host";
+#endif  // __ANDROID__
 }
 
 std::string gtestCompliantName(std::string name) {
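The `TestBlobAHWB` guard above follows a deliberate shape: one signature for both builds, `[[maybe_unused]]` to keep the host compile warning-clean (the parameter is only read inside the `#ifdef`), and a hard failure on the host path, which should be unreachable because `BLOB_AHWB` is filtered out of `memoryTypeList` there. A self-contained approximation, not the real class:

```cpp
// Same guard shape: unused-on-host parameter, fatal host path.
#include <cassert>
#include <cstdint>
#include <cstdio>

void initializeBlob([[maybe_unused]] uint32_t size) {
#ifdef __ANDROID__
    std::printf("would AHardwareBuffer_allocate %u bytes here\n", size);
#else   // __ANDROID__
    // Mirrors LOG(FATAL): reaching this on host is a test-harness bug.
    std::fprintf(stderr, "blob AHWBs not supported on host\n");
    assert(false);
#endif  // __ANDROID__
}

int main() {
#ifdef __ANDROID__
    initializeBlob(64);
#endif  // __ANDROID__
    return 0;
}
```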
diff --git a/neuralnetworks/aidl/vts/functional/Utils.h b/neuralnetworks/aidl/vts/functional/Utils.h
index 0db3f8c7f8..4e0a4aafa3 100644
--- a/neuralnetworks/aidl/vts/functional/Utils.h
+++ b/neuralnetworks/aidl/vts/functional/Utils.h
@@ -18,7 +18,6 @@
 #define ANDROID_HARDWARE_NEURALNETWORKS_AIDL_UTILS_H
 
 #include <android-base/logging.h>
-#include <android/hardware_buffer.h>
 #include <gtest/gtest.h>
 
 #include <algorithm>
diff --git a/neuralnetworks/aidl/vts/functional/ValidateModel.cpp b/neuralnetworks/aidl/vts/functional/ValidateModel.cpp
index fdc7eff96f..931ba258b3 100644
--- a/neuralnetworks/aidl/vts/functional/ValidateModel.cpp
+++ b/neuralnetworks/aidl/vts/functional/ValidateModel.cpp
@@ -77,6 +77,28 @@ static void validatePrepareModel(const std::shared_ptr<IDevice>& device, const s
     ASSERT_EQ(nullptr, preparedModel.get());
 }
 
+static void validatePrepareModelWithConfig(const std::shared_ptr<IDevice>& device,
+                                           const std::string& message, const Model& model,
+                                           ExecutionPreference preference, Priority priority) {
+    SCOPED_TRACE(message + " [prepareModelWithConfig]");
+
+    std::shared_ptr<PreparedModelCallback> preparedModelCallback =
+            ndk::SharedRefBase::make<PreparedModelCallback>();
+    const auto prepareLaunchStatus = device->prepareModelWithConfig(
+            model, {preference, priority, kNoDeadline, {}, {}, kEmptyCacheToken, {}, {}},
+            preparedModelCallback);
+    ASSERT_FALSE(prepareLaunchStatus.isOk());
+    ASSERT_EQ(prepareLaunchStatus.getExceptionCode(), EX_SERVICE_SPECIFIC);
+    ASSERT_EQ(static_cast<ErrorStatus>(prepareLaunchStatus.getServiceSpecificError()),
+              ErrorStatus::INVALID_ARGUMENT);
+
+    preparedModelCallback->wait();
+    ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
+    ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, prepareReturnStatus);
+    std::shared_ptr<IPreparedModel> preparedModel = preparedModelCallback->getPreparedModel();
+    ASSERT_EQ(nullptr, preparedModel.get());
+}
+
 static bool validExecutionPreference(ExecutionPreference preference) {
     return preference == ExecutionPreference::LOW_POWER ||
            preference == ExecutionPreference::FAST_SINGLE_ANSWER ||
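`validatePrepareModelWithConfig` checks rejection on two levels: the binder call itself must fail with a service-specific `INVALID_ARGUMENT`, and the prepare callback must independently report the same status, so both the synchronous and asynchronous error paths are covered. A plain C++ reduction of that pattern, with stand-in types rather than the AIDL ones:

```cpp
// An invalid model must be rejected by the launch status AND the callback.
#include <iostream>

enum class ErrorStatus { NONE, INVALID_ARGUMENT };

struct PreparedModelCallback {
    ErrorStatus status = ErrorStatus::NONE;
    void notify(ErrorStatus s) { status = s; }
};

ErrorStatus prepareModelWithConfig(bool modelIsValid, PreparedModelCallback* cb) {
    if (!modelIsValid) {
        cb->notify(ErrorStatus::INVALID_ARGUMENT);  // asynchronous path reports
        return ErrorStatus::INVALID_ARGUMENT;       // launch status fails too
    }
    cb->notify(ErrorStatus::NONE);
    return ErrorStatus::NONE;
}

int main() {
    PreparedModelCallback cb;
    const ErrorStatus launch = prepareModelWithConfig(/*modelIsValid=*/false, &cb);
    const bool bothRejected = launch == ErrorStatus::INVALID_ARGUMENT &&
                              cb.status == ErrorStatus::INVALID_ARGUMENT;
    std::cout << (bothRejected ? "rejected on both paths\n" : "validation bug\n");
    return bothRejected ? 0 : 1;
}
```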
@@ -103,6 +125,13 @@ static void validate(const std::shared_ptr<IDevice>& device, const std::string&
     }
 
     validatePrepareModel(device, message, model, preference, priority);
+
+    int32_t aidlVersion;
+    ASSERT_TRUE(device->getInterfaceVersion(&aidlVersion).isOk());
+    if (aidlVersion >= kMinAidlLevelForFL8) {
+        // prepareModelWithConfig must satisfy all requirements enforced by prepareModel.
+        validatePrepareModelWithConfig(device, message, model, preference, priority);
+    }
 }
 
 static uint32_t addOperand(Model* model) {
diff --git a/neuralnetworks/aidl/vts/functional/ValidateRequest.cpp b/neuralnetworks/aidl/vts/functional/ValidateRequest.cpp
index e8debf704c..d7498419a1 100644
--- a/neuralnetworks/aidl/vts/functional/ValidateRequest.cpp
+++ b/neuralnetworks/aidl/vts/functional/ValidateRequest.cpp
@@ -45,7 +45,7 @@ static void validateReusableExecution(const std::shared_ptr<IPreparedModel>& pre
     {
         SCOPED_TRACE(message + " [createReusableExecution]");
         const auto createStatus = preparedModel->createReusableExecution(
-                request, measure, kOmittedTimeoutDuration, &execution);
+                request, {measure, kOmittedTimeoutDuration, {}, {}}, &execution);
         if (!createStatus.isOk()) {
             ASSERT_EQ(createStatus.getExceptionCode(), EX_SERVICE_SPECIFIC);
             ASSERT_EQ(static_cast<ErrorStatus>(createStatus.getServiceSpecificError()),
@@ -149,10 +149,59 @@ static void validate(const std::shared_ptr<IPreparedModel>& preparedModel,
 
     int32_t aidlVersion;
     ASSERT_TRUE(preparedModel->getInterfaceVersion(&aidlVersion).isOk());
+    if (aidlVersion < kMinAidlLevelForFL8) {
+        return;
+    }
 
     // validate reusable execution
-    if (aidlVersion >= kMinAidlLevelForFL8) {
-        validateReusableExecution(preparedModel, message, request, measure);
+    validateReusableExecution(preparedModel, message, request, measure);
+
+    // synchronous with empty hints
+    {
+        SCOPED_TRACE(message + " [executeSynchronouslyWithConfig]");
+        ExecutionResult executionResult;
+        const auto executeStatus = preparedModel->executeSynchronouslyWithConfig(
+                request, {measure, kOmittedTimeoutDuration, {}, {}}, kNoDeadline, &executionResult);
+        ASSERT_FALSE(executeStatus.isOk());
+        ASSERT_EQ(executeStatus.getExceptionCode(), EX_SERVICE_SPECIFIC);
+        ASSERT_EQ(static_cast<ErrorStatus>(executeStatus.getServiceSpecificError()),
+                  ErrorStatus::INVALID_ARGUMENT);
+    }
+
+    // fenced with empty hints
+    {
+        SCOPED_TRACE(message + " [executeFencedWithConfig]");
+        FencedExecutionResult executionResult;
+        const auto executeStatus = preparedModel->executeFencedWithConfig(
+                request, {}, {false, kOmittedTimeoutDuration, {}, {}}, kNoDeadline, kNoDuration,
+                &executionResult);
+        ASSERT_FALSE(executeStatus.isOk());
+        ASSERT_EQ(executeStatus.getExceptionCode(), EX_SERVICE_SPECIFIC);
+        ASSERT_EQ(static_cast<ErrorStatus>(executeStatus.getServiceSpecificError()),
+                  ErrorStatus::INVALID_ARGUMENT);
+    }
+
+    // burst with empty hints
+    {
+        SCOPED_TRACE(message + " [burst executeSynchronouslyWithConfig]");
+
+        // create burst
+        std::shared_ptr<IBurst> burst;
+        auto ret = preparedModel->configureExecutionBurst(&burst);
+        ASSERT_TRUE(ret.isOk()) << ret.getDescription();
+        ASSERT_NE(nullptr, burst.get());
+
+        // use -1 for all memory identifier tokens
+        const std::vector<int64_t> slots(request.pools.size(), -1);
+
+        ExecutionResult executionResult;
+        const auto executeStatus = burst->executeSynchronouslyWithConfig(
+                request, slots, {measure, kOmittedTimeoutDuration, {}, {}}, kNoDeadline,
+                &executionResult);
+        ASSERT_FALSE(executeStatus.isOk());
+        ASSERT_EQ(executeStatus.getExceptionCode(), EX_SERVICE_SPECIFIC);
+        ASSERT_EQ(static_cast<ErrorStatus>(executeStatus.getServiceSpecificError()),
+                  ErrorStatus::INVALID_ARGUMENT);
     }
 }
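The burst block above builds its slot vector with `-1` entries. Each request memory pool gets a memory-identifier token, and `-1` means "not cached by the burst", which is always a legal value, so it is the safe choice for validation-only calls. The convention, reduced to a runnable snippet (`poolCount` stands in for `request.pools.size()`):

```cpp
// One -1 slot per request pool: nothing is assumed cached by the burst.
#include <cstdint>
#include <iostream>
#include <vector>

int main() {
    const std::size_t poolCount = 3;                  // stand-in for request.pools.size()
    const std::vector<int64_t> slots(poolCount, -1);  // -1: no cached identifier
    for (const int64_t slot : slots) std::cout << slot << ' ';
    std::cout << '\n';
    return 0;
}
```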
diff --git a/neuralnetworks/aidl/vts/functional/VtsHalNeuralnetworks.cpp b/neuralnetworks/aidl/vts/functional/VtsHalNeuralnetworks.cpp
index c417356005..51b4805134 100644
--- a/neuralnetworks/aidl/vts/functional/VtsHalNeuralnetworks.cpp
+++ b/neuralnetworks/aidl/vts/functional/VtsHalNeuralnetworks.cpp
@@ -15,6 +15,7 @@
  */
 
 #define LOG_TAG "neuralnetworks_aidl_hal_test"
+
 #include "VtsHalNeuralnetworks.h"
 
 #include <android-base/logging.h>
@@ -28,20 +29,27 @@
 #include <utility>
 
 #include <TestHarness.h>
-#include <aidl/Vintf.h>
 #include <nnapi/hal/aidl/Conversions.h>
 
 #include "Callbacks.h"
 #include "GeneratedTestHarness.h"
 #include "Utils.h"
 
+#ifdef __ANDROID__
+#include <aidl/Vintf.h>
+#else  // __ANDROID__
+#include <CanonicalDevice.h>
+#include <nnapi/hal/aidl/Adapter.h>
+#endif  // __ANDROID__
+
 namespace aidl::android::hardware::neuralnetworks::vts::functional {
 
 using implementation::PreparedModelCallback;
 
 // internal helper function
 void createPreparedModel(const std::shared_ptr<IDevice>& device, const Model& model,
-                         std::shared_ptr<IPreparedModel>* preparedModel, bool reportSkipping) {
+                         std::shared_ptr<IPreparedModel>* preparedModel, bool reportSkipping,
+                         bool useConfig) {
     ASSERT_NE(nullptr, preparedModel);
     *preparedModel = nullptr;
 
@@ -56,11 +64,25 @@ void createPreparedModel(const std::shared_ptr<IDevice>& device, const Model& mo
     // launch prepare model
     const std::shared_ptr<PreparedModelCallback> preparedModelCallback =
             ndk::SharedRefBase::make<PreparedModelCallback>();
-    const auto prepareLaunchStatus =
-            device->prepareModel(model, ExecutionPreference::FAST_SINGLE_ANSWER, kDefaultPriority,
-                                 kNoDeadline, {}, {}, kEmptyCacheToken, preparedModelCallback);
-    ASSERT_TRUE(prepareLaunchStatus.isOk()) << prepareLaunchStatus.getDescription();
-
+    if (useConfig) {
+        const auto prepareLaunchStatus =
+                device->prepareModelWithConfig(model,
+                                               {ExecutionPreference::FAST_SINGLE_ANSWER,
+                                                kDefaultPriority,
+                                                kNoDeadline,
+                                                {},
+                                                {},
+                                                kEmptyCacheToken,
+                                                {},
+                                                {}},
+                                               preparedModelCallback);
+        ASSERT_TRUE(prepareLaunchStatus.isOk()) << prepareLaunchStatus.getDescription();
+    } else {
+        const auto prepareLaunchStatus = device->prepareModel(
+                model, ExecutionPreference::FAST_SINGLE_ANSWER, kDefaultPriority, kNoDeadline, {},
+                {}, kEmptyCacheToken, preparedModelCallback);
+        ASSERT_TRUE(prepareLaunchStatus.isOk()) << prepareLaunchStatus.getDescription();
+    }
     // retrieve prepared model
     preparedModelCallback->wait();
     const ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
@@ -96,6 +118,7 @@ void NeuralNetworksAidlTest::SetUp() {
     ASSERT_TRUE(deviceIsResponsive);
 }
 
+#ifdef __ANDROID__
 static NamedDevice makeNamedDevice(const std::string& name) {
     ndk::SpAIBinder binder(AServiceManager_waitForService(name.c_str()));
     return {name, IDevice::fromBinder(binder)};
@@ -112,6 +135,14 @@ static std::vector<NamedDevice> getNamedDevicesImpl() {
     std::transform(names.begin(), names.end(), std::back_inserter(namedDevices), makeNamedDevice);
     return namedDevices;
 }
+#else  // __ANDROID__
+static std::vector<NamedDevice> getNamedDevicesImpl() {
+    const std::string name = "nnapi-sample";
+    auto device = std::make_shared<const ::android::nn::sample::Device>(name);
+    auto aidlDevice = adapter::adapt(device);
+    return {{name, aidlDevice}};
+}
+#endif  // __ANDROID__
 
 const std::vector<NamedDevice>& getNamedDevices() {
     const static std::vector<NamedDevice> devices = getNamedDevicesImpl();
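The host branch of `getNamedDevicesImpl()` above is the key to running these VTS tests off-device: there is no servicemanager to query, so the test instantiates the canonical sample driver in-process and wraps it so it presents the AIDL `IDevice` surface. A structural sketch of that adapter pattern, where `CanonicalDevice` and `AidlDevice` are stand-ins for `::android::nn::sample::Device` and the result of `adapter::adapt()`:

```cpp
// Host path: build the driver in-process, adapt it, hand it to the test list.
#include <iostream>
#include <memory>
#include <string>
#include <utility>
#include <vector>

struct CanonicalDevice {  // stand-in for the canonical sample driver
    explicit CanonicalDevice(std::string n) : name(std::move(n)) {}
    std::string name;
};

struct AidlDevice {  // stand-in for the AIDL-facing adapter
    std::shared_ptr<const CanonicalDevice> impl;
};

using NamedDevice = std::pair<std::string, AidlDevice>;

std::vector<NamedDevice> getNamedDevicesImpl() {
    const std::string name = "nnapi-sample";
    auto device = std::make_shared<const CanonicalDevice>(name);
    return {{name, AidlDevice{device}}};  // adapter::adapt(device) in the real code
}

int main() {
    for (const auto& [name, device] : getNamedDevicesImpl()) std::cout << name << '\n';
    return 0;
}
```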
diff --git a/neuralnetworks/aidl/vts/functional/VtsHalNeuralnetworks.h b/neuralnetworks/aidl/vts/functional/VtsHalNeuralnetworks.h
index a900590791..00d705c521 100644
--- a/neuralnetworks/aidl/vts/functional/VtsHalNeuralnetworks.h
+++ b/neuralnetworks/aidl/vts/functional/VtsHalNeuralnetworks.h
@@ -51,8 +51,8 @@ std::string printNeuralNetworksAidlTest(
 // Create an IPreparedModel object. If the model cannot be prepared,
 // "preparedModel" will be nullptr instead.
 void createPreparedModel(const std::shared_ptr<IDevice>& device, const Model& model,
-                         std::shared_ptr<IPreparedModel>* preparedModel,
-                         bool reportSkipping = true);
+                         std::shared_ptr<IPreparedModel>* preparedModel, bool reportSkipping = true,
+                         bool useConfig = false);
 
 enum class Executor { SYNC, BURST, FENCED };
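Appending `useConfig` as a defaulted trailing parameter keeps every existing call site source compatible: old two- and three-argument calls compile unchanged, and only the new FL8 paths opt in. A minimal illustration of that design choice (the function here is a toy, not the real helper):

```cpp
// Defaulted trailing parameters preserve legacy call sites.
#include <iostream>

void createPreparedModel(int device, int model, int* out, bool reportSkipping = true,
                         bool useConfig = false) {
    *out = device + model;  // placeholder "preparation"
    std::cout << "reportSkipping=" << reportSkipping << " useConfig=" << useConfig << '\n';
}

int main() {
    int prepared = 0;
    createPreparedModel(1, 2, &prepared);              // legacy call site, unchanged
    createPreparedModel(1, 2, &prepared, true, true);  // new FL8 path opts in
    return 0;
}
```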