Diffstat (limited to 'neuralnetworks/1.1/utils')
-rw-r--r--   neuralnetworks/1.1/utils/Android.bp                        | 14
-rw-r--r--   neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Device.h    |  5
-rw-r--r--   neuralnetworks/1.1/utils/src/Device.cpp                    |  4
-rw-r--r--   neuralnetworks/1.1/utils/test/DeviceTest.cpp               | 14
4 files changed, 15 insertions, 22 deletions
diff --git a/neuralnetworks/1.1/utils/Android.bp b/neuralnetworks/1.1/utils/Android.bp
index 737ff58159..4b8999f559 100644
--- a/neuralnetworks/1.1/utils/Android.bp
+++ b/neuralnetworks/1.1/utils/Android.bp
@@ -31,16 +31,11 @@ cc_library_static {
     export_include_dirs: ["include"],
     cflags: ["-Wthread-safety"],
     static_libs: [
-        "neuralnetworks_types",
-        "neuralnetworks_utils_hal_common",
-        "neuralnetworks_utils_hal_1_0",
-    ],
-    shared_libs: [
         "android.hardware.neuralnetworks@1.0",
         "android.hardware.neuralnetworks@1.1",
-    ],
-    export_static_lib_headers: [
+        "neuralnetworks_types",
         "neuralnetworks_utils_hal_common",
+        "neuralnetworks_utils_hal_1_0",
     ],
 }
 
@@ -52,20 +47,15 @@ cc_test {
         "android.hardware.neuralnetworks@1.0",
         "android.hardware.neuralnetworks@1.1",
         "libgmock",
-        "libneuralnetworks_common",
         "neuralnetworks_types",
         "neuralnetworks_utils_hal_common",
         "neuralnetworks_utils_hal_1_0",
         "neuralnetworks_utils_hal_1_1",
     ],
     shared_libs: [
-        "android.hidl.allocator@1.0",
-        "android.hidl.memory@1.0",
         "libbase",
         "libcutils",
-        "libfmq",
         "libhidlbase",
-        "libhidlmemory",
         "liblog",
         "libutils",
     ],
diff --git a/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Device.h b/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Device.h
index d6bd36a7fe..38ca1382be 100644
--- a/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Device.h
+++ b/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Device.h
@@ -64,8 +64,9 @@ class Device final : public nn::IDevice {
     nn::GeneralResult<nn::SharedPreparedModel> prepareModel(
             const nn::Model& model, nn::ExecutionPreference preference, nn::Priority priority,
             nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
-            const std::vector<nn::SharedHandle>& dataCache,
-            const nn::CacheToken& token) const override;
+            const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token,
+            const std::vector<nn::TokenValuePair>& hints,
+            const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override;
 
     nn::GeneralResult<nn::SharedPreparedModel> prepareModelFromCache(
             nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
diff --git a/neuralnetworks/1.1/utils/src/Device.cpp b/neuralnetworks/1.1/utils/src/Device.cpp
index 3effa8428d..28f3276389 100644
--- a/neuralnetworks/1.1/utils/src/Device.cpp
+++ b/neuralnetworks/1.1/utils/src/Device.cpp
@@ -143,7 +143,9 @@ nn::GeneralResult<std::vector<bool>> Device::getSupportedOperations(const nn::Mo
 nn::GeneralResult<nn::SharedPreparedModel> Device::prepareModel(
         const nn::Model& model, nn::ExecutionPreference preference, nn::Priority /*priority*/,
         nn::OptionalTimePoint /*deadline*/, const std::vector<nn::SharedHandle>& /*modelCache*/,
-        const std::vector<nn::SharedHandle>& /*dataCache*/, const nn::CacheToken& /*token*/) const {
+        const std::vector<nn::SharedHandle>& /*dataCache*/, const nn::CacheToken& /*token*/,
+        const std::vector<nn::TokenValuePair>& /*hints*/,
+        const std::vector<nn::ExtensionNameAndPrefix>& /*extensionNameToPrefix*/) const {
     // Ensure that model is ready for IPC.
     std::optional<nn::Model> maybeModelInShared;
     const nn::Model& modelInShared =
diff --git a/neuralnetworks/1.1/utils/test/DeviceTest.cpp b/neuralnetworks/1.1/utils/test/DeviceTest.cpp
index 2248da6ffe..8ab87bc97a 100644
--- a/neuralnetworks/1.1/utils/test/DeviceTest.cpp
+++ b/neuralnetworks/1.1/utils/test/DeviceTest.cpp
@@ -390,7 +390,7 @@ TEST(DeviceTest, prepareModel) {
 
     // run test
     const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
-                                             nn::Priority::DEFAULT, {}, {}, {}, {});
+                                             nn::Priority::DEFAULT, {}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_TRUE(result.has_value())
@@ -409,7 +409,7 @@ TEST(DeviceTest, prepareModelLaunchError) {
 
     // run test
     const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
-                                             nn::Priority::DEFAULT, {}, {}, {}, {});
+                                             nn::Priority::DEFAULT, {}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
@@ -427,7 +427,7 @@ TEST(DeviceTest, prepareModelReturnError) {
 
     // run test
     const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
-                                             nn::Priority::DEFAULT, {}, {}, {}, {});
+                                             nn::Priority::DEFAULT, {}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
@@ -445,7 +445,7 @@ TEST(DeviceTest, prepareModelNullptrError) {
 
     // run test
     const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
-                                             nn::Priority::DEFAULT, {}, {}, {}, {});
+                                             nn::Priority::DEFAULT, {}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
@@ -462,7 +462,7 @@ TEST(DeviceTest, prepareModelTransportFailure) {
 
     // run test
     const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
-                                             nn::Priority::DEFAULT, {}, {}, {}, {});
+                                             nn::Priority::DEFAULT, {}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
@@ -479,7 +479,7 @@ TEST(DeviceTest, prepareModelDeadObject) {
 
     // run test
     const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
-                                             nn::Priority::DEFAULT, {}, {}, {}, {});
+                                             nn::Priority::DEFAULT, {}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
@@ -498,7 +498,7 @@ TEST(DeviceTest, prepareModelAsyncCrash) {
 
     // run test
     const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
-                                             nn::Priority::DEFAULT, {}, {}, {}, {});
+                                             nn::Priority::DEFAULT, {}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
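
Note on the interface change above: callers of prepareModel() now pass two extra trailing arguments, a vector of compilation hints and a vector of extension name/prefix pairs, both of which the 1.1 implementation ignores (they are marked /*hints*/ and /*extensionNameToPrefix*/ in Device.cpp). Below is a minimal sketch of the updated call shape, mirroring the empty-argument usage in DeviceTest.cpp; the helper name prepareWithDefaults, the `nn` namespace alias, and the chosen includes are illustrative assumptions, not part of this change.

    #include <nnapi/IDevice.h>
    #include <nnapi/Result.h>
    #include <nnapi/Types.h>

    #include <vector>

    namespace nn = ::android::nn;

    // Hypothetical helper: prepare a model through the extended
    // IDevice::prepareModel() signature, passing empty hints and extension
    // prefixes just as the 1.1 tests do.
    nn::GeneralResult<nn::SharedPreparedModel> prepareWithDefaults(
            const nn::SharedDevice& device, const nn::Model& model) {
        const std::vector<nn::TokenValuePair> hints;                          // no compilation hints
        const std::vector<nn::ExtensionNameAndPrefix> extensionNameToPrefix;  // no extensions in use
        return device->prepareModel(model, nn::ExecutionPreference::DEFAULT, nn::Priority::DEFAULT,
                                    /*deadline=*/{}, /*modelCache=*/{}, /*dataCache=*/{},
                                    /*token=*/{}, hints, extensionNameToPrefix);
    }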