Diffstat (limited to 'neuralnetworks/1.2/utils')
-rw-r--r--  neuralnetworks/1.2/utils/Android.bp                              | 15
-rw-r--r--  neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Burst.h           |  9
-rw-r--r--  neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Conversions.h     |  4
-rw-r--r--  neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Device.h          |  5
-rw-r--r--  neuralnetworks/1.2/utils/include/nnapi/hal/1.2/PreparedModel.h   | 13
-rw-r--r--  neuralnetworks/1.2/utils/src/Burst.cpp                           | 16
-rw-r--r--  neuralnetworks/1.2/utils/src/Conversions.cpp                     |  6
-rw-r--r--  neuralnetworks/1.2/utils/src/Device.cpp                          |  4
-rw-r--r--  neuralnetworks/1.2/utils/src/PreparedModel.cpp                   | 21
-rw-r--r--  neuralnetworks/1.2/utils/test/DeviceTest.cpp                     | 14
-rw-r--r--  neuralnetworks/1.2/utils/test/PreparedModelTest.cpp              | 44
11 files changed, 81 insertions, 70 deletions
diff --git a/neuralnetworks/1.2/utils/Android.bp b/neuralnetworks/1.2/utils/Android.bp
index 4eefb0f7dc..4c5f065dda 100644
--- a/neuralnetworks/1.2/utils/Android.bp
+++ b/neuralnetworks/1.2/utils/Android.bp
@@ -31,19 +31,14 @@ cc_library_static {
     export_include_dirs: ["include"],
     cflags: ["-Wthread-safety"],
     static_libs: [
-        "neuralnetworks_types",
-        "neuralnetworks_utils_hal_common",
-        "neuralnetworks_utils_hal_1_0",
-        "neuralnetworks_utils_hal_1_1",
-    ],
-    shared_libs: [
         "android.hardware.neuralnetworks@1.0",
         "android.hardware.neuralnetworks@1.1",
         "android.hardware.neuralnetworks@1.2",
         "libfmq",
-    ],
-    export_static_lib_headers: [
+        "neuralnetworks_types",
         "neuralnetworks_utils_hal_common",
+        "neuralnetworks_utils_hal_1_0",
+        "neuralnetworks_utils_hal_1_1",
     ],
     product_variables: {
         debuggable: { // eng and userdebug builds
@@ -71,7 +66,6 @@ cc_test {
         "android.hardware.neuralnetworks@1.1",
         "android.hardware.neuralnetworks@1.2",
         "libgmock",
-        "libneuralnetworks_common",
         "neuralnetworks_types",
         "neuralnetworks_utils_hal_common",
         "neuralnetworks_utils_hal_1_0",
@@ -79,13 +73,10 @@ cc_test {
         "neuralnetworks_utils_hal_1_2",
     ],
     shared_libs: [
-        "android.hidl.allocator@1.0",
-        "android.hidl.memory@1.0",
         "libbase",
         "libcutils",
         "libfmq",
         "libhidlbase",
-        "libhidlmemory",
         "liblog",
         "libutils",
     ],
diff --git a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Burst.h b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Burst.h
index ac9411c462..1b28476a88 100644
--- a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Burst.h
+++ b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Burst.h
@@ -170,13 +170,16 @@ class Burst final : public nn::IBurst, public std::enable_shared_from_this<Burst
     // See IBurst::execute for information on this method.
     nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> execute(
             const nn::Request& request, nn::MeasureTiming measure,
-            const nn::OptionalTimePoint& deadline,
-            const nn::OptionalDuration& loopTimeoutDuration) const override;
+            const nn::OptionalTimePoint& deadline, const nn::OptionalDuration& loopTimeoutDuration,
+            const std::vector<nn::TokenValuePair>& hints,
+            const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override;
 
     // See IBurst::createReusableExecution for information on this method.
     nn::GeneralResult<nn::SharedExecution> createReusableExecution(
             const nn::Request& request, nn::MeasureTiming measure,
-            const nn::OptionalDuration& loopTimeoutDuration) const override;
+            const nn::OptionalDuration& loopTimeoutDuration,
+            const std::vector<nn::TokenValuePair>& hints,
+            const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override;
 
     // If fallback is not nullptr, this method will invoke the fallback function to try another
     // execution path if the packet could not be sent. Otherwise, failing to send the packet will
diff --git a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Conversions.h b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Conversions.h
index c3348aa8f2..4f13adc0a3 100644
--- a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Conversions.h
+++ b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Conversions.h
@@ -37,7 +37,7 @@ GeneralResult<Operand> unvalidatedConvert(const hal::V1_2::Operand& operand);
 GeneralResult<Operand::ExtraParams> unvalidatedConvert(
         const hal::V1_2::Operand::ExtraParams& extraParams);
 GeneralResult<Model> unvalidatedConvert(const hal::V1_2::Model& model);
-GeneralResult<Model::ExtensionNameAndPrefix> unvalidatedConvert(
+GeneralResult<ExtensionNameAndPrefix> unvalidatedConvert(
         const hal::V1_2::Model::ExtensionNameAndPrefix& extensionNameAndPrefix);
 GeneralResult<OutputShape> unvalidatedConvert(const hal::V1_2::OutputShape& outputShape);
 GeneralResult<MeasureTiming> unvalidatedConvert(const hal::V1_2::MeasureTiming& measureTiming);
@@ -78,7 +78,7 @@ nn::GeneralResult<Operand::ExtraParams> unvalidatedConvert(
         const nn::Operand::ExtraParams& extraParams);
 nn::GeneralResult<Model> unvalidatedConvert(const nn::Model& model);
 nn::GeneralResult<Model::ExtensionNameAndPrefix> unvalidatedConvert(
-        const nn::Model::ExtensionNameAndPrefix& extensionNameAndPrefix);
+        const nn::ExtensionNameAndPrefix& extensionNameAndPrefix);
 nn::GeneralResult<OutputShape> unvalidatedConvert(const nn::OutputShape& outputShape);
 nn::GeneralResult<MeasureTiming> unvalidatedConvert(const nn::MeasureTiming& measureTiming);
 nn::GeneralResult<Timing> unvalidatedConvert(const nn::Timing& timing);
diff --git a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Device.h b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Device.h
index e7ac172211..d92cf50aa3 100644
--- a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Device.h
+++ b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Device.h
@@ -83,8 +83,9 @@ class Device final : public nn::IDevice {
     nn::GeneralResult<nn::SharedPreparedModel> prepareModel(
             const nn::Model& model, nn::ExecutionPreference preference, nn::Priority priority,
             nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
-            const std::vector<nn::SharedHandle>& dataCache,
-            const nn::CacheToken& token) const override;
+            const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token,
+            const std::vector<nn::TokenValuePair>& hints,
+            const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override;
 
     nn::GeneralResult<nn::SharedPreparedModel> prepareModelFromCache(
             nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
diff --git a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/PreparedModel.h b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/PreparedModel.h
index 1150e5e79b..72a5b2f007 100644
--- a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/PreparedModel.h
+++ b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/PreparedModel.h
@@ -49,18 +49,23 @@ class PreparedModel final : public nn::IPreparedModel,
 
     nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> execute(
             const nn::Request& request, nn::MeasureTiming measure,
-            const nn::OptionalTimePoint& deadline,
-            const nn::OptionalDuration& loopTimeoutDuration) const override;
+            const nn::OptionalTimePoint& deadline, const nn::OptionalDuration& loopTimeoutDuration,
+            const std::vector<nn::TokenValuePair>& hints,
+            const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override;
 
     nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> executeFenced(
             const nn::Request& request, const std::vector<nn::SyncFence>& waitFor,
             nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline,
             const nn::OptionalDuration& loopTimeoutDuration,
-            const nn::OptionalDuration& timeoutDurationAfterFence) const override;
+            const nn::OptionalDuration& timeoutDurationAfterFence,
+            const std::vector<nn::TokenValuePair>& hints,
+            const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override;
 
     nn::GeneralResult<nn::SharedExecution> createReusableExecution(
             const nn::Request& request, nn::MeasureTiming measure,
-            const nn::OptionalDuration& loopTimeoutDuration) const override;
+            const nn::OptionalDuration& loopTimeoutDuration,
+            const std::vector<nn::TokenValuePair>& hints,
+            const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override;
 
     nn::GeneralResult<nn::SharedBurst> configureExecutionBurst() const override;
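Usage sketch: the two trailing parameters added above come from the canonical
nn::IPreparedModel interface. A minimal caller-side sketch (illustrative names, not
from this change; assumes an existing nn::SharedPreparedModel `preparedModel` and an
nn::Request `request`):

    // On a 1.2 service the new hint arguments cannot cross the HIDL boundary,
    // so they are accepted for interface compatibility and left empty here.
    const std::vector<nn::TokenValuePair> hints;
    const std::vector<nn::ExtensionNameAndPrefix> extensionNameToPrefix;
    const auto result = preparedModel->execute(request, nn::MeasureTiming::NO,
                                               /*deadline=*/{}, /*loopTimeoutDuration=*/{},
                                               hints, extensionNameToPrefix);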
diff --git a/neuralnetworks/1.2/utils/src/Burst.cpp b/neuralnetworks/1.2/utils/src/Burst.cpp
index 911fbfa981..23e80709a0 100644
--- a/neuralnetworks/1.2/utils/src/Burst.cpp
+++ b/neuralnetworks/1.2/utils/src/Burst.cpp
@@ -305,8 +305,9 @@ Burst::OptionalCacheHold Burst::cacheMemory(const nn::SharedMemory& memory) cons
 
 nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> Burst::execute(
         const nn::Request& request, nn::MeasureTiming measure,
-        const nn::OptionalTimePoint& deadline,
-        const nn::OptionalDuration& loopTimeoutDuration) const {
+        const nn::OptionalTimePoint& deadline, const nn::OptionalDuration& loopTimeoutDuration,
+        const std::vector<nn::TokenValuePair>& /*hints*/,
+        const std::vector<nn::ExtensionNameAndPrefix>& /*extensionNameToPrefix*/) const {
     // This is the first point when we know an execution is occurring, so begin to collect
     // systraces. Note that the first point we can begin collecting systraces in
     // ExecutionBurstServer is when the RequestChannelReceiver realizes there is data in the FMQ, so
@@ -317,7 +318,7 @@ nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> Burst::
     // fall back to another execution path
     if (!compliantVersion(request).ok()) {
         // fallback to another execution path if the packet could not be sent
-        return kPreparedModel->execute(request, measure, deadline, loopTimeoutDuration);
+        return kPreparedModel->execute(request, measure, deadline, loopTimeoutDuration, {}, {});
     }
 
     // ensure that request is ready for IPC
@@ -346,7 +347,7 @@ nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> Burst::
     // send request packet
     const auto requestPacket = serialize(hidlRequest, hidlMeasure, slots);
     const auto fallback = [this, &request, measure, &deadline, &loopTimeoutDuration] {
-        return kPreparedModel->execute(request, measure, deadline, loopTimeoutDuration);
+        return kPreparedModel->execute(request, measure, deadline, loopTimeoutDuration, {}, {});
     };
     return executeInternal(requestPacket, relocation, fallback);
 }
@@ -354,14 +355,17 @@ nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> Burst::
 // See IBurst::createReusableExecution for information on this method.
 nn::GeneralResult<nn::SharedExecution> Burst::createReusableExecution(
         const nn::Request& request, nn::MeasureTiming measure,
-        const nn::OptionalDuration& loopTimeoutDuration) const {
+        const nn::OptionalDuration& loopTimeoutDuration,
+        const std::vector<nn::TokenValuePair>& /*hints*/,
+        const std::vector<nn::ExtensionNameAndPrefix>& /*extensionNameToPrefix*/) const {
     NNTRACE_RT(NNTRACE_PHASE_EXECUTION, "Burst::createReusableExecution");
 
     // if the request is valid but of a higher version than what's supported in burst execution,
     // fall back to another execution path
     if (!compliantVersion(request).ok()) {
         // fallback to another execution path if the packet could not be sent
-        return kPreparedModel->createReusableExecution(request, measure, loopTimeoutDuration);
+        return kPreparedModel->createReusableExecution(request, measure, loopTimeoutDuration, {},
+                                                       {});
     }
 
     // ensure that request is ready for IPC
diff --git a/neuralnetworks/1.2/utils/src/Conversions.cpp b/neuralnetworks/1.2/utils/src/Conversions.cpp
index 838d9c4717..62ec2ed6c6 100644
--- a/neuralnetworks/1.2/utils/src/Conversions.cpp
+++ b/neuralnetworks/1.2/utils/src/Conversions.cpp
@@ -212,9 +212,9 @@ GeneralResult<Model> unvalidatedConvert(const hal::V1_2::Model& model) {
     };
 }
 
-GeneralResult<Model::ExtensionNameAndPrefix> unvalidatedConvert(
+GeneralResult<ExtensionNameAndPrefix> unvalidatedConvert(
        const hal::V1_2::Model::ExtensionNameAndPrefix& extensionNameAndPrefix) {
-    return Model::ExtensionNameAndPrefix{
+    return ExtensionNameAndPrefix{
            .name = extensionNameAndPrefix.name,
            .prefix = extensionNameAndPrefix.prefix,
    };
@@ -495,7 +495,7 @@ nn::GeneralResult<Model> unvalidatedConvert(const nn::Model& model) {
 }
 
 nn::GeneralResult<Model::ExtensionNameAndPrefix> unvalidatedConvert(
-        const nn::Model::ExtensionNameAndPrefix& extensionNameAndPrefix) {
+        const nn::ExtensionNameAndPrefix& extensionNameAndPrefix) {
     return Model::ExtensionNameAndPrefix{
             .name = extensionNameAndPrefix.name,
             .prefix = extensionNameAndPrefix.prefix,
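Conversion sketch: the canonical type is now the top-level nn::ExtensionNameAndPrefix
rather than the nested nn::Model::ExtensionNameAndPrefix, while the HIDL side keeps its
nested V1_2 Model::ExtensionNameAndPrefix. An illustrative round trip through the
converters above (made-up values; namespaces as in Conversions.h, so this is a sketch
rather than verified compiling code):

    // HIDL -> canonical now yields the top-level type.
    const hal::V1_2::Model::ExtensionNameAndPrefix halExt{
            .name = "com.example.ext", .prefix = 0x1234};
    const GeneralResult<ExtensionNameAndPrefix> canonical = unvalidatedConvert(halExt);
    // canonical -> HIDL still produces the nested Model::ExtensionNameAndPrefix,
    // so only the canonical side of the pair changed in this commit.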
diff --git a/neuralnetworks/1.2/utils/src/Device.cpp b/neuralnetworks/1.2/utils/src/Device.cpp
index e7acecdf7a..3a58d2c7cc 100644
--- a/neuralnetworks/1.2/utils/src/Device.cpp
+++ b/neuralnetworks/1.2/utils/src/Device.cpp
@@ -236,7 +236,9 @@ nn::GeneralResult<std::vector<bool>> Device::getSupportedOperations(const nn::Mo
 nn::GeneralResult<nn::SharedPreparedModel> Device::prepareModel(
         const nn::Model& model, nn::ExecutionPreference preference, nn::Priority /*priority*/,
         nn::OptionalTimePoint /*deadline*/, const std::vector<nn::SharedHandle>& modelCache,
-        const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token) const {
+        const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token,
+        const std::vector<nn::TokenValuePair>& /*hints*/,
+        const std::vector<nn::ExtensionNameAndPrefix>& /*extensionNameToPrefix*/) const {
     // Ensure that model is ready for IPC.
     std::optional<nn::Model> maybeModelInShared;
     const nn::Model& modelInShared =
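Usage sketch: preparing a model through the widened 1.2 Device (illustrative names;
assumes an nn::SharedDevice `device`, a canonical nn::Model `model`, and an
nn::CacheToken `token`):

    // The 1.2 HAL cannot transport priority, deadline, hints, or extension
    // prefixes, so the implementation above ignores them; pass empty values.
    const auto prepared = device->prepareModel(
            model, nn::ExecutionPreference::DEFAULT, nn::Priority::DEFAULT,
            /*deadline=*/{}, /*modelCache=*/{}, /*dataCache=*/{}, token,
            /*hints=*/{}, /*extensionNameToPrefix=*/{});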
diff --git a/neuralnetworks/1.2/utils/src/PreparedModel.cpp b/neuralnetworks/1.2/utils/src/PreparedModel.cpp
index 6df3df332a..feb3951a4a 100644
--- a/neuralnetworks/1.2/utils/src/PreparedModel.cpp
+++ b/neuralnetworks/1.2/utils/src/PreparedModel.cpp
@@ -91,7 +91,9 @@ PreparedModel::executeAsynchronously(const V1_0::Request& request, MeasureTiming
 nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> PreparedModel::execute(
         const nn::Request& request, nn::MeasureTiming measure,
         const nn::OptionalTimePoint& /*deadline*/,
-        const nn::OptionalDuration& /*loopTimeoutDuration*/) const {
+        const nn::OptionalDuration& /*loopTimeoutDuration*/,
+        const std::vector<nn::TokenValuePair>& /*hints*/,
+        const std::vector<nn::ExtensionNameAndPrefix>& /*extensionNameToPrefix*/) const {
     // Ensure that request is ready for IPC.
     std::optional<nn::Request> maybeRequestInShared;
     hal::utils::RequestRelocation relocation;
@@ -123,19 +125,22 @@ PreparedModel::executeInternal(const V1_0::Request& request, MeasureTiming measu
 }
 
 nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
-PreparedModel::executeFenced(const nn::Request& /*request*/,
-                             const std::vector<nn::SyncFence>& /*waitFor*/,
-                             nn::MeasureTiming /*measure*/,
-                             const nn::OptionalTimePoint& /*deadline*/,
-                             const nn::OptionalDuration& /*loopTimeoutDuration*/,
-                             const nn::OptionalDuration& /*timeoutDurationAfterFence*/) const {
+PreparedModel::executeFenced(
+        const nn::Request& /*request*/, const std::vector<nn::SyncFence>& /*waitFor*/,
+        nn::MeasureTiming /*measure*/, const nn::OptionalTimePoint& /*deadline*/,
+        const nn::OptionalDuration& /*loopTimeoutDuration*/,
+        const nn::OptionalDuration& /*timeoutDurationAfterFence*/,
+        const std::vector<nn::TokenValuePair>& /*hints*/,
+        const std::vector<nn::ExtensionNameAndPrefix>& /*extensionNameToPrefix*/) const {
     return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
            << "IPreparedModel::executeFenced is not supported on 1.2 HAL service";
 }
 
 nn::GeneralResult<nn::SharedExecution> PreparedModel::createReusableExecution(
         const nn::Request& request, nn::MeasureTiming measure,
-        const nn::OptionalDuration& /*loopTimeoutDuration*/) const {
+        const nn::OptionalDuration& /*loopTimeoutDuration*/,
+        const std::vector<nn::TokenValuePair>& /*hints*/,
+        const std::vector<nn::ExtensionNameAndPrefix>& /*extensionNameToPrefix*/) const {
     // Ensure that request is ready for IPC.
     std::optional<nn::Request> maybeRequestInShared;
     hal::utils::RequestRelocation relocation;
diff --git a/neuralnetworks/1.2/utils/test/DeviceTest.cpp b/neuralnetworks/1.2/utils/test/DeviceTest.cpp
index 1dc6285be5..0d8c141582 100644
--- a/neuralnetworks/1.2/utils/test/DeviceTest.cpp
+++ b/neuralnetworks/1.2/utils/test/DeviceTest.cpp
@@ -636,7 +636,7 @@ TEST(DeviceTest, prepareModel) {
 
     // run test
     const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
-                                             nn::Priority::DEFAULT, {}, {}, {}, {});
+                                             nn::Priority::DEFAULT, {}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_TRUE(result.has_value())
@@ -655,7 +655,7 @@ TEST(DeviceTest, prepareModelLaunchError) {
 
     // run test
     const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
-                                             nn::Priority::DEFAULT, {}, {}, {}, {});
+                                             nn::Priority::DEFAULT, {}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
@@ -673,7 +673,7 @@ TEST(DeviceTest, prepareModelReturnError) {
 
     // run test
     const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
-                                             nn::Priority::DEFAULT, {}, {}, {}, {});
+                                             nn::Priority::DEFAULT, {}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
@@ -691,7 +691,7 @@ TEST(DeviceTest, prepareModelNullptrError) {
 
     // run test
     const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
-                                             nn::Priority::DEFAULT, {}, {}, {}, {});
+                                             nn::Priority::DEFAULT, {}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
@@ -708,7 +708,7 @@ TEST(DeviceTest, prepareModelTransportFailure) {
 
     // run test
     const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
-                                             nn::Priority::DEFAULT, {}, {}, {}, {});
+                                             nn::Priority::DEFAULT, {}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
@@ -725,7 +725,7 @@ TEST(DeviceTest, prepareModelDeadObject) {
 
     // run test
     const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
-                                             nn::Priority::DEFAULT, {}, {}, {}, {});
+                                             nn::Priority::DEFAULT, {}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
@@ -746,7 +746,7 @@ TEST(DeviceTest, prepareModelAsyncCrash) {
 
     // run test
     const auto result = device->prepareModel(kSimpleModel, nn::ExecutionPreference::DEFAULT,
-                                             nn::Priority::DEFAULT, {}, {}, {}, {});
+                                             nn::Priority::DEFAULT, {}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
diff --git a/neuralnetworks/1.2/utils/test/PreparedModelTest.cpp b/neuralnetworks/1.2/utils/test/PreparedModelTest.cpp
index 5e2ad79df5..a5ec9d36cd 100644
--- a/neuralnetworks/1.2/utils/test/PreparedModelTest.cpp
+++ b/neuralnetworks/1.2/utils/test/PreparedModelTest.cpp
@@ -154,7 +154,7 @@ TEST(PreparedModelTest, executeSync) {
             .WillOnce(Invoke(makeExecuteSynchronously(V1_0::ErrorStatus::NONE, {}, kNoTiming)));
 
     // run test
-    const auto result = preparedModel->execute({}, {}, {}, {});
+    const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});
 
     // verify result
     EXPECT_TRUE(result.has_value())
@@ -172,7 +172,7 @@ TEST(PreparedModelTest, executeSyncError) {
                     makeExecuteSynchronously(V1_0::ErrorStatus::GENERAL_FAILURE, {}, kNoTiming)));
 
     // run test
-    const auto result = preparedModel->execute({}, {}, {}, {});
+    const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
@@ -189,7 +189,7 @@ TEST(PreparedModelTest, executeSyncTransportFailure) {
             .WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
 
     // run test
-    const auto result = preparedModel->execute({}, {}, {}, {});
+    const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
@@ -206,7 +206,7 @@ TEST(PreparedModelTest, executeSyncDeadObject) {
             .WillOnce(InvokeWithoutArgs(makeDeadObjectFailure));
 
     // run test
-    const auto result = preparedModel->execute({}, {}, {}, {});
+    const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
@@ -224,7 +224,7 @@ TEST(PreparedModelTest, executeAsync) {
                     V1_0::ErrorStatus::NONE, {}, kNoTiming)));
 
     // run test
-    const auto result = preparedModel->execute({}, {}, {}, {});
+    const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});
 
     // verify result
     EXPECT_TRUE(result.has_value())
@@ -243,7 +243,7 @@ TEST(PreparedModelTest, executeAsyncLaunchError) {
                     kNoTiming)));
 
     // run test
-    const auto result = preparedModel->execute({}, {}, {}, {});
+    const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
@@ -261,7 +261,7 @@ TEST(PreparedModelTest, executeAsyncReturnError) {
                     V1_0::ErrorStatus::NONE, V1_0::ErrorStatus::GENERAL_FAILURE, {}, kNoTiming)));
 
     // run test
-    const auto result = preparedModel->execute({}, {}, {}, {});
+    const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
@@ -278,7 +278,7 @@ TEST(PreparedModelTest, executeAsyncTransportFailure) {
             .WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
 
     // run test
-    const auto result = preparedModel->execute({}, {}, {}, {});
+    const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
@@ -295,7 +295,7 @@ TEST(PreparedModelTest, executeAsyncDeadObject) {
             .WillOnce(InvokeWithoutArgs(makeDeadObjectFailure));
 
     // run test
-    const auto result = preparedModel->execute({}, {}, {}, {});
+    const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
@@ -314,7 +314,7 @@ TEST(PreparedModelTest, executeAsyncCrash) {
     EXPECT_CALL(*mockPreparedModel, execute_1_2(_, _, _)).Times(1).WillOnce(InvokeWithoutArgs(ret));
 
     // run test
-    const auto result = preparedModel->execute({}, {}, {}, {});
+    const auto result = preparedModel->execute({}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
@@ -328,7 +328,7 @@ TEST(PreparedModelTest, executeFencedNotSupported) {
             PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/true).value();
 
     // run test
-    const auto result = preparedModel->executeFenced({}, {}, {}, {}, {}, {});
+    const auto result = preparedModel->executeFenced({}, {}, {}, {}, {}, {}, {}, {});
 
     // verify result
     ASSERT_FALSE(result.has_value());
@@ -347,7 +347,7 @@ TEST(PreparedModelTest, reusableExecuteSync) {
                     Invoke(makeExecuteSynchronously(V1_0::ErrorStatus::NONE, {}, kNoTiming)));
 
     // create execution
-    const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+    const auto createResult = preparedModel->createReusableExecution({}, {}, {}, {}, {});
     ASSERT_TRUE(createResult.has_value())
             << "Failed with " << createResult.error().code << ": " << createResult.error().message;
     ASSERT_NE(createResult.value(), nullptr);
@@ -371,7 +371,7 @@ TEST(PreparedModelTest, reusableExecuteSyncError) {
                     makeExecuteSynchronously(V1_0::ErrorStatus::GENERAL_FAILURE, {}, kNoTiming)));
 
     // create execution
-    const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+    const auto createResult = preparedModel->createReusableExecution({}, {}, {}, {}, {});
     ASSERT_TRUE(createResult.has_value())
             << "Failed with " << createResult.error().code << ": " << createResult.error().message;
     ASSERT_NE(createResult.value(), nullptr);
@@ -392,7 +392,7 @@ TEST(PreparedModelTest, reusableExecuteSyncTransportFailure) {
             .WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
 
     // create execution
-    const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+    const auto createResult = preparedModel->createReusableExecution({}, {}, {}, {}, {});
     ASSERT_TRUE(createResult.has_value())
             << "Failed with " << createResult.error().code << ": " << createResult.error().message;
     ASSERT_NE(createResult.value(), nullptr);
@@ -413,7 +413,7 @@ TEST(PreparedModelTest, reusableExecuteSyncDeadObject) {
             .WillOnce(InvokeWithoutArgs(makeDeadObjectFailure));
 
     // create execution
-    const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+    const auto createResult = preparedModel->createReusableExecution({}, {}, {}, {}, {});
     ASSERT_TRUE(createResult.has_value())
             << "Failed with " << createResult.error().code << ": " << createResult.error().message;
     ASSERT_NE(createResult.value(), nullptr);
@@ -436,7 +436,7 @@ TEST(PreparedModelTest, reusableExecuteAsync) {
                     V1_0::ErrorStatus::NONE, V1_0::ErrorStatus::NONE, {}, kNoTiming)));
 
     // create execution
-    const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+    const auto createResult = preparedModel->createReusableExecution({}, {}, {}, {}, {});
     ASSERT_TRUE(createResult.has_value())
             << "Failed with " << createResult.error().code << ": " << createResult.error().message;
     ASSERT_NE(createResult.value(), nullptr);
@@ -461,7 +461,7 @@ TEST(PreparedModelTest, reusableExecuteAsyncLaunchError) {
                     kNoTiming)));
 
     // create execution
-    const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+    const auto createResult = preparedModel->createReusableExecution({}, {}, {}, {}, {});
     ASSERT_TRUE(createResult.has_value())
             << "Failed with " << createResult.error().code << ": " << createResult.error().message;
     ASSERT_NE(createResult.value(), nullptr);
@@ -483,7 +483,7 @@ TEST(PreparedModelTest, reusableExecuteAsyncReturnError) {
                     V1_0::ErrorStatus::NONE, V1_0::ErrorStatus::GENERAL_FAILURE, {}, kNoTiming)));
 
     // create execution
-    const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+    const auto createResult = preparedModel->createReusableExecution({}, {}, {}, {}, {});
     ASSERT_TRUE(createResult.has_value())
             << "Failed with " << createResult.error().code << ": " << createResult.error().message;
     ASSERT_NE(createResult.value(), nullptr);
@@ -504,7 +504,7 @@ TEST(PreparedModelTest, reusableExecuteAsyncTransportFailure) {
             .WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
 
     // create execution
-    const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+    const auto createResult = preparedModel->createReusableExecution({}, {}, {}, {}, {});
     ASSERT_TRUE(createResult.has_value())
             << "Failed with " << createResult.error().code << ": " << createResult.error().message;
     ASSERT_NE(createResult.value(), nullptr);
@@ -525,7 +525,7 @@ TEST(PreparedModelTest, reusableExecuteAsyncDeadObject) {
             .WillOnce(InvokeWithoutArgs(makeDeadObjectFailure));
 
     // create execution
-    const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+    const auto createResult = preparedModel->createReusableExecution({}, {}, {}, {}, {});
     ASSERT_TRUE(createResult.has_value())
             << "Failed with " << createResult.error().code << ": " << createResult.error().message;
     ASSERT_NE(createResult.value(), nullptr);
@@ -548,7 +548,7 @@ TEST(PreparedModelTest, reusableExecuteAsyncCrash) {
     EXPECT_CALL(*mockPreparedModel, execute_1_2(_, _, _)).Times(1).WillOnce(InvokeWithoutArgs(ret));
 
     // create execution
-    const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+    const auto createResult = preparedModel->createReusableExecution({}, {}, {}, {}, {});
     ASSERT_TRUE(createResult.has_value())
             << "Failed with " << createResult.error().code << ": " << createResult.error().message;
     ASSERT_NE(createResult.value(), nullptr);
@@ -566,7 +566,7 @@ TEST(PreparedModelTest, reusableExecuteFencedNotSupported) {
             PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/true).value();
 
     // create execution
-    const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+    const auto createResult = preparedModel->createReusableExecution({}, {}, {}, {}, {});
     ASSERT_TRUE(createResult.has_value())
             << "Failed with " << createResult.error().code << ": " << createResult.error().message;
     ASSERT_NE(createResult.value(), nullptr);
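Usage sketch: as the updated tests exercise, executeFenced() stays unsupported through
the 1.2 adapter even with the widened signature (a sketch; assumes a `preparedModel`
like the one in the tests above):

    // Always fails on a 1.2 HAL service, per the PreparedModel.cpp change; the
    // two trailing {} are the new hints and extensionNameToPrefix arguments.
    const auto fenced = preparedModel->executeFenced({}, {}, {}, {}, {}, {}, {}, {});
    EXPECT_FALSE(fenced.has_value());
    EXPECT_EQ(fenced.error().code, nn::ErrorStatus::GENERAL_FAILURE);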