| Field | Value | Date |
|---|---|---|
| author | Michael Butler <butlermichael@google.com> | 2022-02-23 20:56:56 -0800 |
| committer | Michael Butler <butlermichael@google.com> | 2022-03-01 16:16:26 -0800 |
| commit | d65dc06ddc44d82e664ea19dc28b7d44d46aa656 (patch) | |
| tree | 8cd7ae7c74dcb9f92afe904c511b31f7022bbdc8 /neuralnetworks/utils/adapter/hidl/src/Device.cpp | |
| parent | 795f758780045821e87675c6abde99f59d0f13f0 (diff) | |
Make NN canonical->HIDL adapter execute* methods synchronous
This change reworks the asynchronous execute* methods so that they are
handled synchronously (on the same thread), for three reasons:
1) To remove the need to use IPreparedModel::getUnderlyingResource
2) To simplify the code
3) To make the code more performant
Bug: N/A
Test: mma
Test: presubmit
Change-Id: I2c37deb03d1b1c34b0173bd741e55fce4de757f7
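
To make the "handled synchronously (on the same thread)" point concrete, here is a minimal, self-contained sketch of the before/after pattern. All names in it (runAsync, runSync, Callback, Executor) are illustrative stand-ins, not the adapter's real HIDL types:

```cpp
#include <chrono>
#include <functional>
#include <iostream>
#include <string>
#include <thread>

// Stand-in types; the real adapter uses HIDL callback interfaces and
// nn::GeneralResult, not these.
using Callback = std::function<void(const std::string&)>;
using Executor = std::function<void(std::function<void()>)>;

// Before: the method enqueues work on an executor and returns immediately;
// the callback fires later, from the executor's thread.
void runAsync(const Executor& executor, Callback callback) {
    executor([callback = std::move(callback)] { callback("async result"); });
}

// After: the work runs on the calling thread and the callback fires before
// the method returns, so no Executor needs to be captured or passed around.
void runSync(const Callback& callback) {
    callback("sync result");
}

int main() {
    const Callback print = [](const std::string& r) { std::cout << r << '\n'; };

    // Trivial executor: run each task on a detached thread.
    const Executor executor = [](std::function<void()> task) {
        std::thread(std::move(task)).detach();
    };

    runAsync(executor, print);
    runSync(print);

    // Sketch only: crudely wait for the detached thread before exiting.
    std::this_thread::sleep_for(std::chrono::milliseconds(100));
    return 0;
}
```

The synchronous form is also what removes the need for IPreparedModel::getUnderlyingResource: the adapter can call the canonical prepared model directly instead of handing work back through an executor.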
Diffstat (limited to 'neuralnetworks/utils/adapter/hidl/src/Device.cpp')
| Mode | Path | Lines |
|---|---|---|
| -rw-r--r-- | neuralnetworks/utils/adapter/hidl/src/Device.cpp | 32 |

1 file changed, 16 insertions, 16 deletions
diff --git a/neuralnetworks/utils/adapter/hidl/src/Device.cpp b/neuralnetworks/utils/adapter/hidl/src/Device.cpp
index 0f44638722..305a1b484f 100644
--- a/neuralnetworks/utils/adapter/hidl/src/Device.cpp
+++ b/neuralnetworks/utils/adapter/hidl/src/Device.cpp
@@ -62,11 +62,11 @@ auto convertInput(const Type& object) -> decltype(nn::convert(std::declval<Type>
 
 using PrepareModelResult = nn::GeneralResult<nn::SharedPreparedModel>;
 
-sp<PreparedModel> adaptPreparedModel(nn::SharedPreparedModel preparedModel, Executor executor) {
+sp<PreparedModel> adaptPreparedModel(nn::SharedPreparedModel preparedModel) {
     if (preparedModel == nullptr) {
         return nullptr;
     }
-    return sp<PreparedModel>::make(std::move(preparedModel), std::move(executor));
+    return sp<PreparedModel>::make(std::move(preparedModel));
 }
 
 void notify(V1_0::IPreparedModelCallback* callback, nn::ErrorStatus status,
@@ -105,14 +105,14 @@ void notify(V1_3::IPreparedModelCallback* callback, nn::ErrorStatus status,
 }
 
 template <typename CallbackType>
-void notify(CallbackType* callback, PrepareModelResult result, Executor executor) {
+void notify(CallbackType* callback, PrepareModelResult result) {
     if (!result.has_value()) {
         const auto [message, status] = std::move(result).error();
         LOG(ERROR) << message;
         notify(callback, status, nullptr);
     } else {
         auto preparedModel = std::move(result).value();
-        auto hidlPreparedModel = adaptPreparedModel(std::move(preparedModel), std::move(executor));
+        auto hidlPreparedModel = adaptPreparedModel(std::move(preparedModel));
         notify(callback, nn::ErrorStatus::NONE, std::move(hidlPreparedModel));
     }
 }
@@ -133,10 +133,10 @@ nn::GeneralResult<void> prepareModel(const nn::SharedDevice& device, const Execu
 
     auto nnModel = NN_TRY(convertInput(model));
 
-    Task task = [device, nnModel = std::move(nnModel), executor, callback] {
+    Task task = [device, nnModel = std::move(nnModel), callback] {
         auto result = device->prepareModel(nnModel, nn::ExecutionPreference::DEFAULT,
                                            nn::Priority::DEFAULT, {}, {}, {}, {}, {}, {});
-        notify(callback.get(), std::move(result), executor);
+        notify(callback.get(), std::move(result));
     };
     executor(std::move(task), {});
 
@@ -154,10 +154,10 @@ nn::GeneralResult<void> prepareModel_1_1(const nn::SharedDevice& device, const E
     auto nnModel = NN_TRY(convertInput(model));
     const auto nnPreference = NN_TRY(convertInput(preference));
 
-    Task task = [device, nnModel = std::move(nnModel), nnPreference, executor, callback] {
+    Task task = [device, nnModel = std::move(nnModel), nnPreference, callback] {
         auto result = device->prepareModel(nnModel, nnPreference, nn::Priority::DEFAULT, {}, {},
                                            {}, {}, {}, {});
-        notify(callback.get(), std::move(result), executor);
+        notify(callback.get(), std::move(result));
     };
     executor(std::move(task), {});
 
@@ -183,10 +183,10 @@ nn::GeneralResult<void> prepareModel_1_2(const nn::SharedDevice& device, const E
 
     Task task = [device, nnModel = std::move(nnModel), nnPreference,
                  nnModelCache = std::move(nnModelCache), nnDataCache = std::move(nnDataCache),
-                 nnToken, executor, callback] {
+                 nnToken, callback] {
         auto result = device->prepareModel(nnModel, nnPreference, nn::Priority::DEFAULT, {},
                                            nnModelCache, nnDataCache, nnToken, {}, {});
-        notify(callback.get(), std::move(result), executor);
+        notify(callback.get(), std::move(result));
     };
     executor(std::move(task), {});
 
@@ -213,10 +213,10 @@ nn::GeneralResult<void> prepareModel_1_3(
 
     Task task = [device, nnModel = std::move(nnModel), nnPreference, nnPriority, nnDeadline,
                  nnModelCache = std::move(nnModelCache), nnDataCache = std::move(nnDataCache),
-                 nnToken, executor, callback] {
+                 nnToken, callback] {
         auto result = device->prepareModel(nnModel, nnPreference, nnPriority, nnDeadline,
                                            nnModelCache, nnDataCache, nnToken, {}, {});
-        notify(callback.get(), std::move(result), executor);
+        notify(callback.get(), std::move(result));
     };
     executor(std::move(task), nnDeadline);
 
@@ -238,9 +238,9 @@ nn::GeneralResult<void> prepareModelFromCache(const nn::SharedDevice& device,
     const auto nnToken = nn::CacheToken(token);
 
     Task task = [device, nnModelCache = std::move(nnModelCache),
-                 nnDataCache = std::move(nnDataCache), nnToken, executor, callback] {
+                 nnDataCache = std::move(nnDataCache), nnToken, callback] {
         auto result = device->prepareModelFromCache({}, nnModelCache, nnDataCache, nnToken);
-        notify(callback.get(), std::move(result), executor);
+        notify(callback.get(), std::move(result));
     };
     executor(std::move(task), {});
 
@@ -262,9 +262,9 @@ nn::GeneralResult<void> prepareModelFromCache_1_3(
     const auto nnToken = nn::CacheToken(token);
 
     auto task = [device, nnDeadline, nnModelCache = std::move(nnModelCache),
-                 nnDataCache = std::move(nnDataCache), nnToken, executor, callback] {
+                 nnDataCache = std::move(nnDataCache), nnToken, callback] {
         auto result =
                 device->prepareModelFromCache(nnDeadline, nnModelCache, nnDataCache, nnToken);
-        notify(callback.get(), std::move(result), executor);
+        notify(callback.get(), std::move(result));
     };
     executor(std::move(task), nnDeadline);
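
For readers skimming the hunks: the template notify() overload funnels both outcomes of a nn::GeneralResult into the status-specific HIDL callback overloads, which is why dropping the Executor parameter touches every prepare* Task. Below is a compilable approximation of that dispatch; PrepareModelResult, ErrorStatus, and both notify overloads here are simplified stand-ins, not the real NN/HIDL signatures:

```cpp
#include <iostream>
#include <memory>
#include <optional>
#include <string>
#include <utility>

enum class ErrorStatus { NONE, GENERAL_FAILURE };

struct PreparedModel {};  // stand-in for the adapted HIDL prepared model

// Simplified stand-in for nn::GeneralResult<nn::SharedPreparedModel>:
// exactly one of `value` or `error` is meaningful.
struct PrepareModelResult {
    std::shared_ptr<PreparedModel> value;  // set on success
    std::optional<std::string> error;      // set on failure
    bool has_value() const { return !error.has_value(); }
};

// Status-specific overload, analogous to
// notify(V1_0::IPreparedModelCallback*, nn::ErrorStatus, ...).
void notify(ErrorStatus status, std::shared_ptr<PreparedModel> model) {
    std::cout << (status == ErrorStatus::NONE ? "prepared ok" : "prepare failed")
              << (model ? " (model set)" : " (no model)") << '\n';
}

// The template overload in the diff, approximated: centralize error/success
// handling, then forward to the status-specific overload. With the Executor
// gone, this needs only the result itself.
void notify(PrepareModelResult result) {
    if (!result.has_value()) {
        std::cerr << *result.error << '\n';        // LOG(ERROR) in the real code
        notify(ErrorStatus::GENERAL_FAILURE, nullptr);
    } else {
        notify(ErrorStatus::NONE, std::move(result.value));
    }
}

int main() {
    notify(PrepareModelResult{std::make_shared<PreparedModel>(), std::nullopt});
    notify(PrepareModelResult{nullptr, "driver rejected model"});
    return 0;
}
```

Note the resulting shape of the diff: error handling stays centralized in the template overload, so each prepare* Task now captures only the device, converted inputs, and callback, while the device-level executor still schedules the Task itself (with nnDeadline where one exists).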