5 files changed, 49 insertions, 87 deletions
diff --git a/neuralnetworks/utils/adapter/aidl/include/nnapi/hal/aidl/Adapter.h b/neuralnetworks/utils/adapter/aidl/include/nnapi/hal/aidl/Adapter.h
index 4c0b3286de..80ed41dbb6 100644
--- a/neuralnetworks/utils/adapter/aidl/include/nnapi/hal/aidl/Adapter.h
+++ b/neuralnetworks/utils/adapter/aidl/include/nnapi/hal/aidl/Adapter.h
@@ -46,9 +46,6 @@ using Executor = std::function<void(Task, ::android::nn::OptionalTimePoint)>;
 /**
  * Adapt an NNAPI canonical interface object to a AIDL NN HAL interface object.
  *
- * The IPreparedModel object created from IDevice::prepareModel or IDevice::preparedModelFromCache
- * must return "const nn::Model*" from IPreparedModel::getUnderlyingResource().
- *
  * @param device NNAPI canonical IDevice interface object to be adapted.
  * @param executor Type-erased executor to handle executing tasks asynchronously.
  * @return AIDL NN HAL IDevice interface object.
@@ -58,9 +55,6 @@ std::shared_ptr<BnDevice> adapt(::android::nn::SharedDevice device, Executor exe
 /**
  * Adapt an NNAPI canonical interface object to a AIDL NN HAL interface object.
  *
- * The IPreparedModel object created from IDevice::prepareModel or IDevice::preparedModelFromCache
- * must return "const nn::Model*" from IPreparedModel::getUnderlyingResource().
- *
  * This function uses a default executor, which will execute tasks from a detached thread.
  *
  * @param device NNAPI canonical IDevice interface object to be adapted.
diff --git a/neuralnetworks/utils/adapter/hidl/include/nnapi/hal/Adapter.h b/neuralnetworks/utils/adapter/hidl/include/nnapi/hal/Adapter.h
index 6fba4abe7d..3bd93e0051 100644
--- a/neuralnetworks/utils/adapter/hidl/include/nnapi/hal/Adapter.h
+++ b/neuralnetworks/utils/adapter/hidl/include/nnapi/hal/Adapter.h
@@ -46,9 +46,6 @@ using Executor = std::function<void(Task, nn::OptionalTimePoint)>;
 /**
  * Adapt an NNAPI canonical interface object to a HIDL NN HAL interface object.
  *
- * The IPreparedModel object created from IDevice::prepareModel or IDevice::preparedModelFromCache
- * must return "const nn::Model*" from IPreparedModel::getUnderlyingResource().
- *
  * @param device NNAPI canonical IDevice interface object to be adapted.
  * @param executor Type-erased executor to handle executing tasks asynchronously.
  * @return HIDL NN HAL IDevice interface object.
@@ -58,9 +55,6 @@ sp<V1_3::IDevice> adapt(nn::SharedDevice device, Executor executor);
 /**
  * Adapt an NNAPI canonical interface object to a HIDL NN HAL interface object.
  *
- * The IPreparedModel object created from IDevice::prepareModel or IDevice::preparedModelFromCache
- * must return "const nn::Model*" from IPreparedModel::getUnderlyingResource().
- *
  * This function uses a default executor, which will execute tasks from a detached thread.
  *
  * @param device NNAPI canonical IDevice interface object to be adapted.
diff --git a/neuralnetworks/utils/adapter/hidl/include/nnapi/hal/PreparedModel.h b/neuralnetworks/utils/adapter/hidl/include/nnapi/hal/PreparedModel.h
index 9482b0d512..01cd4bc76b 100644
--- a/neuralnetworks/utils/adapter/hidl/include/nnapi/hal/PreparedModel.h
+++ b/neuralnetworks/utils/adapter/hidl/include/nnapi/hal/PreparedModel.h
@@ -39,7 +39,7 @@ namespace android::hardware::neuralnetworks::adapter {
 // Class that adapts nn::IPreparedModel to V1_3::IPreparedModel.
 class PreparedModel final : public V1_3::IPreparedModel {
   public:
-    PreparedModel(nn::SharedPreparedModel preparedModel, Executor executor);
+    explicit PreparedModel(nn::SharedPreparedModel preparedModel);
 
     Return<V1_0::ErrorStatus> execute(const V1_0::Request& request,
                                       const sp<V1_0::IExecutionCallback>& callback) override;
@@ -70,7 +70,6 @@ class PreparedModel final : public V1_3::IPreparedModel {
 
   private:
     const nn::SharedPreparedModel kPreparedModel;
-    const Executor kExecutor;
 };
 
 }  // namespace android::hardware::neuralnetworks::adapter
diff --git a/neuralnetworks/utils/adapter/hidl/src/Device.cpp b/neuralnetworks/utils/adapter/hidl/src/Device.cpp
index 0f44638722..305a1b484f 100644
--- a/neuralnetworks/utils/adapter/hidl/src/Device.cpp
+++ b/neuralnetworks/utils/adapter/hidl/src/Device.cpp
@@ -62,11 +62,11 @@ auto convertInput(const Type& object) -> decltype(nn::convert(std::declval<Type>
 
 using PrepareModelResult = nn::GeneralResult<nn::SharedPreparedModel>;
 
-sp<PreparedModel> adaptPreparedModel(nn::SharedPreparedModel preparedModel, Executor executor) {
+sp<PreparedModel> adaptPreparedModel(nn::SharedPreparedModel preparedModel) {
     if (preparedModel == nullptr) {
         return nullptr;
     }
-    return sp<PreparedModel>::make(std::move(preparedModel), std::move(executor));
+    return sp<PreparedModel>::make(std::move(preparedModel));
 }
 
 void notify(V1_0::IPreparedModelCallback* callback, nn::ErrorStatus status,
@@ -105,14 +105,14 @@ void notify(V1_3::IPreparedModelCallback* callback, nn::ErrorStatus status,
 }
 
 template <typename CallbackType>
-void notify(CallbackType* callback, PrepareModelResult result, Executor executor) {
+void notify(CallbackType* callback, PrepareModelResult result) {
     if (!result.has_value()) {
         const auto [message, status] = std::move(result).error();
         LOG(ERROR) << message;
         notify(callback, status, nullptr);
     } else {
         auto preparedModel = std::move(result).value();
-        auto hidlPreparedModel = adaptPreparedModel(std::move(preparedModel), std::move(executor));
+        auto hidlPreparedModel = adaptPreparedModel(std::move(preparedModel));
         notify(callback, nn::ErrorStatus::NONE, std::move(hidlPreparedModel));
     }
 }
@@ -133,10 +133,10 @@ nn::GeneralResult<void> prepareModel(const nn::SharedDevice& device, const Execu
 
     auto nnModel = NN_TRY(convertInput(model));
 
-    Task task = [device, nnModel = std::move(nnModel), executor, callback] {
+    Task task = [device, nnModel = std::move(nnModel), callback] {
         auto result = device->prepareModel(nnModel, nn::ExecutionPreference::DEFAULT,
                                            nn::Priority::DEFAULT, {}, {}, {}, {}, {}, {});
-        notify(callback.get(), std::move(result), executor);
+        notify(callback.get(), std::move(result));
     };
     executor(std::move(task), {});
 
@@ -154,10 +154,10 @@ nn::GeneralResult<void> prepareModel_1_1(const nn::SharedDevice& device, const E
     auto nnModel = NN_TRY(convertInput(model));
     const auto nnPreference = NN_TRY(convertInput(preference));
 
-    Task task = [device, nnModel = std::move(nnModel), nnPreference, executor, callback] {
+    Task task = [device, nnModel = std::move(nnModel), nnPreference, callback] {
         auto result = device->prepareModel(nnModel, nnPreference, nn::Priority::DEFAULT, {}, {},
                                            {}, {}, {}, {});
-        notify(callback.get(), std::move(result), executor);
+        notify(callback.get(), std::move(result));
     };
     executor(std::move(task), {});
 
@@ -183,10 +183,10 @@ nn::GeneralResult<void> prepareModel_1_2(const nn::SharedDevice& device, const E
 
     Task task = [device, nnModel = std::move(nnModel), nnPreference,
                  nnModelCache = std::move(nnModelCache), nnDataCache = std::move(nnDataCache),
-                 nnToken, executor, callback] {
+                 nnToken, callback] {
         auto result = device->prepareModel(nnModel, nnPreference, nn::Priority::DEFAULT, {},
                                            nnModelCache, nnDataCache, nnToken, {}, {});
-        notify(callback.get(), std::move(result), executor);
+        notify(callback.get(), std::move(result));
     };
     executor(std::move(task), {});
 
@@ -213,10 +213,10 @@ nn::GeneralResult<void> prepareModel_1_3(
 
     Task task = [device, nnModel = std::move(nnModel), nnPreference, nnPriority, nnDeadline,
                  nnModelCache = std::move(nnModelCache), nnDataCache = std::move(nnDataCache),
-                 nnToken, executor, callback] {
+                 nnToken, callback] {
         auto result = device->prepareModel(nnModel, nnPreference, nnPriority, nnDeadline,
                                            nnModelCache, nnDataCache, nnToken, {}, {});
-        notify(callback.get(), std::move(result), executor);
+        notify(callback.get(), std::move(result));
     };
     executor(std::move(task), nnDeadline);
 
@@ -238,9 +238,9 @@ nn::GeneralResult<void> prepareModelFromCache(const nn::SharedDevice& device,
     const auto nnToken = nn::CacheToken(token);
 
     Task task = [device, nnModelCache = std::move(nnModelCache),
-                 nnDataCache = std::move(nnDataCache), nnToken, executor, callback] {
+                 nnDataCache = std::move(nnDataCache), nnToken, callback] {
         auto result = device->prepareModelFromCache({}, nnModelCache, nnDataCache, nnToken);
-        notify(callback.get(), std::move(result), executor);
+        notify(callback.get(), std::move(result));
     };
     executor(std::move(task), {});
 
@@ -262,9 +262,9 @@ nn::GeneralResult<void> prepareModelFromCache_1_3(
     const auto nnToken = nn::CacheToken(token);
 
     auto task = [device, nnDeadline, nnModelCache = std::move(nnModelCache),
-                 nnDataCache = std::move(nnDataCache), nnToken, executor, callback] {
+                 nnDataCache = std::move(nnDataCache), nnToken, callback] {
         auto result = device->prepareModelFromCache(nnDeadline, nnModelCache, nnDataCache, nnToken);
-        notify(callback.get(), std::move(result), executor);
+        notify(callback.get(), std::move(result));
     };
     executor(std::move(task), nnDeadline);
 
diff --git a/neuralnetworks/utils/adapter/hidl/src/PreparedModel.cpp b/neuralnetworks/utils/adapter/hidl/src/PreparedModel.cpp
index c6055a6747..3570a74a6d 100644
--- a/neuralnetworks/utils/adapter/hidl/src/PreparedModel.cpp
+++ b/neuralnetworks/utils/adapter/hidl/src/PreparedModel.cpp
@@ -55,15 +55,6 @@ auto convertInput(const Type& object) -> decltype(nn::convert(std::declval<Type>
     return result;
 }
 
-nn::GeneralResult<nn::Version> validateRequestForModel(const nn::Request& request,
-                                                       const nn::Model& model) {
-    nn::GeneralResult<nn::Version> version = nn::validateRequestForModel(request, model);
-    if (!version.ok()) {
-        version.error().code = nn::ErrorStatus::INVALID_ARGUMENT;
-    }
-    return version;
-}
-
 class FencedExecutionCallback final : public V1_3::IFencedExecutionCallback {
   public:
     explicit FencedExecutionCallback(const nn::ExecuteFencedInfoCallback& callback)
@@ -144,58 +135,48 @@ void notify(CallbackType* callback, ExecutionResult result) {
 }
 
 nn::GeneralResult<void> execute(const nn::SharedPreparedModel& preparedModel,
-                                const Executor& executor, const V1_0::Request& request,
+                                const V1_0::Request& request,
                                 const sp<V1_0::IExecutionCallback>& callback) {
     if (callback.get() == nullptr) {
         return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) << "Invalid callback";
     }
 
-    auto nnRequest = NN_TRY(convertInput(request));
+    const auto nnRequest = NN_TRY(convertInput(request));
 
-    const std::any resource = preparedModel->getUnderlyingResource();
-    if (const auto* model = std::any_cast<const nn::Model*>(&resource)) {
-        CHECK(*model != nullptr);
-        NN_TRY(adapter::validateRequestForModel(nnRequest, **model));
-    }
+    auto result = preparedModel->execute(nnRequest, nn::MeasureTiming::NO, {}, {}, {}, {});
 
-    Task task = [preparedModel, nnRequest = std::move(nnRequest), callback] {
-        auto result = preparedModel->execute(nnRequest, nn::MeasureTiming::NO, {}, {}, {}, {});
-        notify(callback.get(), std::move(result));
-    };
-    executor(std::move(task), {});
+    if (!result.ok() && result.error().code == nn::ErrorStatus::INVALID_ARGUMENT) {
+        const auto& [message, code, outputShapes] = result.error();
+        return nn::error(code) << message;
+    }
 
+    notify(callback.get(), std::move(result));
     return {};
 }
 
 nn::GeneralResult<void> execute_1_2(const nn::SharedPreparedModel& preparedModel,
-                                    const Executor& executor, const V1_0::Request& request,
-                                    V1_2::MeasureTiming measure,
+                                    const V1_0::Request& request, V1_2::MeasureTiming measure,
                                     const sp<V1_2::IExecutionCallback>& callback) {
     if (callback.get() == nullptr) {
         return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) << "Invalid callback";
     }
 
-    auto nnRequest = NN_TRY(convertInput(request));
+    const auto nnRequest = NN_TRY(convertInput(request));
     const auto nnMeasure = NN_TRY(convertInput(measure));
 
-    const std::any resource = preparedModel->getUnderlyingResource();
-    if (const auto* model = std::any_cast<const nn::Model*>(&resource)) {
-        CHECK(*model != nullptr);
-        NN_TRY(adapter::validateRequestForModel(nnRequest, **model));
-    }
+    auto result = preparedModel->execute(nnRequest, nnMeasure, {}, {}, {}, {});
 
-    Task task = [preparedModel, nnRequest = std::move(nnRequest), nnMeasure, callback] {
-        auto result = preparedModel->execute(nnRequest, nnMeasure, {}, {}, {}, {});
-        notify(callback.get(), std::move(result));
-    };
-    executor(std::move(task), {});
+    if (!result.ok() && result.error().code == nn::ErrorStatus::INVALID_ARGUMENT) {
+        const auto& [message, code, outputShapes] = result.error();
+        return nn::error(code) << message;
+    }
 
+    notify(callback.get(), std::move(result));
     return {};
 }
 
 nn::GeneralResult<void> execute_1_3(const nn::SharedPreparedModel& preparedModel,
-                                    const Executor& executor, const V1_3::Request& request,
-                                    V1_2::MeasureTiming measure,
+                                    const V1_3::Request& request, V1_2::MeasureTiming measure,
                                     const V1_3::OptionalTimePoint& deadline,
                                     const V1_3::OptionalTimeoutDuration& loopTimeoutDuration,
                                     const sp<V1_3::IExecutionCallback>& callback) {
@@ -203,25 +184,20 @@ nn::GeneralResult<void> execute_1_3(const nn::SharedPreparedModel& preparedModel
         return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) << "Invalid callback";
     }
 
-    auto nnRequest = NN_TRY(convertInput(request));
+    const auto nnRequest = NN_TRY(convertInput(request));
     const auto nnMeasure = NN_TRY(convertInput(measure));
     const auto nnDeadline = NN_TRY(convertInput(deadline));
     const auto nnLoopTimeoutDuration = NN_TRY(convertInput(loopTimeoutDuration));
 
-    const std::any resource = preparedModel->getUnderlyingResource();
-    if (const auto* model = std::any_cast<const nn::Model*>(&resource)) {
-        CHECK(*model != nullptr);
-        NN_TRY(adapter::validateRequestForModel(nnRequest, **model));
-    }
+    auto result =
+            preparedModel->execute(nnRequest, nnMeasure, nnDeadline, nnLoopTimeoutDuration, {}, {});
 
-    Task task = [preparedModel, nnRequest = std::move(nnRequest), nnMeasure, nnDeadline,
-                 nnLoopTimeoutDuration, callback] {
-        auto result = preparedModel->execute(nnRequest, nnMeasure, nnDeadline,
-                                             nnLoopTimeoutDuration, {}, {});
-        notify(callback.get(), std::move(result));
-    };
-    executor(std::move(task), nnDeadline);
+    if (!result.ok() && result.error().code == nn::ErrorStatus::INVALID_ARGUMENT) {
+        const auto& [message, code, outputShapes] = result.error();
+        return nn::error(code) << message;
+    }
 
+    notify(callback.get(), std::move(result));
     return {};
 }
@@ -304,10 +280,9 @@ nn::GeneralResult<std::pair<hidl_handle, sp<V1_3::IFencedExecutionCallback>>> ex
 
 }  // namespace
 
-PreparedModel::PreparedModel(nn::SharedPreparedModel preparedModel, Executor executor)
-    : kPreparedModel(std::move(preparedModel)), kExecutor(std::move(executor)) {
+PreparedModel::PreparedModel(nn::SharedPreparedModel preparedModel)
+    : kPreparedModel(std::move(preparedModel)) {
     CHECK(kPreparedModel != nullptr);
-    CHECK(kExecutor != nullptr);
 }
 
 nn::SharedPreparedModel PreparedModel::getUnderlyingPreparedModel() const {
@@ -316,7 +291,7 @@ nn::SharedPreparedModel PreparedModel::getUnderlyingPreparedModel() const {
 
 Return<V1_0::ErrorStatus> PreparedModel::execute(const V1_0::Request& request,
                                                  const sp<V1_0::IExecutionCallback>& callback) {
-    auto result = adapter::execute(kPreparedModel, kExecutor, request, callback);
+    auto result = adapter::execute(kPreparedModel, request, callback);
     if (!result.has_value()) {
        auto [message, code] = std::move(result).error();
        LOG(ERROR) << "adapter::PreparedModel::execute failed with " << code << ": " << message;
@@ -329,7 +304,7 @@ Return<V1_0::ErrorStatus> PreparedModel::execute(const V1_0::Request& request,
 Return<V1_0::ErrorStatus> PreparedModel::execute_1_2(const V1_0::Request& request,
                                                      V1_2::MeasureTiming measure,
                                                      const sp<V1_2::IExecutionCallback>& callback) {
-    auto result = adapter::execute_1_2(kPreparedModel, kExecutor, request, measure, callback);
+    auto result = adapter::execute_1_2(kPreparedModel, request, measure, callback);
     if (!result.has_value()) {
         auto [message, code] = std::move(result).error();
         LOG(ERROR) << "adapter::PreparedModel::execute_1_2 failed with " << code << ": " << message;
@@ -344,7 +319,7 @@ Return<V1_3::ErrorStatus> PreparedModel::execute_1_3(
         const V1_3::OptionalTimePoint& deadline,
         const V1_3::OptionalTimeoutDuration& loopTimeoutDuration,
         const sp<V1_3::IExecutionCallback>& callback) {
-    auto result = adapter::execute_1_3(kPreparedModel, kExecutor, request, measure, deadline,
+    auto result = adapter::execute_1_3(kPreparedModel, request, measure, deadline,
                                        loopTimeoutDuration, callback);
     if (!result.has_value()) {
         auto [message, code] = std::move(result).error();
@@ -405,8 +380,8 @@ Return<void> PreparedModel::configureExecutionBurst(
         cb(V1_2::utils::convert(code).value(), nullptr);
         return Void();
     }
-    auto burstContext = std::move(result).value();
-    cb(V1_0::ErrorStatus::NONE, std::move(burstContext));
+    const auto burstContext = std::move(result).value();
+    cb(V1_0::ErrorStatus::NONE, burstContext);
     return Void();
 }
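
A note on the header hunks above: both Adapter.h files keep the Executor alias (std::function<void(Task, nn::OptionalTimePoint)>), and their doc comments still promise that the single-argument adapt() overload uses a default executor running tasks from a detached thread. As a rough sketch of that documented contract (not necessarily the exact code in the adapters' Adapter.cpp), such a default executor could look like the following; makeDefaultExecutor and the stand-in OptionalTimePoint are hypothetical names introduced only for illustration:

#include <functional>
#include <thread>

// Stand-ins for the aliases declared in the headers above; OptionalTimePoint
// substitutes for nn::OptionalTimePoint to keep the sketch self-contained.
using Task = std::function<void()>;
struct OptionalTimePoint {};
using Executor = std::function<void(Task, OptionalTimePoint)>;

// Hypothetical helper: builds an executor that launches each task on a new
// detached thread and ignores the optional deadline, matching the behavior
// the doc comment describes for the single-argument adapt() overload.
Executor makeDefaultExecutor() {
    return [](Task task, OptionalTimePoint /*deadline*/) {
        std::thread(std::move(task)).detach();
    };
}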
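
The PreparedModel.cpp hunks change more than plumbing: with the Executor gone, each execute* entry point now runs the request synchronously and returns INVALID_ARGUMENT failures through its own GeneralResult (which the caller surfaces as the HAL method's status), while every other outcome, success included, is still delivered through the IExecutionCallback. The following standalone sketch models only that control flow; Result, Error, ErrorStatus, and Callback are simplified stand-ins for the NN canonical and HIDL types:

#include <functional>
#include <iostream>
#include <string>
#include <variant>

enum class ErrorStatus { NONE, INVALID_ARGUMENT, GENERAL_FAILURE };

struct Error {
    std::string message;
    ErrorStatus code;
};
using Result = std::variant<int, Error>;  // int stands in for execution outputs
using Callback = std::function<void(ErrorStatus, int)>;

// Mirrors the new adapter::execute flow: run synchronously, surface
// INVALID_ARGUMENT to the caller, and report everything else (success or
// other errors) through the callback.
ErrorStatus execute(const std::function<Result()>& preparedModel, const Callback& callback) {
    Result result = preparedModel();  // synchronous: no Task, no Executor

    if (const auto* error = std::get_if<Error>(&result)) {
        if (error->code == ErrorStatus::INVALID_ARGUMENT) {
            return error->code;  // becomes the HAL method's return status
        }
        callback(error->code, 0);  // non-argument errors still use the callback
        return ErrorStatus::NONE;
    }

    callback(ErrorStatus::NONE, std::get<int>(result));
    return ErrorStatus::NONE;
}

int main() {
    const Callback cb = [](ErrorStatus status, int value) {
        std::cout << "callback: status=" << static_cast<int>(status) << " value=" << value << '\n';
    };
    // Success: delivered via the callback, execute() itself reports NONE.
    std::cout << static_cast<int>(execute([] { return Result{42}; }, cb)) << '\n';
    // Invalid argument: callback never fires, the status is returned directly.
    std::cout << static_cast<int>(execute(
                         [] { return Result{Error{"bad request", ErrorStatus::INVALID_ARGUMENT}}; },
                         cb))
              << '\n';
}

One reviewable consequence of this shape: the HAL methods now block the calling binder thread for the duration of the execution, whereas the old Task/Executor code returned as soon as the work was queued.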