| author | Xusong Wang <xusongw@google.com> | 2021-10-05 14:10:41 -0700 |
|---|---|---|
| committer | Xusong Wang <xusongw@google.com> | 2022-01-18 10:51:29 -0800 |
| commit | 11f30c81ef7d45fd97bf0dcc3f3ba2ae42689504 (patch) | |
| tree | 251f0fb62aa0f9d55b4a0d121f41f73507261163 /neuralnetworks/aidl/utils/src | |
| parent | 018fc3cac8b2361ad28a97685f9449a99326c9cd (diff) | |
Reusable execution at HAL level -- HAL.
This CL modifies the canonical/AIDL adapter to use the IExecution object
when it is available.
Bug: 202405342
Bug: 202431255
Test: NNT_static
Test: CtsNNAPITestCases
Test: VtsHalNeuralnetworksTargetTest
Change-Id: I6aac3c57f97ac87a5ba3f78cfd843fcc403decff
Merged-In: I6aac3c57f97ac87a5ba3f78cfd843fcc403decff
(cherry picked from commit 7f5c7d293c2dad462dc9c0f1f1a160fb2c2c9a9b)
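The heart of this change is the new dispatch in PreparedModel::createReusableExecution (see the PreparedModel.cpp hunks below): on drivers reporting feature level 8 or higher, the adapter asks the driver for a dedicated IExecution object and wraps it; on older drivers it falls back to ExecutionWithCachedRequest, which caches the converted request and replays it through the one-shot execution path. A minimal sketch of that shape, assuming simplified stand-in types (FeatureLevel, Request, Execution, DriverBackedExecution, and CachedRequestExecution are all hypothetical placeholders, not the real NNAPI/AIDL types):

```cpp
#include <iostream>
#include <memory>
#include <utility>

// Hypothetical stand-ins for illustration only; the real types are
// nn::Version, aidl_hal::Request, aidl_hal::IExecution, etc.
enum class FeatureLevel { LEVEL_7 = 7, LEVEL_8 = 8 };

struct Request {};  // placeholder for the already-converted AIDL request

// Common shape of a reusable execution: compute() may be called many times.
struct Execution {
    virtual ~Execution() = default;
    virtual void compute() = 0;
};

// Feature level >= 8: the driver returned a dedicated execution object, so
// each compute() is a single synchronous call on that object.
struct DriverBackedExecution final : Execution {
    void compute() override { std::cout << "IExecution::executeSynchronously\n"; }
};

// Feature level < 8 fallback: cache the request and replay it through the
// one-shot execute path on every compute() (the ExecutionWithCachedRequest role).
struct CachedRequestExecution final : Execution {
    explicit CachedRequestExecution(Request request) : kRequest(std::move(request)) {}
    void compute() override { std::cout << "one-shot executeSynchronously(request)\n"; }
    const Request kRequest;
};

class PreparedModel {
  public:
    explicit PreparedModel(FeatureLevel featureLevel) : kFeatureLevel(featureLevel) {}

    // Mirrors the dispatch added by this CL: prefer the driver's reusable
    // execution object, fall back to the cached-request wrapper.
    std::shared_ptr<Execution> createReusableExecution(Request request) const {
        if (kFeatureLevel >= FeatureLevel::LEVEL_8) {
            return std::make_shared<DriverBackedExecution>();
        }
        return std::make_shared<CachedRequestExecution>(std::move(request));
    }

  private:
    const FeatureLevel kFeatureLevel;
};

int main() {
    const auto execution =
            PreparedModel(FeatureLevel::LEVEL_8).createReusableExecution(Request{});
    execution->compute();  // request conversion/validation happened only once
    execution->compute();
}
```

The payoff is that request validation and conversion happen once at creation time rather than on every compute() call.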
Diffstat (limited to 'neuralnetworks/aidl/utils/src')
| -rw-r--r-- | neuralnetworks/aidl/utils/src/Callbacks.cpp | 7 |
| -rw-r--r-- | neuralnetworks/aidl/utils/src/Device.cpp | 4 |
| -rw-r--r-- | neuralnetworks/aidl/utils/src/Execution.cpp | 45 |
| -rw-r--r-- | neuralnetworks/aidl/utils/src/PreparedModel.cpp | 158 |

4 files changed, 143 insertions, 71 deletions
```diff
diff --git a/neuralnetworks/aidl/utils/src/Callbacks.cpp b/neuralnetworks/aidl/utils/src/Callbacks.cpp
index 8084970690..554f3faa73 100644
--- a/neuralnetworks/aidl/utils/src/Callbacks.cpp
+++ b/neuralnetworks/aidl/utils/src/Callbacks.cpp
@@ -38,16 +38,17 @@ namespace {
 // nn::kVersionFeatureLevel5. On failure, this function returns with the appropriate
 // nn::GeneralError.
 nn::GeneralResult<nn::SharedPreparedModel> prepareModelCallback(
-        ErrorStatus status, const std::shared_ptr<IPreparedModel>& preparedModel) {
+        ErrorStatus status, const std::shared_ptr<IPreparedModel>& preparedModel,
+        nn::Version featureLevel) {
     HANDLE_STATUS_AIDL(status) << "model preparation failed with " << toString(status);
-    return NN_TRY(PreparedModel::create(preparedModel));
+    return NN_TRY(PreparedModel::create(preparedModel, featureLevel));
 }

 }  // namespace

 ndk::ScopedAStatus PreparedModelCallback::notify(
         ErrorStatus status, const std::shared_ptr<IPreparedModel>& preparedModel) {
-    mData.put(prepareModelCallback(status, preparedModel));
+    mData.put(prepareModelCallback(status, preparedModel, kFeatureLevel));
     return ndk::ScopedAStatus::ok();
 }

diff --git a/neuralnetworks/aidl/utils/src/Device.cpp b/neuralnetworks/aidl/utils/src/Device.cpp
index 5b7ec4ebd7..bad10ed347 100644
--- a/neuralnetworks/aidl/utils/src/Device.cpp
+++ b/neuralnetworks/aidl/utils/src/Device.cpp
@@ -229,7 +229,7 @@ nn::GeneralResult<nn::SharedPreparedModel> Device::prepareModel(
     const auto aidlDataCache = NN_TRY(convert(dataCache));
     const auto aidlToken = NN_TRY(convert(token));

-    const auto cb = ndk::SharedRefBase::make<PreparedModelCallback>();
+    const auto cb = ndk::SharedRefBase::make<PreparedModelCallback>(kFeatureLevel);
     const auto scoped = kDeathHandler.protectCallback(cb.get());

     const auto ret = kDevice->prepareModel(aidlModel, aidlPreference, aidlPriority, aidlDeadline,
@@ -247,7 +247,7 @@ nn::GeneralResult<nn::SharedPreparedModel> Device::prepareModelFromCache(
     const auto aidlDataCache = NN_TRY(convert(dataCache));
     const auto aidlToken = NN_TRY(convert(token));

-    const auto cb = ndk::SharedRefBase::make<PreparedModelCallback>();
+    const auto cb = ndk::SharedRefBase::make<PreparedModelCallback>(kFeatureLevel);
     const auto scoped = kDeathHandler.protectCallback(cb.get());

     const auto ret = kDevice->prepareModelFromCache(aidlDeadline, aidlModelCache, aidlDataCache,
diff --git a/neuralnetworks/aidl/utils/src/Execution.cpp b/neuralnetworks/aidl/utils/src/Execution.cpp
index 94edd90c89..c4add636e5 100644
--- a/neuralnetworks/aidl/utils/src/Execution.cpp
+++ b/neuralnetworks/aidl/utils/src/Execution.cpp
@@ -35,36 +35,39 @@

 namespace aidl::android::hardware::neuralnetworks::utils {

-nn::GeneralResult<std::shared_ptr<const Execution>> Execution::create(
-        std::shared_ptr<const PreparedModel> preparedModel, Request request,
-        hal::utils::RequestRelocation relocation, bool measure, int64_t loopTimeoutDuration) {
+nn::GeneralResult<std::shared_ptr<const ExecutionWithCachedRequest>>
+ExecutionWithCachedRequest::create(std::shared_ptr<const PreparedModel> preparedModel,
+                                   Request request, hal::utils::RequestRelocation relocation,
+                                   bool measure, int64_t loopTimeoutDuration) {
     if (preparedModel == nullptr) {
-        return NN_ERROR() << "aidl::utils::Execution::create must have non-null preparedModel";
+        return NN_ERROR() << "aidl::utils::ExecutionWithCachedRequest::create must have non-null "
+                             "preparedModel";
     }

-    return std::make_shared<const Execution>(PrivateConstructorTag{}, std::move(preparedModel),
-                                             std::move(request), std::move(relocation), measure,
-                                             loopTimeoutDuration);
+    return std::make_shared<const ExecutionWithCachedRequest>(
+            PrivateConstructorTag{}, std::move(preparedModel), std::move(request),
+            std::move(relocation), measure, loopTimeoutDuration);
 }

-Execution::Execution(PrivateConstructorTag /*tag*/,
-                     std::shared_ptr<const PreparedModel> preparedModel, Request request,
-                     hal::utils::RequestRelocation relocation, bool measure,
-                     int64_t loopTimeoutDuration)
+ExecutionWithCachedRequest::ExecutionWithCachedRequest(
+        PrivateConstructorTag /*tag*/, std::shared_ptr<const PreparedModel> preparedModel,
+        Request request, hal::utils::RequestRelocation relocation, bool measure,
+        int64_t loopTimeoutDuration)
     : kPreparedModel(std::move(preparedModel)),
       kRequest(std::move(request)),
       kRelocation(std::move(relocation)),
       kMeasure(measure),
       kLoopTimeoutDuration(loopTimeoutDuration) {}

-nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> Execution::compute(
-        const nn::OptionalTimePoint& deadline) const {
+nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
+ExecutionWithCachedRequest::compute(const nn::OptionalTimePoint& deadline) const {
     const auto aidlDeadline = NN_TRY(convert(deadline));
     return kPreparedModel->executeInternal(kRequest, kMeasure, aidlDeadline, kLoopTimeoutDuration,
                                            kRelocation);
 }

-nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> Execution::computeFenced(
+nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
+ExecutionWithCachedRequest::computeFenced(
         const std::vector<nn::SyncFence>& waitFor, const nn::OptionalTimePoint& deadline,
         const nn::OptionalDuration& timeoutDurationAfterFence) const {
     const auto aidlWaitFor = NN_TRY(convert(waitFor));
@@ -75,4 +78,18 @@ nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> Execu
             aidlTimeoutDurationAfterFence, kRelocation);
 }

+nn::GeneralResult<std::shared_ptr<const Execution>> Execution::create(
+        std::shared_ptr<aidl_hal::IExecution> execution, hal::utils::RequestRelocation relocation) {
+    if (execution == nullptr) {
+        return NN_ERROR() << "aidl::utils::Execution::create must have non-null execution";
+    }
+
+    return std::make_shared<const Execution>(PrivateConstructorTag{}, std::move(execution),
+                                             std::move(relocation));
+}
+
+Execution::Execution(PrivateConstructorTag /*tag*/, std::shared_ptr<aidl_hal::IExecution> execution,
+                     hal::utils::RequestRelocation relocation)
+    : kExecution(std::move(execution)), kRelocation(std::move(relocation)) {}
+
 }  // namespace aidl::android::hardware::neuralnetworks::utils
diff --git a/neuralnetworks/aidl/utils/src/PreparedModel.cpp b/neuralnetworks/aidl/utils/src/PreparedModel.cpp
index f25c2c8825..6d1de569d0 100644
--- a/neuralnetworks/aidl/utils/src/PreparedModel.cpp
+++ b/neuralnetworks/aidl/utils/src/PreparedModel.cpp
@@ -54,21 +54,77 @@ nn::GeneralResult<std::pair<nn::Timing, nn::Timing>> convertFencedExecutionResul
     return std::make_pair(NN_TRY(nn::convert(timingLaunched)), NN_TRY(nn::convert(timingFenced)));
 }

+nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> handleExecutionResult(
+        const ExecutionResult& result, const hal::utils::RequestRelocation& relocation) {
+    if (!result.outputSufficientSize) {
+        auto canonicalOutputShapes =
+                nn::convert(result.outputShapes).value_or(std::vector<nn::OutputShape>{});
+        return NN_ERROR(nn::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE, std::move(canonicalOutputShapes))
+               << "execution failed with " << nn::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE;
+    }
+    auto [outputShapes, timing] =
+            NN_TRY(convertExecutionResults(result.outputShapes, result.timing));
+
+    if (relocation.output) {
+        relocation.output->flush();
+    }
+    return std::make_pair(std::move(outputShapes), timing);
+}
+
+nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
+handleFencedExecutionResult(const FencedExecutionResult& result,
+                            const hal::utils::RequestRelocation& relocation) {
+    auto resultSyncFence = nn::SyncFence::createAsSignaled();
+    if (result.syncFence.get() != -1) {
+        resultSyncFence = nn::SyncFence::create(NN_TRY(nn::convert(result.syncFence))).value();
+    }
+
+    auto callback = result.callback;
+    if (callback == nullptr) {
+        return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "callback is null";
+    }
+
+    // If computeFenced required the request memory to be moved into shared memory, block here
+    // until the fenced execution has completed and flush the memory back.
+    if (relocation.output) {
+        const auto state = resultSyncFence.syncWait({});
+        if (state != nn::SyncFence::FenceState::SIGNALED) {
+            return NN_ERROR() << "syncWait failed with " << state;
+        }
+        relocation.output->flush();
+    }
+
+    // Create callback which can be used to retrieve the execution error status and timings.
+    nn::ExecuteFencedInfoCallback resultCallback =
+            [callback]() -> nn::GeneralResult<std::pair<nn::Timing, nn::Timing>> {
+        ErrorStatus errorStatus;
+        Timing timingLaunched;
+        Timing timingFenced;
+        const auto ret = callback->getExecutionInfo(&timingLaunched, &timingFenced, &errorStatus);
+        HANDLE_ASTATUS(ret) << "fenced execution callback getExecutionInfo failed";
+        return convertFencedExecutionResults(errorStatus, timingLaunched, timingFenced);
+    };
+
+    return std::make_pair(std::move(resultSyncFence), std::move(resultCallback));
+}
+
 }  // namespace

 nn::GeneralResult<std::shared_ptr<const PreparedModel>> PreparedModel::create(
-        std::shared_ptr<aidl_hal::IPreparedModel> preparedModel) {
+        std::shared_ptr<aidl_hal::IPreparedModel> preparedModel, nn::Version featureLevel) {
     if (preparedModel == nullptr) {
         return NN_ERROR()
                << "aidl_hal::utils::PreparedModel::create must have non-null preparedModel";
     }

-    return std::make_shared<const PreparedModel>(PrivateConstructorTag{}, std::move(preparedModel));
+    return std::make_shared<const PreparedModel>(PrivateConstructorTag{}, std::move(preparedModel),
+                                                 featureLevel);
 }

 PreparedModel::PreparedModel(PrivateConstructorTag /*tag*/,
-                             std::shared_ptr<aidl_hal::IPreparedModel> preparedModel)
-    : kPreparedModel(std::move(preparedModel)) {}
+                             std::shared_ptr<aidl_hal::IPreparedModel> preparedModel,
+                             nn::Version featureLevel)
+    : kPreparedModel(std::move(preparedModel)), kFeatureLevel(featureLevel) {}

 nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> PreparedModel::execute(
         const nn::Request& request, nn::MeasureTiming measure,
@@ -101,19 +157,7 @@ PreparedModel::executeInternal(const Request& request, bool measure, int64_t dea
     const auto ret = kPreparedModel->executeSynchronously(request, measure, deadline,
                                                           loopTimeoutDuration, &executionResult);
     HANDLE_ASTATUS(ret) << "executeSynchronously failed";
-    if (!executionResult.outputSufficientSize) {
-        auto canonicalOutputShapes =
-                nn::convert(executionResult.outputShapes).value_or(std::vector<nn::OutputShape>{});
-        return NN_ERROR(nn::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE, std::move(canonicalOutputShapes))
-               << "execution failed with " << nn::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE;
-    }
-    auto [outputShapes, timing] =
-            NN_TRY(convertExecutionResults(executionResult.outputShapes, executionResult.timing));
-
-    if (relocation.output) {
-        relocation.output->flush();
-    }
-    return std::make_pair(std::move(outputShapes), timing);
+    return handleExecutionResult(executionResult, relocation);
 }

 nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
@@ -154,39 +198,7 @@ PreparedModel::executeFencedInternal(const Request& request,
             kPreparedModel->executeFenced(request, waitFor, measure, deadline, loopTimeoutDuration,
                                           timeoutDurationAfterFence, &result);
     HANDLE_ASTATUS(ret) << "executeFenced failed";
-
-    auto resultSyncFence = nn::SyncFence::createAsSignaled();
-    if (result.syncFence.get() != -1) {
-        resultSyncFence = nn::SyncFence::create(NN_TRY(nn::convert(result.syncFence))).value();
-    }
-
-    auto callback = result.callback;
-    if (callback == nullptr) {
-        return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "callback is null";
-    }
-
-    // If executeFenced required the request memory to be moved into shared memory, block here
-    // until the fenced execution has completed and flush the memory back.
-    if (relocation.output) {
-        const auto state = resultSyncFence.syncWait({});
-        if (state != nn::SyncFence::FenceState::SIGNALED) {
-            return NN_ERROR() << "syncWait failed with " << state;
-        }
-        relocation.output->flush();
-    }
-
-    // Create callback which can be used to retrieve the execution error status and timings.
-    nn::ExecuteFencedInfoCallback resultCallback =
-            [callback]() -> nn::GeneralResult<std::pair<nn::Timing, nn::Timing>> {
-        ErrorStatus errorStatus;
-        Timing timingLaunched;
-        Timing timingFenced;
-        const auto ret = callback->getExecutionInfo(&timingLaunched, &timingFenced, &errorStatus);
-        HANDLE_ASTATUS(ret) << "fenced execution callback getExecutionInfo failed";
-        return convertFencedExecutionResults(errorStatus, timingLaunched, timingFenced);
-    };
-
-    return std::make_pair(std::move(resultSyncFence), std::move(resultCallback));
+    return handleFencedExecutionResult(result, relocation);
 }

 nn::GeneralResult<nn::SharedExecution> PreparedModel::createReusableExecution(
@@ -202,8 +214,18 @@ nn::GeneralResult<nn::SharedExecution> PreparedModel::createReusableExecution(
     auto aidlRequest = NN_TRY(convert(requestInShared));
     auto aidlMeasure = NN_TRY(convert(measure));
     auto aidlLoopTimeoutDuration = NN_TRY(convert(loopTimeoutDuration));
-    return Execution::create(shared_from_this(), std::move(aidlRequest), std::move(relocation),
-                             aidlMeasure, aidlLoopTimeoutDuration);
+
+    if (kFeatureLevel.level >= nn::Version::Level::FEATURE_LEVEL_8) {
+        std::shared_ptr<IExecution> execution;
+        const auto ret = kPreparedModel->createReusableExecution(
+                aidlRequest, aidlMeasure, aidlLoopTimeoutDuration, &execution);
+        HANDLE_ASTATUS(ret) << "createReusableExecution failed";
+        return Execution::create(std::move(execution), std::move(relocation));
+    }
+
+    return ExecutionWithCachedRequest::create(shared_from_this(), std::move(aidlRequest),
+                                              std::move(relocation), aidlMeasure,
+                                              aidlLoopTimeoutDuration);
 }

 nn::GeneralResult<nn::SharedBurst> PreparedModel::configureExecutionBurst() const {
@@ -218,4 +240,36 @@ std::any PreparedModel::getUnderlyingResource() const {
     return resource;
 }

+nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> Execution::compute(
+        const nn::OptionalTimePoint& deadline) const {
+    const auto aidlDeadline = NN_TRY(convert(deadline));
+
+    if (kRelocation.input) {
+        kRelocation.input->flush();
+    }
+
+    ExecutionResult executionResult;
+    auto ret = kExecution->executeSynchronously(aidlDeadline, &executionResult);
+    HANDLE_ASTATUS(ret) << "executeSynchronously failed";
+    return handleExecutionResult(executionResult, kRelocation);
+}
+
+nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> Execution::computeFenced(
+        const std::vector<nn::SyncFence>& waitFor, const nn::OptionalTimePoint& deadline,
+        const nn::OptionalDuration& timeoutDurationAfterFence) const {
+    const auto aidlWaitFor = NN_TRY(convert(waitFor));
+    const auto aidlDeadline = NN_TRY(convert(deadline));
+    const auto aidlTimeoutDurationAfterFence = NN_TRY(convert(timeoutDurationAfterFence));
+
+    if (kRelocation.input) {
+        kRelocation.input->flush();
+    }
+
+    FencedExecutionResult result;
+    const auto ret = kExecution->executeFenced(aidlWaitFor, aidlDeadline,
+                                               aidlTimeoutDurationAfterFence, &result);
+    HANDLE_ASTATUS(ret) << "executeFenced failed";
+    return handleFencedExecutionResult(result, kRelocation);
+}
+
 }  // namespace aidl::android::hardware::neuralnetworks::utils
```
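Beyond the new Execution wrapper, the PreparedModel.cpp change is largely a refactor: the result post-processing that previously lived inline in executeInternal and executeFencedInternal moves into the free helpers handleExecutionResult and handleFencedExecutionResult, so the one-shot path and the new reusable path share a single copy of the insufficient-output-size handling and the output-memory flush. A minimal sketch of that factoring, assuming hypothetical RawResult and Relocation stand-ins for the AIDL ExecutionResult and hal::utils::RequestRelocation:

```cpp
#include <iostream>
#include <string>

// Hypothetical stand-in for the AIDL ExecutionResult parcelable.
struct RawResult {
    bool outputSufficientSize = true;
};

// Hypothetical stand-in for hal::utils::RequestRelocation.
struct Relocation {
    bool hasOutput = false;
    void flushOutput() const { std::cout << "flush shared output memory\n"; }
};

// Shared post-processing: both the one-shot path (PreparedModel::executeInternal)
// and the reusable path (Execution::compute) funnel their driver results here,
// so the error handling and the output flush exist exactly once.
std::string handleExecutionResult(const RawResult& result, const Relocation& relocation) {
    if (!result.outputSufficientSize) {
        return "error: OUTPUT_INSUFFICIENT_SIZE";
    }
    if (relocation.hasOutput) {
        relocation.flushOutput();  // copy results back out of shared memory
    }
    return "ok";
}

int main() {
    // One-shot caller and reusable caller both reuse the same helper.
    std::cout << handleExecutionResult(RawResult{}, Relocation{true}) << '\n';
    std::cout << handleExecutionResult(RawResult{false}, Relocation{}) << '\n';
}
```

Keeping the flush next to the error handling in one helper means a future caller cannot forget to copy results back out of the relocated shared memory.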