author    Haamed Gheibi <haamed@google.com>    2022-02-09 14:35:06 -0800
committer Haamed Gheibi <haamed@google.com>    2022-02-09 14:41:16 -0800
commit    ab52181d73b04e131fd72e32d69b5123a5d6892b (patch)
tree      0ac86b537180b6fb97716b3058dfae44af9eaac7 /neuralnetworks/aidl/utils/src/PreparedModel.cpp
parent    f99b35c293439db0b7436b47b939eb8c7bf21b51 (diff)
parent    4d2548cfa7b86b79a516be9b60f6b666cc9af682 (diff)
Merge TP1A.220126.001
Change-Id: Ibf6bd2c20d9927fde8b2a05dde2b58bd8faea20f
Diffstat (limited to 'neuralnetworks/aidl/utils/src/PreparedModel.cpp')
-rw-r--r--  neuralnetworks/aidl/utils/src/PreparedModel.cpp  87
1 file changed, 62 insertions, 25 deletions
diff --git a/neuralnetworks/aidl/utils/src/PreparedModel.cpp b/neuralnetworks/aidl/utils/src/PreparedModel.cpp
index 6d1de569d0..7e3a31cac1 100644
--- a/neuralnetworks/aidl/utils/src/PreparedModel.cpp
+++ b/neuralnetworks/aidl/utils/src/PreparedModel.cpp
@@ -128,8 +128,9 @@ PreparedModel::PreparedModel(PrivateConstructorTag /*tag*/,
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> PreparedModel::execute(
const nn::Request& request, nn::MeasureTiming measure,
- const nn::OptionalTimePoint& deadline,
- const nn::OptionalDuration& loopTimeoutDuration) const {
+ const nn::OptionalTimePoint& deadline, const nn::OptionalDuration& loopTimeoutDuration,
+ const std::vector<nn::TokenValuePair>& hints,
+ const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const {
// Ensure that request is ready for IPC.
std::optional<nn::Request> maybeRequestInShared;
hal::utils::RequestRelocation relocation;
@@ -141,30 +142,46 @@ nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> Prepare
const auto aidlMeasure = NN_TRY(convert(measure));
const auto aidlDeadline = NN_TRY(convert(deadline));
const auto aidlLoopTimeoutDuration = NN_TRY(convert(loopTimeoutDuration));
- return executeInternal(aidlRequest, aidlMeasure, aidlDeadline, aidlLoopTimeoutDuration,
- relocation);
+ return executeInternal(aidlRequest, aidlMeasure, aidlDeadline, aidlLoopTimeoutDuration, hints,
+ extensionNameToPrefix, relocation);
}
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
PreparedModel::executeInternal(const Request& request, bool measure, int64_t deadline,
int64_t loopTimeoutDuration,
+ const std::vector<nn::TokenValuePair>& hints,
+ const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix,
const hal::utils::RequestRelocation& relocation) const {
if (relocation.input) {
relocation.input->flush();
}
ExecutionResult executionResult;
- const auto ret = kPreparedModel->executeSynchronously(request, measure, deadline,
- loopTimeoutDuration, &executionResult);
- HANDLE_ASTATUS(ret) << "executeSynchronously failed";
+ if (kFeatureLevel.level >= nn::Version::Level::FEATURE_LEVEL_8) {
+ auto aidlHints = NN_TRY(convert(hints));
+ auto aidlExtensionPrefix = NN_TRY(convert(extensionNameToPrefix));
+ const auto ret = kPreparedModel->executeSynchronouslyWithConfig(
+ request,
+ {measure, loopTimeoutDuration, std::move(aidlHints),
+ std::move(aidlExtensionPrefix)},
+ deadline, &executionResult);
+ HANDLE_ASTATUS(ret) << "executeSynchronouslyWithConfig failed";
+ } else {
+ const auto ret = kPreparedModel->executeSynchronously(
+ request, measure, deadline, loopTimeoutDuration, &executionResult);
+ HANDLE_ASTATUS(ret) << "executeSynchronously failed";
+ }
return handleExecutionResult(executionResult, relocation);
}
nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
-PreparedModel::executeFenced(const nn::Request& request, const std::vector<nn::SyncFence>& waitFor,
- nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline,
- const nn::OptionalDuration& loopTimeoutDuration,
- const nn::OptionalDuration& timeoutDurationAfterFence) const {
+PreparedModel::executeFenced(
+ const nn::Request& request, const std::vector<nn::SyncFence>& waitFor,
+ nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline,
+ const nn::OptionalDuration& loopTimeoutDuration,
+ const nn::OptionalDuration& timeoutDurationAfterFence,
+ const std::vector<nn::TokenValuePair>& hints,
+ const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const {
// Ensure that request is ready for IPC.
std::optional<nn::Request> maybeRequestInShared;
hal::utils::RequestRelocation relocation;
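
Both executeInternal above and executeFencedInternal below use the same gate: when the driver reports FEATURE_LEVEL_8 or later, the execution options are bundled into a single config argument for the *WithConfig entry point; otherwise the call falls back to the older method and the hints are not forwarded. The following is a minimal, self-contained sketch of that dispatch idiom, using placeholder types (FakeDriver, Config) invented for illustration rather than the real AIDL interfaces:

#include <cstdint>
#include <iostream>
#include <utility>
#include <vector>

// Stand-in for the AIDL ExecutionConfig bundle; field names are illustrative.
struct Config {
    bool measure = false;
    int64_t loopTimeoutDurationNs = -1;
    std::vector<int32_t> hints;  // simplified stand-in for TokenValuePair entries
};

// Stand-in for IPreparedModel; only the shape of the dispatch matters here.
struct FakeDriver {
    int featureLevel = 7;

    // Older entry point: options passed as individual arguments, no hints.
    void executeSynchronously(bool measure, int64_t deadline, int64_t loopTimeout) const {
        std::cout << "legacy path, measure=" << measure << ", deadline=" << deadline
                  << ", loopTimeout=" << loopTimeout << "\n";
    }
    // Newer entry point: options bundled into one config struct.
    void executeSynchronouslyWithConfig(const Config& config, int64_t deadline) const {
        std::cout << "config path, " << config.hints.size() << " hint(s), deadline=" << deadline
                  << "\n";
    }
};

constexpr int kFeatureLevel8 = 8;

// Mirrors the shape of PreparedModel::executeInternal: pick the entry point
// based on the feature level cached when the prepared model was created.
void run(const FakeDriver& driver, bool measure, int64_t deadline, int64_t loopTimeout,
         std::vector<int32_t> hints) {
    if (driver.featureLevel >= kFeatureLevel8) {
        driver.executeSynchronouslyWithConfig({measure, loopTimeout, std::move(hints)}, deadline);
    } else {
        driver.executeSynchronously(measure, deadline, loopTimeout);  // hints are dropped here
    }
}

int main() {
    run(FakeDriver{7}, /*measure=*/true, /*deadline=*/-1, /*loopTimeout=*/-1, {1, 2});
    run(FakeDriver{8}, /*measure=*/true, /*deadline=*/-1, /*loopTimeout=*/-1, {1, 2});
}
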
@@ -179,31 +196,45 @@ PreparedModel::executeFenced(const nn::Request& request, const std::vector<nn::S
const auto aidlLoopTimeoutDuration = NN_TRY(convert(loopTimeoutDuration));
const auto aidlTimeoutDurationAfterFence = NN_TRY(convert(timeoutDurationAfterFence));
return executeFencedInternal(aidlRequest, aidlWaitFor, aidlMeasure, aidlDeadline,
- aidlLoopTimeoutDuration, aidlTimeoutDurationAfterFence,
- relocation);
+ aidlLoopTimeoutDuration, aidlTimeoutDurationAfterFence, hints,
+ extensionNameToPrefix, relocation);
}
nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
-PreparedModel::executeFencedInternal(const Request& request,
- const std::vector<ndk::ScopedFileDescriptor>& waitFor,
- bool measure, int64_t deadline, int64_t loopTimeoutDuration,
- int64_t timeoutDurationAfterFence,
- const hal::utils::RequestRelocation& relocation) const {
+PreparedModel::executeFencedInternal(
+ const Request& request, const std::vector<ndk::ScopedFileDescriptor>& waitFor, bool measure,
+ int64_t deadline, int64_t loopTimeoutDuration, int64_t timeoutDurationAfterFence,
+ const std::vector<nn::TokenValuePair>& hints,
+ const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix,
+ const hal::utils::RequestRelocation& relocation) const {
if (relocation.input) {
relocation.input->flush();
}
FencedExecutionResult result;
- const auto ret =
- kPreparedModel->executeFenced(request, waitFor, measure, deadline, loopTimeoutDuration,
- timeoutDurationAfterFence, &result);
- HANDLE_ASTATUS(ret) << "executeFenced failed";
+ if (kFeatureLevel.level >= nn::Version::Level::FEATURE_LEVEL_8) {
+ auto aidlHints = NN_TRY(convert(hints));
+ auto aidlExtensionPrefix = NN_TRY(convert(extensionNameToPrefix));
+ const auto ret = kPreparedModel->executeFencedWithConfig(
+ request, waitFor,
+ {measure, loopTimeoutDuration, std::move(aidlHints),
+ std::move(aidlExtensionPrefix)},
+ deadline, timeoutDurationAfterFence, &result);
+ HANDLE_ASTATUS(ret) << "executeFencedWithConfig failed";
+ } else {
+ const auto ret = kPreparedModel->executeFenced(request, waitFor, measure, deadline,
+ loopTimeoutDuration,
+ timeoutDurationAfterFence, &result);
+ HANDLE_ASTATUS(ret) << "executeFenced failed";
+ }
return handleFencedExecutionResult(result, relocation);
}
nn::GeneralResult<nn::SharedExecution> PreparedModel::createReusableExecution(
const nn::Request& request, nn::MeasureTiming measure,
- const nn::OptionalDuration& loopTimeoutDuration) const {
+ const nn::OptionalDuration& loopTimeoutDuration,
+ const std::vector<nn::TokenValuePair>& hints,
+ const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const {
// Ensure that request is ready for IPC.
std::optional<nn::Request> maybeRequestInShared;
hal::utils::RequestRelocation relocation;
@@ -217,8 +248,14 @@ nn::GeneralResult<nn::SharedExecution> PreparedModel::createReusableExecution(
if (kFeatureLevel.level >= nn::Version::Level::FEATURE_LEVEL_8) {
std::shared_ptr<IExecution> execution;
+ auto aidlHints = NN_TRY(convert(hints));
+ auto aidlExtensionPrefix = NN_TRY(convert(extensionNameToPrefix));
+
const auto ret = kPreparedModel->createReusableExecution(
- aidlRequest, aidlMeasure, aidlLoopTimeoutDuration, &execution);
+ aidlRequest,
+ {aidlMeasure, aidlLoopTimeoutDuration, std::move(aidlHints),
+ std::move(aidlExtensionPrefix)},
+ &execution);
HANDLE_ASTATUS(ret) << "createReusableExecution failed";
return Execution::create(std::move(execution), std::move(relocation));
}
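
The brace-initialized config argument in these *WithConfig and createReusableExecution calls relies on positional aggregate initialization, so the order (measure, loop timeout, hints, extension prefixes) has to track the field order of the generated ExecutionConfig parcelable. A small sketch of that pattern with stand-in types; the field names below are assumptions for illustration, not copied from the AIDL:

#include <cstdint>
#include <string>
#include <utility>
#include <vector>

struct FakeTokenValuePair { int32_t token; std::vector<uint8_t> value; };
struct FakeExtensionNameAndPrefix { std::string name; uint16_t prefix; };

// Stand-in for the generated ExecutionConfig parcelable: a plain struct with
// public fields, which is why positional brace-initialization works at all.
struct FakeExecutionConfig {
    bool measureTiming = false;
    int64_t loopTimeoutDurationNs = -1;
    std::vector<FakeTokenValuePair> executionHints;
    std::vector<FakeExtensionNameAndPrefix> extensionNameToPrefix;
};

FakeExecutionConfig makeConfig(bool measure, int64_t loopTimeoutDurationNs,
                               std::vector<FakeTokenValuePair> hints,
                               std::vector<FakeExtensionNameAndPrefix> prefixes) {
    // Positional brace-init, mirroring the call sites in the diff above: the
    // argument order must match the field declaration order exactly.
    return {measure, loopTimeoutDurationNs, std::move(hints), std::move(prefixes)};
}

int main() {
    auto config = makeConfig(/*measure=*/true, /*loopTimeoutDurationNs=*/-1,
                             {{/*token=*/1, /*value=*/{0x01}}},
                             {{"com.example.ext", /*prefix=*/0x1234}});
    return config.executionHints.size() == 1 ? 0 : 1;
}
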
@@ -232,7 +269,7 @@ nn::GeneralResult<nn::SharedBurst> PreparedModel::configureExecutionBurst() cons
std::shared_ptr<IBurst> burst;
const auto ret = kPreparedModel->configureExecutionBurst(&burst);
HANDLE_ASTATUS(ret) << "configureExecutionBurst failed";
- return Burst::create(std::move(burst));
+ return Burst::create(std::move(burst), kFeatureLevel);
}
std::any PreparedModel::getUnderlyingResource() const {
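
For a caller of the canonical layer, the two new parameters carry a list of token/value execution hints and the name-to-prefix table that lets the driver resolve extension tokens. The sketch below uses simplified local stand-ins for nn::TokenValuePair and nn::ExtensionNameAndPrefix (assumed field layouts, not copied from nnapi/Types.h); the 16-bit prefix/type split follows the usual NNAPI extension encoding and should be checked against the HAL documentation.

#include <cstdint>
#include <cstdio>
#include <string>
#include <vector>

// Simplified stand-ins for the canonical types; field names are assumptions.
struct Hint { int32_t token; std::vector<uint8_t> value; };   // ~ nn::TokenValuePair
struct NamePrefix { std::string name; uint16_t prefix; };     // ~ nn::ExtensionNameAndPrefix

// NNAPI encodes extension types as (prefix << 16) | typeWithinExtension, so a
// driver that receives the name<->prefix table can map a vendor token back to
// the extension it belongs to.
constexpr int32_t makeExtensionToken(uint16_t prefix, uint16_t typeWithinExtension) {
    return static_cast<int32_t>((static_cast<uint32_t>(prefix) << 16) | typeWithinExtension);
}

int main() {
    const NamePrefix vendorExt{"com.example.fast_math", 0x0001};   // hypothetical extension
    const std::vector<NamePrefix> extensionNameToPrefix = {vendorExt};

    // A hypothetical vendor hint: the token identifies the attribute and the
    // value blob is opaque to the framework, interpreted only by the driver.
    const std::vector<Hint> hints = {
            {makeExtensionToken(vendorExt.prefix, /*typeWithinExtension=*/0x0002), {0x01}},
    };

    std::printf("hint token = 0x%08x, %zu prefix mapping(s)\n",
                static_cast<unsigned>(hints[0].token), extensionNameToPrefix.size());
}
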