diff options
Diffstat (limited to 'neuralnetworks/1.2/utils/src')
-rw-r--r--  neuralnetworks/1.2/utils/src/Burst.cpp         | 16
-rw-r--r--  neuralnetworks/1.2/utils/src/Conversions.cpp   |  6
-rw-r--r--  neuralnetworks/1.2/utils/src/Device.cpp        |  4
-rw-r--r--  neuralnetworks/1.2/utils/src/PreparedModel.cpp | 21
4 files changed, 29 insertions, 18 deletions
diff --git a/neuralnetworks/1.2/utils/src/Burst.cpp b/neuralnetworks/1.2/utils/src/Burst.cpp index 911fbfa981..23e80709a0 100644 --- a/neuralnetworks/1.2/utils/src/Burst.cpp +++ b/neuralnetworks/1.2/utils/src/Burst.cpp @@ -305,8 +305,9 @@ Burst::OptionalCacheHold Burst::cacheMemory(const nn::SharedMemory& memory) cons nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> Burst::execute( const nn::Request& request, nn::MeasureTiming measure, - const nn::OptionalTimePoint& deadline, - const nn::OptionalDuration& loopTimeoutDuration) const { + const nn::OptionalTimePoint& deadline, const nn::OptionalDuration& loopTimeoutDuration, + const std::vector<nn::TokenValuePair>& /*hints*/, + const std::vector<nn::ExtensionNameAndPrefix>& /*extensionNameToPrefix*/) const { // This is the first point when we know an execution is occurring, so begin to collect // systraces. Note that the first point we can begin collecting systraces in // ExecutionBurstServer is when the RequestChannelReceiver realizes there is data in the FMQ, so @@ -317,7 +318,7 @@ nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> Burst:: // fall back to another execution path if (!compliantVersion(request).ok()) { // fallback to another execution path if the packet could not be sent - return kPreparedModel->execute(request, measure, deadline, loopTimeoutDuration); + return kPreparedModel->execute(request, measure, deadline, loopTimeoutDuration, {}, {}); } // ensure that request is ready for IPC @@ -346,7 +347,7 @@ nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> Burst:: // send request packet const auto requestPacket = serialize(hidlRequest, hidlMeasure, slots); const auto fallback = [this, &request, measure, &deadline, &loopTimeoutDuration] { - return kPreparedModel->execute(request, measure, deadline, loopTimeoutDuration); + return kPreparedModel->execute(request, measure, deadline, loopTimeoutDuration, {}, {}); }; return 
executeInternal(requestPacket, relocation, fallback); } @@ -354,14 +355,17 @@ nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> Burst:: // See IBurst::createReusableExecution for information on this method. nn::GeneralResult<nn::SharedExecution> Burst::createReusableExecution( const nn::Request& request, nn::MeasureTiming measure, - const nn::OptionalDuration& loopTimeoutDuration) const { + const nn::OptionalDuration& loopTimeoutDuration, + const std::vector<nn::TokenValuePair>& /*hints*/, + const std::vector<nn::ExtensionNameAndPrefix>& /*extensionNameToPrefix*/) const { NNTRACE_RT(NNTRACE_PHASE_EXECUTION, "Burst::createReusableExecution"); // if the request is valid but of a higher version than what's supported in burst execution, // fall back to another execution path if (!compliantVersion(request).ok()) { // fallback to another execution path if the packet could not be sent - return kPreparedModel->createReusableExecution(request, measure, loopTimeoutDuration); + return kPreparedModel->createReusableExecution(request, measure, loopTimeoutDuration, {}, + {}); } // ensure that request is ready for IPC diff --git a/neuralnetworks/1.2/utils/src/Conversions.cpp b/neuralnetworks/1.2/utils/src/Conversions.cpp index 838d9c4717..62ec2ed6c6 100644 --- a/neuralnetworks/1.2/utils/src/Conversions.cpp +++ b/neuralnetworks/1.2/utils/src/Conversions.cpp @@ -212,9 +212,9 @@ GeneralResult<Model> unvalidatedConvert(const hal::V1_2::Model& model) { }; } -GeneralResult<Model::ExtensionNameAndPrefix> unvalidatedConvert( +GeneralResult<ExtensionNameAndPrefix> unvalidatedConvert( const hal::V1_2::Model::ExtensionNameAndPrefix& extensionNameAndPrefix) { - return Model::ExtensionNameAndPrefix{ + return ExtensionNameAndPrefix{ .name = extensionNameAndPrefix.name, .prefix = extensionNameAndPrefix.prefix, }; @@ -495,7 +495,7 @@ nn::GeneralResult<Model> unvalidatedConvert(const nn::Model& model) { } nn::GeneralResult<Model::ExtensionNameAndPrefix> unvalidatedConvert( 
- const nn::Model::ExtensionNameAndPrefix& extensionNameAndPrefix) { + const nn::ExtensionNameAndPrefix& extensionNameAndPrefix) { return Model::ExtensionNameAndPrefix{ .name = extensionNameAndPrefix.name, .prefix = extensionNameAndPrefix.prefix, diff --git a/neuralnetworks/1.2/utils/src/Device.cpp b/neuralnetworks/1.2/utils/src/Device.cpp index e7acecdf7a..3a58d2c7cc 100644 --- a/neuralnetworks/1.2/utils/src/Device.cpp +++ b/neuralnetworks/1.2/utils/src/Device.cpp @@ -236,7 +236,9 @@ nn::GeneralResult<std::vector<bool>> Device::getSupportedOperations(const nn::Mo nn::GeneralResult<nn::SharedPreparedModel> Device::prepareModel( const nn::Model& model, nn::ExecutionPreference preference, nn::Priority /*priority*/, nn::OptionalTimePoint /*deadline*/, const std::vector<nn::SharedHandle>& modelCache, - const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token) const { + const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token, + const std::vector<nn::TokenValuePair>& /*hints*/, + const std::vector<nn::ExtensionNameAndPrefix>& /*extensionNameToPrefix*/) const { // Ensure that model is ready for IPC. 
std::optional<nn::Model> maybeModelInShared; const nn::Model& modelInShared = diff --git a/neuralnetworks/1.2/utils/src/PreparedModel.cpp b/neuralnetworks/1.2/utils/src/PreparedModel.cpp index 6df3df332a..feb3951a4a 100644 --- a/neuralnetworks/1.2/utils/src/PreparedModel.cpp +++ b/neuralnetworks/1.2/utils/src/PreparedModel.cpp @@ -91,7 +91,9 @@ PreparedModel::executeAsynchronously(const V1_0::Request& request, MeasureTiming nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> PreparedModel::execute( const nn::Request& request, nn::MeasureTiming measure, const nn::OptionalTimePoint& /*deadline*/, - const nn::OptionalDuration& /*loopTimeoutDuration*/) const { + const nn::OptionalDuration& /*loopTimeoutDuration*/, + const std::vector<nn::TokenValuePair>& /*hints*/, + const std::vector<nn::ExtensionNameAndPrefix>& /*extensionNameToPrefix*/) const { // Ensure that request is ready for IPC. std::optional<nn::Request> maybeRequestInShared; hal::utils::RequestRelocation relocation; @@ -123,19 +125,22 @@ PreparedModel::executeInternal(const V1_0::Request& request, MeasureTiming measu } nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> -PreparedModel::executeFenced(const nn::Request& /*request*/, - const std::vector<nn::SyncFence>& /*waitFor*/, - nn::MeasureTiming /*measure*/, - const nn::OptionalTimePoint& /*deadline*/, - const nn::OptionalDuration& /*loopTimeoutDuration*/, - const nn::OptionalDuration& /*timeoutDurationAfterFence*/) const { +PreparedModel::executeFenced( + const nn::Request& /*request*/, const std::vector<nn::SyncFence>& /*waitFor*/, + nn::MeasureTiming /*measure*/, const nn::OptionalTimePoint& /*deadline*/, + const nn::OptionalDuration& /*loopTimeoutDuration*/, + const nn::OptionalDuration& /*timeoutDurationAfterFence*/, + const std::vector<nn::TokenValuePair>& /*hints*/, + const std::vector<nn::ExtensionNameAndPrefix>& /*extensionNameToPrefix*/) const { return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << 
"IPreparedModel::executeFenced is not supported on 1.2 HAL service"; } nn::GeneralResult<nn::SharedExecution> PreparedModel::createReusableExecution( const nn::Request& request, nn::MeasureTiming measure, - const nn::OptionalDuration& /*loopTimeoutDuration*/) const { + const nn::OptionalDuration& /*loopTimeoutDuration*/, + const std::vector<nn::TokenValuePair>& /*hints*/, + const std::vector<nn::ExtensionNameAndPrefix>& /*extensionNameToPrefix*/) const { // Ensure that request is ready for IPC. std::optional<nn::Request> maybeRequestInShared; hal::utils::RequestRelocation relocation; |