Diffstat (limited to 'neuralnetworks/utils/common/src')
6 files changed, 94 insertions, 52 deletions
diff --git a/neuralnetworks/utils/common/src/InvalidBurst.cpp b/neuralnetworks/utils/common/src/InvalidBurst.cpp
index 01915337c4..3fdfb5cec3 100644
--- a/neuralnetworks/utils/common/src/InvalidBurst.cpp
+++ b/neuralnetworks/utils/common/src/InvalidBurst.cpp
@@ -34,13 +34,17 @@ InvalidBurst::OptionalCacheHold InvalidBurst::cacheMemory(
 nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> InvalidBurst::execute(
         const nn::Request& /*request*/, nn::MeasureTiming /*measure*/,
         const nn::OptionalTimePoint& /*deadline*/,
-        const nn::OptionalDuration& /*loopTimeoutDuration*/) const {
+        const nn::OptionalDuration& /*loopTimeoutDuration*/,
+        const std::vector<nn::TokenValuePair>& /*hints*/,
+        const std::vector<nn::ExtensionNameAndPrefix>& /*extensionNameToPrefix*/) const {
     return NN_ERROR() << "InvalidBurst";
 }
 
 nn::GeneralResult<nn::SharedExecution> InvalidBurst::createReusableExecution(
         const nn::Request& /*request*/, nn::MeasureTiming /*measure*/,
-        const nn::OptionalDuration& /*loopTimeoutDuration*/) const {
+        const nn::OptionalDuration& /*loopTimeoutDuration*/,
+        const std::vector<nn::TokenValuePair>& /*hints*/,
+        const std::vector<nn::ExtensionNameAndPrefix>& /*extensionNameToPrefix*/) const {
     return NN_ERROR() << "InvalidBurst";
 }
diff --git a/neuralnetworks/utils/common/src/InvalidDevice.cpp b/neuralnetworks/utils/common/src/InvalidDevice.cpp
index 535ccb41c7..c8cc287573 100644
--- a/neuralnetworks/utils/common/src/InvalidDevice.cpp
+++ b/neuralnetworks/utils/common/src/InvalidDevice.cpp
@@ -84,7 +84,9 @@ nn::GeneralResult<nn::SharedPreparedModel> InvalidDevice::prepareModel(
         const nn::Model& /*model*/, nn::ExecutionPreference /*preference*/,
         nn::Priority /*priority*/, nn::OptionalTimePoint /*deadline*/,
         const std::vector<nn::SharedHandle>& /*modelCache*/,
-        const std::vector<nn::SharedHandle>& /*dataCache*/, const nn::CacheToken& /*token*/) const {
+        const std::vector<nn::SharedHandle>& /*dataCache*/, const nn::CacheToken& /*token*/,
+        const std::vector<nn::TokenValuePair>& /*hints*/,
+        const std::vector<nn::ExtensionNameAndPrefix>& /*extensionNameToPrefix*/) const {
     return NN_ERROR() << "InvalidDevice";
 }
diff --git a/neuralnetworks/utils/common/src/InvalidPreparedModel.cpp b/neuralnetworks/utils/common/src/InvalidPreparedModel.cpp
index 8195462ba8..f6f978d8c8 100644
--- a/neuralnetworks/utils/common/src/InvalidPreparedModel.cpp
+++ b/neuralnetworks/utils/common/src/InvalidPreparedModel.cpp
@@ -27,9 +27,12 @@ namespace android::hardware::neuralnetworks::utils {
 
 nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
-InvalidPreparedModel::execute(const nn::Request& /*request*/, nn::MeasureTiming /*measure*/,
-                              const nn::OptionalTimePoint& /*deadline*/,
-                              const nn::OptionalDuration& /*loopTimeoutDuration*/) const {
+InvalidPreparedModel::execute(
+        const nn::Request& /*request*/, nn::MeasureTiming /*measure*/,
+        const nn::OptionalTimePoint& /*deadline*/,
+        const nn::OptionalDuration& /*loopTimeoutDuration*/,
+        const std::vector<nn::TokenValuePair>& /*hints*/,
+        const std::vector<nn::ExtensionNameAndPrefix>& /*extensionNameToPrefix*/) const {
     return NN_ERROR() << "InvalidPreparedModel";
 }
 
@@ -38,13 +41,17 @@ InvalidPreparedModel::executeFenced(
         const nn::Request& /*request*/, const std::vector<nn::SyncFence>& /*waitFor*/,
         nn::MeasureTiming /*measure*/, const nn::OptionalTimePoint& /*deadline*/,
         const nn::OptionalDuration& /*loopTimeoutDuration*/,
-        const nn::OptionalDuration& /*timeoutDurationAfterFence*/) const {
+        const nn::OptionalDuration& /*timeoutDurationAfterFence*/,
+        const std::vector<nn::TokenValuePair>& /*hints*/,
+        const std::vector<nn::ExtensionNameAndPrefix>& /*extensionNameToPrefix*/) const {
     return NN_ERROR() << "InvalidPreparedModel";
 }
 
 nn::GeneralResult<nn::SharedExecution> InvalidPreparedModel::createReusableExecution(
         const nn::Request& /*request*/, nn::MeasureTiming /*measure*/,
-        const nn::OptionalDuration& /*loopTimeoutDuration*/) const {
+        const nn::OptionalDuration& /*loopTimeoutDuration*/,
+        const std::vector<nn::TokenValuePair>& /*hints*/,
+        const std::vector<nn::ExtensionNameAndPrefix>& /*extensionNameToPrefix*/) const {
     return NN_ERROR() << "InvalidPreparedModel";
 }
diff --git a/neuralnetworks/utils/common/src/ResilientBurst.cpp b/neuralnetworks/utils/common/src/ResilientBurst.cpp
index 79cbe3991f..bf7a8ea130 100644
--- a/neuralnetworks/utils/common/src/ResilientBurst.cpp
+++ b/neuralnetworks/utils/common/src/ResilientBurst.cpp
@@ -105,37 +105,49 @@ ResilientBurst::OptionalCacheHold ResilientBurst::cacheMemory(
 nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> ResilientBurst::execute(
         const nn::Request& request, nn::MeasureTiming measure,
-        const nn::OptionalTimePoint& deadline,
-        const nn::OptionalDuration& loopTimeoutDuration) const {
-    const auto fn = [&request, measure, deadline, loopTimeoutDuration](const nn::IBurst& burst) {
-        return burst.execute(request, measure, deadline, loopTimeoutDuration);
+        const nn::OptionalTimePoint& deadline, const nn::OptionalDuration& loopTimeoutDuration,
+        const std::vector<nn::TokenValuePair>& hints,
+        const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const {
+    const auto fn = [&request, measure, deadline, loopTimeoutDuration, &hints,
+                     &extensionNameToPrefix](const nn::IBurst& burst) {
+        return burst.execute(request, measure, deadline, loopTimeoutDuration, hints,
+                             extensionNameToPrefix);
     };
     return protect(*this, fn);
 }
 
 nn::GeneralResult<nn::SharedExecution> ResilientBurst::createReusableExecution(
         const nn::Request& request, nn::MeasureTiming measure,
-        const nn::OptionalDuration& loopTimeoutDuration) const {
+        const nn::OptionalDuration& loopTimeoutDuration,
+        const std::vector<nn::TokenValuePair>& hints,
+        const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const {
 #if 0
     auto self = shared_from_this();
-    ResilientExecution::Factory makeExecution =
-            [burst = std::move(self), request, measure, loopTimeoutDuration] {
-        return burst->createReusableExecutionInternal(request, measure, loopTimeoutDuration);
+    ResilientExecution::Factory makeExecution = [burst = std::move(self), request, measure,
+                                                 loopTimeoutDuration, &hints,
+                                                 &extensionNameToPrefix] {
+        return burst->createReusableExecutionInternal(request, measure, loopTimeoutDuration, hints,
+                                                      extensionNameToPrefix);
     };
     return ResilientExecution::create(std::move(makeExecution));
 #else
-    return createReusableExecutionInternal(request, measure, loopTimeoutDuration);
+    return createReusableExecutionInternal(request, measure, loopTimeoutDuration, hints,
+                                           extensionNameToPrefix);
 #endif
 }
 
 nn::GeneralResult<nn::SharedExecution> ResilientBurst::createReusableExecutionInternal(
         const nn::Request& request, nn::MeasureTiming measure,
-        const nn::OptionalDuration& loopTimeoutDuration) const {
+        const nn::OptionalDuration& loopTimeoutDuration,
+        const std::vector<nn::TokenValuePair>& hints,
+        const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const {
     if (!isValidInternal()) {
         return std::make_shared<const InvalidExecution>();
     }
-    const auto fn = [&request, measure, &loopTimeoutDuration](const nn::IBurst& burst) {
-        return burst.createReusableExecution(request, measure, loopTimeoutDuration);
+    const auto fn = [&request, measure, &loopTimeoutDuration, &hints,
+                     &extensionNameToPrefix](const nn::IBurst& burst) {
+        return burst.createReusableExecution(request, measure, loopTimeoutDuration, hints,
+                                             extensionNameToPrefix);
     };
     return protect(*this, fn);
 }
diff --git a/neuralnetworks/utils/common/src/ResilientDevice.cpp b/neuralnetworks/utils/common/src/ResilientDevice.cpp
index 2023c9af30..a5c2640b76 100644
--- a/neuralnetworks/utils/common/src/ResilientDevice.cpp
+++ b/neuralnetworks/utils/common/src/ResilientDevice.cpp
@@ -179,19 +179,21 @@ nn::GeneralResult<std::vector<bool>> ResilientDevice::getSupportedOperations(
 nn::GeneralResult<nn::SharedPreparedModel> ResilientDevice::prepareModel(
         const nn::Model& model, nn::ExecutionPreference preference, nn::Priority priority,
         nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
-        const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token) const {
+        const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token,
+        const std::vector<nn::TokenValuePair>& hints,
+        const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const {
 #if 0
     auto self = shared_from_this();
     ResilientPreparedModel::Factory makePreparedModel =
             [device = std::move(self), model, preference, priority, deadline, modelCache,
-             dataCache, token] {
+             dataCache, token, hints, extensionNameToPrefix] {
         return device->prepareModelInternal(model, preference, priority, deadline, modelCache,
-                                            dataCache, token);
+                                            dataCache, token, hints, extensionNameToPrefix);
     };
     return ResilientPreparedModel::create(std::move(makePreparedModel));
 #else
-    return prepareModelInternal(model, preference, priority, deadline, modelCache, dataCache,
-                                token);
+    return prepareModelInternal(model, preference, priority, deadline, modelCache, dataCache, token,
+                                hints, extensionNameToPrefix);
 #endif
 }
 
@@ -234,14 +236,16 @@ bool ResilientDevice::isValidInternal() const {
 nn::GeneralResult<nn::SharedPreparedModel> ResilientDevice::prepareModelInternal(
         const nn::Model& model, nn::ExecutionPreference preference, nn::Priority priority,
         nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
-        const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token) const {
+        const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token,
+        const std::vector<nn::TokenValuePair>& hints,
+        const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const {
     if (!isValidInternal()) {
        return std::make_shared<const InvalidPreparedModel>();
    }
-    const auto fn = [&model, preference, priority, &deadline, &modelCache, &dataCache,
-                     &token](const nn::IDevice& device) {
+    const auto fn = [&model, preference, priority, &deadline, &modelCache, &dataCache, &token,
+                     &hints, &extensionNameToPrefix](const nn::IDevice& device) {
         return device.prepareModel(model, preference, priority, deadline, modelCache, dataCache,
-                                   token);
+                                   token, hints, extensionNameToPrefix);
     };
     return protect(*this, fn, /*blocking=*/false);
 }
diff --git a/neuralnetworks/utils/common/src/ResilientPreparedModel.cpp b/neuralnetworks/utils/common/src/ResilientPreparedModel.cpp
index 1ae19bc6ca..b5843c0fd4 100644
--- a/neuralnetworks/utils/common/src/ResilientPreparedModel.cpp
+++ b/neuralnetworks/utils/common/src/ResilientPreparedModel.cpp
@@ -104,43 +104,53 @@ nn::GeneralResult<nn::SharedPreparedModel> ResilientPreparedModel::recover(
 }
 
 nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
-ResilientPreparedModel::execute(const nn::Request& request, nn::MeasureTiming measure,
-                                const nn::OptionalTimePoint& deadline,
-                                const nn::OptionalDuration& loopTimeoutDuration) const {
-    const auto fn = [&request, measure, &deadline,
-                     &loopTimeoutDuration](const nn::IPreparedModel& preparedModel) {
-        return preparedModel.execute(request, measure, deadline, loopTimeoutDuration);
+ResilientPreparedModel::execute(
+        const nn::Request& request, nn::MeasureTiming measure,
+        const nn::OptionalTimePoint& deadline, const nn::OptionalDuration& loopTimeoutDuration,
+        const std::vector<nn::TokenValuePair>& hints,
+        const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const {
+    const auto fn = [&request, measure, &deadline, &loopTimeoutDuration, &hints,
+                     &extensionNameToPrefix](const nn::IPreparedModel& preparedModel) {
+        return preparedModel.execute(request, measure, deadline, loopTimeoutDuration, hints,
+                                     extensionNameToPrefix);
     };
     return protect(*this, fn);
 }
 
 nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
-ResilientPreparedModel::executeFenced(const nn::Request& request,
-                                      const std::vector<nn::SyncFence>& waitFor,
-                                      nn::MeasureTiming measure,
-                                      const nn::OptionalTimePoint& deadline,
-                                      const nn::OptionalDuration& loopTimeoutDuration,
-                                      const nn::OptionalDuration& timeoutDurationAfterFence) const {
+ResilientPreparedModel::executeFenced(
+        const nn::Request& request, const std::vector<nn::SyncFence>& waitFor,
+        nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline,
+        const nn::OptionalDuration& loopTimeoutDuration,
+        const nn::OptionalDuration& timeoutDurationAfterFence,
+        const std::vector<nn::TokenValuePair>& hints,
+        const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const {
     const auto fn = [&request, &waitFor, measure, &deadline, &loopTimeoutDuration,
-                     &timeoutDurationAfterFence](const nn::IPreparedModel& preparedModel) {
+                     &timeoutDurationAfterFence, &hints,
+                     &extensionNameToPrefix](const nn::IPreparedModel& preparedModel) {
         return preparedModel.executeFenced(request, waitFor, measure, deadline, loopTimeoutDuration,
-                                           timeoutDurationAfterFence);
+                                           timeoutDurationAfterFence, hints, extensionNameToPrefix);
     };
     return protect(*this, fn);
 }
 
 nn::GeneralResult<nn::SharedExecution> ResilientPreparedModel::createReusableExecution(
         const nn::Request& request, nn::MeasureTiming measure,
-        const nn::OptionalDuration& loopTimeoutDuration) const {
+        const nn::OptionalDuration& loopTimeoutDuration,
+        const std::vector<nn::TokenValuePair>& hints,
+        const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const {
 #if 0
     auto self = shared_from_this();
-    ResilientExecution::Factory makeExecution =
-            [preparedModel = std::move(self), request, measure, loopTimeoutDuration] {
-        return preparedModel->createReusableExecutionInternal(request, measure, loopTimeoutDuration);
+    ResilientExecution::Factory makeExecution = [preparedModel = std::move(self), request, measure,
+                                                 loopTimeoutDuration, hints,
+                                                 extensionNameToPrefix] {
+        return preparedModel->createReusableExecutionInternal(request, measure, loopTimeoutDuration,
+                                                              hints, extensionNameToPrefix);
     };
     return ResilientExecution::create(std::move(makeExecution));
 #else
-    return createReusableExecutionInternal(request, measure, loopTimeoutDuration);
+    return createReusableExecutionInternal(request, measure, loopTimeoutDuration, hints,
+                                           extensionNameToPrefix);
 #endif
 }
 
@@ -159,13 +169,16 @@ nn::GeneralResult<nn::SharedBurst> ResilientPreparedModel::configureExecutionBur
 nn::GeneralResult<nn::SharedExecution> ResilientPreparedModel::createReusableExecutionInternal(
         const nn::Request& request, nn::MeasureTiming measure,
-        const nn::OptionalDuration& loopTimeoutDuration) const {
+        const nn::OptionalDuration& loopTimeoutDuration,
+        const std::vector<nn::TokenValuePair>& hints,
+        const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const {
     if (!isValidInternal()) {
         return std::make_shared<const InvalidExecution>();
     }
-    const auto fn = [&request, measure,
-                     &loopTimeoutDuration](const nn::IPreparedModel& preparedModel) {
-        return preparedModel.createReusableExecution(request, measure, loopTimeoutDuration);
+    const auto fn = [&request, measure, &loopTimeoutDuration, &hints,
+                     &extensionNameToPrefix](const nn::IPreparedModel& preparedModel) {
+        return preparedModel.createReusableExecution(request, measure, loopTimeoutDuration, hints,
+                                                     extensionNameToPrefix);
     };
     return protect(*this, fn);
 }
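
Note: the change is mechanical. Every wrapper method gains two trailing parameters, hints (a vector of nn::TokenValuePair) and extensionNameToPrefix (a vector of nn::ExtensionNameAndPrefix); the Invalid* stubs ignore them, while the Resilient* wrappers capture them in their forwarding lambdas and pass them through to the wrapped object. The sketch below illustrates that forwarding pattern in isolation; all types and classes in it (TokenValuePair, ExtensionNameAndPrefix, Burst, ResilientBurst, protect) are simplified stand-ins for illustration, not the real NNAPI utility classes.

// Minimal, self-contained sketch of the forwarding pattern used in the diff:
// the resilient wrapper captures the two new parameters by reference in a
// lambda and forwards them to the wrapped object via a protect()-like helper.
#include <cstdint>
#include <iostream>
#include <string>
#include <vector>

struct TokenValuePair {
    int32_t token;
    std::vector<uint8_t> value;
};

struct ExtensionNameAndPrefix {
    std::string name;
    uint16_t prefix;
};

// Stand-in for the underlying burst/prepared-model object.
class Burst {
  public:
    int execute(const std::vector<TokenValuePair>& hints,
                const std::vector<ExtensionNameAndPrefix>& extensionNameToPrefix) const {
        // A real implementation would run the request; this stub only reports
        // how many extra arguments were forwarded to it.
        return static_cast<int>(hints.size() + extensionNameToPrefix.size());
    }
};

// Stand-in for the resilient wrapper: it builds a callable that forwards every
// argument, including the newly added ones, and hands it to protect().
class ResilientBurst {
  public:
    int execute(const std::vector<TokenValuePair>& hints,
                const std::vector<ExtensionNameAndPrefix>& extensionNameToPrefix) const {
        const auto fn = [&hints, &extensionNameToPrefix](const Burst& burst) {
            return burst.execute(hints, extensionNameToPrefix);
        };
        return protect(fn);
    }

  private:
    template <typename Fn>
    int protect(const Fn& fn) const {
        // The real protect() retries on a dead remote object; this sketch just
        // invokes the callable once on the wrapped instance.
        return fn(mBurst);
    }

    Burst mBurst;
};

int main() {
    const std::vector<TokenValuePair> hints = {{/*token=*/1, /*value=*/{0x2a}}};
    const std::vector<ExtensionNameAndPrefix> prefixes = {{"com.example.extension", 0x1234}};
    std::cout << ResilientBurst{}.execute(hints, prefixes) << '\n';  // prints 2
}

In the actual diff the same shape appears in every Resilient* method: the lambda's capture list grows by &hints and &extensionNameToPrefix, and the wrapped call grows by the two matching arguments, so the protect()-based retry logic itself is untouched.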