diff options
Diffstat (limited to 'neuralnetworks/utils/common')
17 files changed, 186 insertions, 118 deletions
diff --git a/neuralnetworks/utils/common/Android.bp b/neuralnetworks/utils/common/Android.bp index 39927a363c..bfba24fc1f 100644 --- a/neuralnetworks/utils/common/Android.bp +++ b/neuralnetworks/utils/common/Android.bp @@ -39,20 +39,12 @@ cc_test { srcs: ["test/*.cpp"], static_libs: [ "libgmock", - "libneuralnetworks_common", "neuralnetworks_types", "neuralnetworks_utils_hal_common", ], shared_libs: [ - "android.hidl.allocator@1.0", - "android.hidl.memory@1.0", "libbase", "libcutils", - "libfmq", - "libhidlbase", - "libhidlmemory", - "liblog", - "libutils", ], target: { android: { diff --git a/neuralnetworks/utils/common/include/nnapi/hal/InvalidBurst.h b/neuralnetworks/utils/common/include/nnapi/hal/InvalidBurst.h index e86eddab88..1f1245f1bd 100644 --- a/neuralnetworks/utils/common/include/nnapi/hal/InvalidBurst.h +++ b/neuralnetworks/utils/common/include/nnapi/hal/InvalidBurst.h @@ -33,12 +33,15 @@ class InvalidBurst final : public nn::IBurst { nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> execute( const nn::Request& request, nn::MeasureTiming measure, - const nn::OptionalTimePoint& deadline, - const nn::OptionalDuration& loopTimeoutDuration) const override; + const nn::OptionalTimePoint& deadline, const nn::OptionalDuration& loopTimeoutDuration, + const std::vector<nn::TokenValuePair>& hints, + const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override; nn::GeneralResult<nn::SharedExecution> createReusableExecution( const nn::Request& request, nn::MeasureTiming measure, - const nn::OptionalDuration& loopTimeoutDuration) const override; + const nn::OptionalDuration& loopTimeoutDuration, + const std::vector<nn::TokenValuePair>& hints, + const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override; }; } // namespace android::hardware::neuralnetworks::utils diff --git a/neuralnetworks/utils/common/include/nnapi/hal/InvalidDevice.h 
b/neuralnetworks/utils/common/include/nnapi/hal/InvalidDevice.h index 5e62b9ae0b..9582873009 100644 --- a/neuralnetworks/utils/common/include/nnapi/hal/InvalidDevice.h +++ b/neuralnetworks/utils/common/include/nnapi/hal/InvalidDevice.h @@ -52,8 +52,9 @@ class InvalidDevice final : public nn::IDevice { nn::GeneralResult<nn::SharedPreparedModel> prepareModel( const nn::Model& model, nn::ExecutionPreference preference, nn::Priority priority, nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache, - const std::vector<nn::SharedHandle>& dataCache, - const nn::CacheToken& token) const override; + const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token, + const std::vector<nn::TokenValuePair>& hints, + const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override; nn::GeneralResult<nn::SharedPreparedModel> prepareModelFromCache( nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache, diff --git a/neuralnetworks/utils/common/include/nnapi/hal/InvalidPreparedModel.h b/neuralnetworks/utils/common/include/nnapi/hal/InvalidPreparedModel.h index de30aaefc9..3f1f2904d8 100644 --- a/neuralnetworks/utils/common/include/nnapi/hal/InvalidPreparedModel.h +++ b/neuralnetworks/utils/common/include/nnapi/hal/InvalidPreparedModel.h @@ -31,18 +31,23 @@ class InvalidPreparedModel final : public nn::IPreparedModel { public: nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> execute( const nn::Request& request, nn::MeasureTiming measure, - const nn::OptionalTimePoint& deadline, - const nn::OptionalDuration& loopTimeoutDuration) const override; + const nn::OptionalTimePoint& deadline, const nn::OptionalDuration& loopTimeoutDuration, + const std::vector<nn::TokenValuePair>& hints, + const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override; nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> executeFenced( const nn::Request& 
request, const std::vector<nn::SyncFence>& waitFor, nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline, const nn::OptionalDuration& loopTimeoutDuration, - const nn::OptionalDuration& timeoutDurationAfterFence) const override; + const nn::OptionalDuration& timeoutDurationAfterFence, + const std::vector<nn::TokenValuePair>& hints, + const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override; nn::GeneralResult<nn::SharedExecution> createReusableExecution( const nn::Request& request, nn::MeasureTiming measure, - const nn::OptionalDuration& loopTimeoutDuration) const override; + const nn::OptionalDuration& loopTimeoutDuration, + const std::vector<nn::TokenValuePair>& hints, + const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override; nn::GeneralResult<nn::SharedBurst> configureExecutionBurst() const override; diff --git a/neuralnetworks/utils/common/include/nnapi/hal/ResilientBurst.h b/neuralnetworks/utils/common/include/nnapi/hal/ResilientBurst.h index fde2486a53..129431f3cf 100644 --- a/neuralnetworks/utils/common/include/nnapi/hal/ResilientBurst.h +++ b/neuralnetworks/utils/common/include/nnapi/hal/ResilientBurst.h @@ -48,18 +48,23 @@ class ResilientBurst final : public nn::IBurst, nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> execute( const nn::Request& request, nn::MeasureTiming measure, - const nn::OptionalTimePoint& deadline, - const nn::OptionalDuration& loopTimeoutDuration) const override; + const nn::OptionalTimePoint& deadline, const nn::OptionalDuration& loopTimeoutDuration, + const std::vector<nn::TokenValuePair>& hints, + const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override; nn::GeneralResult<nn::SharedExecution> createReusableExecution( const nn::Request& request, nn::MeasureTiming measure, - const nn::OptionalDuration& loopTimeoutDuration) const override; + const nn::OptionalDuration& loopTimeoutDuration, + const 
std::vector<nn::TokenValuePair>& hints, + const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override; private: bool isValidInternal() const EXCLUDES(mMutex); nn::GeneralResult<nn::SharedExecution> createReusableExecutionInternal( const nn::Request& request, nn::MeasureTiming measure, - const nn::OptionalDuration& loopTimeoutDuration) const; + const nn::OptionalDuration& loopTimeoutDuration, + const std::vector<nn::TokenValuePair>& hints, + const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const; const Factory kMakeBurst; mutable std::mutex mMutex; diff --git a/neuralnetworks/utils/common/include/nnapi/hal/ResilientDevice.h b/neuralnetworks/utils/common/include/nnapi/hal/ResilientDevice.h index 84ae799aad..267d6346cf 100644 --- a/neuralnetworks/utils/common/include/nnapi/hal/ResilientDevice.h +++ b/neuralnetworks/utils/common/include/nnapi/hal/ResilientDevice.h @@ -65,8 +65,9 @@ class ResilientDevice final : public nn::IDevice, nn::GeneralResult<nn::SharedPreparedModel> prepareModel( const nn::Model& model, nn::ExecutionPreference preference, nn::Priority priority, nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache, - const std::vector<nn::SharedHandle>& dataCache, - const nn::CacheToken& token) const override; + const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token, + const std::vector<nn::TokenValuePair>& hints, + const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override; nn::GeneralResult<nn::SharedPreparedModel> prepareModelFromCache( nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache, @@ -83,7 +84,9 @@ class ResilientDevice final : public nn::IDevice, nn::GeneralResult<nn::SharedPreparedModel> prepareModelInternal( const nn::Model& model, nn::ExecutionPreference preference, nn::Priority priority, nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache, - const 
std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token) const; + const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token, + const std::vector<nn::TokenValuePair>& hints, + const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const; nn::GeneralResult<nn::SharedPreparedModel> prepareModelFromCacheInternal( nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache, const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token) const; diff --git a/neuralnetworks/utils/common/include/nnapi/hal/ResilientPreparedModel.h b/neuralnetworks/utils/common/include/nnapi/hal/ResilientPreparedModel.h index 86533edd12..bbfc220a3b 100644 --- a/neuralnetworks/utils/common/include/nnapi/hal/ResilientPreparedModel.h +++ b/neuralnetworks/utils/common/include/nnapi/hal/ResilientPreparedModel.h @@ -49,18 +49,23 @@ class ResilientPreparedModel final : public nn::IPreparedModel, nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> execute( const nn::Request& request, nn::MeasureTiming measure, - const nn::OptionalTimePoint& deadline, - const nn::OptionalDuration& loopTimeoutDuration) const override; + const nn::OptionalTimePoint& deadline, const nn::OptionalDuration& loopTimeoutDuration, + const std::vector<nn::TokenValuePair>& hints, + const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override; nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> executeFenced( const nn::Request& request, const std::vector<nn::SyncFence>& waitFor, nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline, const nn::OptionalDuration& loopTimeoutDuration, - const nn::OptionalDuration& timeoutDurationAfterFence) const override; + const nn::OptionalDuration& timeoutDurationAfterFence, + const std::vector<nn::TokenValuePair>& hints, + const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override; 
nn::GeneralResult<nn::SharedExecution> createReusableExecution( const nn::Request& request, nn::MeasureTiming measure, - const nn::OptionalDuration& loopTimeoutDuration) const override; + const nn::OptionalDuration& loopTimeoutDuration, + const std::vector<nn::TokenValuePair>& hints, + const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override; nn::GeneralResult<nn::SharedBurst> configureExecutionBurst() const override; @@ -70,7 +75,9 @@ class ResilientPreparedModel final : public nn::IPreparedModel, bool isValidInternal() const EXCLUDES(mMutex); nn::GeneralResult<nn::SharedExecution> createReusableExecutionInternal( const nn::Request& request, nn::MeasureTiming measure, - const nn::OptionalDuration& loopTimeoutDuration) const; + const nn::OptionalDuration& loopTimeoutDuration, + const std::vector<nn::TokenValuePair>& hints, + const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const; nn::GeneralResult<nn::SharedBurst> configureExecutionBurstInternal() const; const Factory kMakePreparedModel; diff --git a/neuralnetworks/utils/common/src/InvalidBurst.cpp b/neuralnetworks/utils/common/src/InvalidBurst.cpp index 01915337c4..3fdfb5cec3 100644 --- a/neuralnetworks/utils/common/src/InvalidBurst.cpp +++ b/neuralnetworks/utils/common/src/InvalidBurst.cpp @@ -34,13 +34,17 @@ InvalidBurst::OptionalCacheHold InvalidBurst::cacheMemory( nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> InvalidBurst::execute( const nn::Request& /*request*/, nn::MeasureTiming /*measure*/, const nn::OptionalTimePoint& /*deadline*/, - const nn::OptionalDuration& /*loopTimeoutDuration*/) const { + const nn::OptionalDuration& /*loopTimeoutDuration*/, + const std::vector<nn::TokenValuePair>& /*hints*/, + const std::vector<nn::ExtensionNameAndPrefix>& /*extensionNameToPrefix*/) const { return NN_ERROR() << "InvalidBurst"; } nn::GeneralResult<nn::SharedExecution> InvalidBurst::createReusableExecution( const nn::Request& /*request*/, 
nn::MeasureTiming /*measure*/, - const nn::OptionalDuration& /*loopTimeoutDuration*/) const { + const nn::OptionalDuration& /*loopTimeoutDuration*/, + const std::vector<nn::TokenValuePair>& /*hints*/, + const std::vector<nn::ExtensionNameAndPrefix>& /*extensionNameToPrefix*/) const { return NN_ERROR() << "InvalidBurst"; } diff --git a/neuralnetworks/utils/common/src/InvalidDevice.cpp b/neuralnetworks/utils/common/src/InvalidDevice.cpp index 535ccb41c7..c8cc287573 100644 --- a/neuralnetworks/utils/common/src/InvalidDevice.cpp +++ b/neuralnetworks/utils/common/src/InvalidDevice.cpp @@ -84,7 +84,9 @@ nn::GeneralResult<nn::SharedPreparedModel> InvalidDevice::prepareModel( const nn::Model& /*model*/, nn::ExecutionPreference /*preference*/, nn::Priority /*priority*/, nn::OptionalTimePoint /*deadline*/, const std::vector<nn::SharedHandle>& /*modelCache*/, - const std::vector<nn::SharedHandle>& /*dataCache*/, const nn::CacheToken& /*token*/) const { + const std::vector<nn::SharedHandle>& /*dataCache*/, const nn::CacheToken& /*token*/, + const std::vector<nn::TokenValuePair>& /*hints*/, + const std::vector<nn::ExtensionNameAndPrefix>& /*extensionNameToPrefix*/) const { return NN_ERROR() << "InvalidDevice"; } diff --git a/neuralnetworks/utils/common/src/InvalidPreparedModel.cpp b/neuralnetworks/utils/common/src/InvalidPreparedModel.cpp index 8195462ba8..f6f978d8c8 100644 --- a/neuralnetworks/utils/common/src/InvalidPreparedModel.cpp +++ b/neuralnetworks/utils/common/src/InvalidPreparedModel.cpp @@ -27,9 +27,12 @@ namespace android::hardware::neuralnetworks::utils { nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> -InvalidPreparedModel::execute(const nn::Request& /*request*/, nn::MeasureTiming /*measure*/, - const nn::OptionalTimePoint& /*deadline*/, - const nn::OptionalDuration& /*loopTimeoutDuration*/) const { +InvalidPreparedModel::execute( + const nn::Request& /*request*/, nn::MeasureTiming /*measure*/, + const nn::OptionalTimePoint& /*deadline*/, 
+ const nn::OptionalDuration& /*loopTimeoutDuration*/, + const std::vector<nn::TokenValuePair>& /*hints*/, + const std::vector<nn::ExtensionNameAndPrefix>& /*extensionNameToPrefix*/) const { return NN_ERROR() << "InvalidPreparedModel"; } @@ -38,13 +41,17 @@ InvalidPreparedModel::executeFenced( const nn::Request& /*request*/, const std::vector<nn::SyncFence>& /*waitFor*/, nn::MeasureTiming /*measure*/, const nn::OptionalTimePoint& /*deadline*/, const nn::OptionalDuration& /*loopTimeoutDuration*/, - const nn::OptionalDuration& /*timeoutDurationAfterFence*/) const { + const nn::OptionalDuration& /*timeoutDurationAfterFence*/, + const std::vector<nn::TokenValuePair>& /*hints*/, + const std::vector<nn::ExtensionNameAndPrefix>& /*extensionNameToPrefix*/) const { return NN_ERROR() << "InvalidPreparedModel"; } nn::GeneralResult<nn::SharedExecution> InvalidPreparedModel::createReusableExecution( const nn::Request& /*request*/, nn::MeasureTiming /*measure*/, - const nn::OptionalDuration& /*loopTimeoutDuration*/) const { + const nn::OptionalDuration& /*loopTimeoutDuration*/, + const std::vector<nn::TokenValuePair>& /*hints*/, + const std::vector<nn::ExtensionNameAndPrefix>& /*extensionNameToPrefix*/) const { return NN_ERROR() << "InvalidPreparedModel"; } diff --git a/neuralnetworks/utils/common/src/ResilientBurst.cpp b/neuralnetworks/utils/common/src/ResilientBurst.cpp index 79cbe3991f..bf7a8ea130 100644 --- a/neuralnetworks/utils/common/src/ResilientBurst.cpp +++ b/neuralnetworks/utils/common/src/ResilientBurst.cpp @@ -105,37 +105,49 @@ ResilientBurst::OptionalCacheHold ResilientBurst::cacheMemory( nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> ResilientBurst::execute( const nn::Request& request, nn::MeasureTiming measure, - const nn::OptionalTimePoint& deadline, - const nn::OptionalDuration& loopTimeoutDuration) const { - const auto fn = [&request, measure, deadline, loopTimeoutDuration](const nn::IBurst& burst) { - return burst.execute(request, 
measure, deadline, loopTimeoutDuration); + const nn::OptionalTimePoint& deadline, const nn::OptionalDuration& loopTimeoutDuration, + const std::vector<nn::TokenValuePair>& hints, + const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const { + const auto fn = [&request, measure, deadline, loopTimeoutDuration, &hints, + &extensionNameToPrefix](const nn::IBurst& burst) { + return burst.execute(request, measure, deadline, loopTimeoutDuration, hints, + extensionNameToPrefix); }; return protect(*this, fn); } nn::GeneralResult<nn::SharedExecution> ResilientBurst::createReusableExecution( const nn::Request& request, nn::MeasureTiming measure, - const nn::OptionalDuration& loopTimeoutDuration) const { + const nn::OptionalDuration& loopTimeoutDuration, + const std::vector<nn::TokenValuePair>& hints, + const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const { #if 0 auto self = shared_from_this(); - ResilientExecution::Factory makeExecution = - [burst = std::move(self), request, measure, loopTimeoutDuration] { - return burst->createReusableExecutionInternal(request, measure, loopTimeoutDuration); + ResilientExecution::Factory makeExecution = [burst = std::move(self), request, measure, + loopTimeoutDuration, &hints, + &extensionNameToPrefix] { + return burst->createReusableExecutionInternal(request, measure, loopTimeoutDuration, hints, + extensionNameToPrefix); }; return ResilientExecution::create(std::move(makeExecution)); #else - return createReusableExecutionInternal(request, measure, loopTimeoutDuration); + return createReusableExecutionInternal(request, measure, loopTimeoutDuration, hints, + extensionNameToPrefix); #endif } nn::GeneralResult<nn::SharedExecution> ResilientBurst::createReusableExecutionInternal( const nn::Request& request, nn::MeasureTiming measure, - const nn::OptionalDuration& loopTimeoutDuration) const { + const nn::OptionalDuration& loopTimeoutDuration, + const std::vector<nn::TokenValuePair>& hints, + const 
std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const { if (!isValidInternal()) { return std::make_shared<const InvalidExecution>(); } - const auto fn = [&request, measure, &loopTimeoutDuration](const nn::IBurst& burst) { - return burst.createReusableExecution(request, measure, loopTimeoutDuration); + const auto fn = [&request, measure, &loopTimeoutDuration, &hints, + &extensionNameToPrefix](const nn::IBurst& burst) { + return burst.createReusableExecution(request, measure, loopTimeoutDuration, hints, + extensionNameToPrefix); }; return protect(*this, fn); } diff --git a/neuralnetworks/utils/common/src/ResilientDevice.cpp b/neuralnetworks/utils/common/src/ResilientDevice.cpp index 2023c9af30..a5c2640b76 100644 --- a/neuralnetworks/utils/common/src/ResilientDevice.cpp +++ b/neuralnetworks/utils/common/src/ResilientDevice.cpp @@ -179,19 +179,21 @@ nn::GeneralResult<std::vector<bool>> ResilientDevice::getSupportedOperations( nn::GeneralResult<nn::SharedPreparedModel> ResilientDevice::prepareModel( const nn::Model& model, nn::ExecutionPreference preference, nn::Priority priority, nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache, - const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token) const { + const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token, + const std::vector<nn::TokenValuePair>& hints, + const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const { #if 0 auto self = shared_from_this(); ResilientPreparedModel::Factory makePreparedModel = [device = std::move(self), model, preference, priority, deadline, modelCache, - dataCache, token] { + dataCache, token, hints, extensionNameToPrefix] { return device->prepareModelInternal(model, preference, priority, deadline, modelCache, - dataCache, token); + dataCache, token, hints, extensionNameToPrefix); }; return ResilientPreparedModel::create(std::move(makePreparedModel)); #else - return prepareModelInternal(model, 
preference, priority, deadline, modelCache, dataCache, - token); + return prepareModelInternal(model, preference, priority, deadline, modelCache, dataCache, token, + hints, extensionNameToPrefix); #endif } @@ -234,14 +236,16 @@ bool ResilientDevice::isValidInternal() const { nn::GeneralResult<nn::SharedPreparedModel> ResilientDevice::prepareModelInternal( const nn::Model& model, nn::ExecutionPreference preference, nn::Priority priority, nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache, - const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token) const { + const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token, + const std::vector<nn::TokenValuePair>& hints, + const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const { if (!isValidInternal()) { return std::make_shared<const InvalidPreparedModel>(); } - const auto fn = [&model, preference, priority, &deadline, &modelCache, &dataCache, - &token](const nn::IDevice& device) { + const auto fn = [&model, preference, priority, &deadline, &modelCache, &dataCache, &token, + &hints, &extensionNameToPrefix](const nn::IDevice& device) { return device.prepareModel(model, preference, priority, deadline, modelCache, dataCache, - token); + token, hints, extensionNameToPrefix); }; return protect(*this, fn, /*blocking=*/false); } diff --git a/neuralnetworks/utils/common/src/ResilientPreparedModel.cpp b/neuralnetworks/utils/common/src/ResilientPreparedModel.cpp index 1ae19bc6ca..b5843c0fd4 100644 --- a/neuralnetworks/utils/common/src/ResilientPreparedModel.cpp +++ b/neuralnetworks/utils/common/src/ResilientPreparedModel.cpp @@ -104,43 +104,53 @@ nn::GeneralResult<nn::SharedPreparedModel> ResilientPreparedModel::recover( } nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> -ResilientPreparedModel::execute(const nn::Request& request, nn::MeasureTiming measure, - const nn::OptionalTimePoint& deadline, - const 
nn::OptionalDuration& loopTimeoutDuration) const { - const auto fn = [&request, measure, &deadline, - &loopTimeoutDuration](const nn::IPreparedModel& preparedModel) { - return preparedModel.execute(request, measure, deadline, loopTimeoutDuration); +ResilientPreparedModel::execute( + const nn::Request& request, nn::MeasureTiming measure, + const nn::OptionalTimePoint& deadline, const nn::OptionalDuration& loopTimeoutDuration, + const std::vector<nn::TokenValuePair>& hints, + const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const { + const auto fn = [&request, measure, &deadline, &loopTimeoutDuration, &hints, + &extensionNameToPrefix](const nn::IPreparedModel& preparedModel) { + return preparedModel.execute(request, measure, deadline, loopTimeoutDuration, hints, + extensionNameToPrefix); }; return protect(*this, fn); } nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> -ResilientPreparedModel::executeFenced(const nn::Request& request, - const std::vector<nn::SyncFence>& waitFor, - nn::MeasureTiming measure, - const nn::OptionalTimePoint& deadline, - const nn::OptionalDuration& loopTimeoutDuration, - const nn::OptionalDuration& timeoutDurationAfterFence) const { +ResilientPreparedModel::executeFenced( + const nn::Request& request, const std::vector<nn::SyncFence>& waitFor, + nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline, + const nn::OptionalDuration& loopTimeoutDuration, + const nn::OptionalDuration& timeoutDurationAfterFence, + const std::vector<nn::TokenValuePair>& hints, + const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const { const auto fn = [&request, &waitFor, measure, &deadline, &loopTimeoutDuration, - &timeoutDurationAfterFence](const nn::IPreparedModel& preparedModel) { + &timeoutDurationAfterFence, &hints, + &extensionNameToPrefix](const nn::IPreparedModel& preparedModel) { return preparedModel.executeFenced(request, waitFor, measure, deadline, loopTimeoutDuration, - 
timeoutDurationAfterFence); + timeoutDurationAfterFence, hints, extensionNameToPrefix); }; return protect(*this, fn); } nn::GeneralResult<nn::SharedExecution> ResilientPreparedModel::createReusableExecution( const nn::Request& request, nn::MeasureTiming measure, - const nn::OptionalDuration& loopTimeoutDuration) const { + const nn::OptionalDuration& loopTimeoutDuration, + const std::vector<nn::TokenValuePair>& hints, + const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const { #if 0 auto self = shared_from_this(); - ResilientExecution::Factory makeExecution = - [preparedModel = std::move(self), request, measure, loopTimeoutDuration] { - return preparedModel->createReusableExecutionInternal(request, measure, loopTimeoutDuration); + ResilientExecution::Factory makeExecution = [preparedModel = std::move(self), request, measure, + loopTimeoutDuration, hints, + extensionNameToPrefix] { + return preparedModel->createReusableExecutionInternal(request, measure, loopTimeoutDuration, + hints, extensionNameToPrefix); }; return ResilientExecution::create(std::move(makeExecution)); #else - return createReusableExecutionInternal(request, measure, loopTimeoutDuration); + return createReusableExecutionInternal(request, measure, loopTimeoutDuration, hints, + extensionNameToPrefix); #endif } @@ -159,13 +169,16 @@ nn::GeneralResult<nn::SharedBurst> ResilientPreparedModel::configureExecutionBur nn::GeneralResult<nn::SharedExecution> ResilientPreparedModel::createReusableExecutionInternal( const nn::Request& request, nn::MeasureTiming measure, - const nn::OptionalDuration& loopTimeoutDuration) const { + const nn::OptionalDuration& loopTimeoutDuration, + const std::vector<nn::TokenValuePair>& hints, + const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const { if (!isValidInternal()) { return std::make_shared<const InvalidExecution>(); } - const auto fn = [&request, measure, - &loopTimeoutDuration](const nn::IPreparedModel& preparedModel) { - return 
preparedModel.createReusableExecution(request, measure, loopTimeoutDuration); + const auto fn = [&request, measure, &loopTimeoutDuration, &hints, + &extensionNameToPrefix](const nn::IPreparedModel& preparedModel) { + return preparedModel.createReusableExecution(request, measure, loopTimeoutDuration, hints, + extensionNameToPrefix); }; return protect(*this, fn); } diff --git a/neuralnetworks/utils/common/test/MockDevice.h b/neuralnetworks/utils/common/test/MockDevice.h index a9428bccfc..a0fc5c3de1 100644 --- a/neuralnetworks/utils/common/test/MockDevice.h +++ b/neuralnetworks/utils/common/test/MockDevice.h @@ -39,7 +39,9 @@ class MockDevice final : public IDevice { MOCK_METHOD(GeneralResult<SharedPreparedModel>, prepareModel, (const Model& model, ExecutionPreference preference, Priority priority, OptionalTimePoint deadline, const std::vector<SharedHandle>& modelCache, - const std::vector<SharedHandle>& dataCache, const CacheToken& token), + const std::vector<SharedHandle>& dataCache, const CacheToken& token, + const std::vector<TokenValuePair>& hints, + const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix), (const, override)); MOCK_METHOD(GeneralResult<SharedPreparedModel>, prepareModelFromCache, (OptionalTimePoint deadline, const std::vector<SharedHandle>& modelCache, diff --git a/neuralnetworks/utils/common/test/MockPreparedModel.h b/neuralnetworks/utils/common/test/MockPreparedModel.h index c8ce006171..b8613b226c 100644 --- a/neuralnetworks/utils/common/test/MockPreparedModel.h +++ b/neuralnetworks/utils/common/test/MockPreparedModel.h @@ -27,17 +27,23 @@ class MockPreparedModel final : public IPreparedModel { public: MOCK_METHOD((ExecutionResult<std::pair<std::vector<OutputShape>, Timing>>), execute, (const Request& request, MeasureTiming measure, const OptionalTimePoint& deadline, - const OptionalDuration& loopTimeoutDuration), + const OptionalDuration& loopTimeoutDuration, + const std::vector<TokenValuePair>& hints, + const 
std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix), (const, override)); MOCK_METHOD((GeneralResult<std::pair<SyncFence, ExecuteFencedInfoCallback>>), executeFenced, (const Request& request, const std::vector<SyncFence>& waitFor, MeasureTiming measure, const OptionalTimePoint& deadline, const OptionalDuration& loopTimeoutDuration, - const OptionalDuration& timeoutDurationAfterFence), + const OptionalDuration& timeoutDurationAfterFence, + const std::vector<TokenValuePair>& hints, + const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix), (const, override)); MOCK_METHOD((GeneralResult<SharedExecution>), createReusableExecution, - (const nn::Request& request, nn::MeasureTiming measure, - const nn::OptionalDuration& loopTimeoutDuration), + (const Request& request, MeasureTiming measure, + const OptionalDuration& loopTimeoutDuration, + const std::vector<TokenValuePair>& hints, + const std::vector<nn::ExtensionNameAndPrefix>& extensionNameToPrefix), (const, override)); MOCK_METHOD(GeneralResult<SharedBurst>, configureExecutionBurst, (), (const, override)); MOCK_METHOD(std::any, getUnderlyingResource, (), (const, override)); diff --git a/neuralnetworks/utils/common/test/ResilientDeviceTest.cpp b/neuralnetworks/utils/common/test/ResilientDeviceTest.cpp index 0488b6359b..d9b8505c0e 100644 --- a/neuralnetworks/utils/common/test/ResilientDeviceTest.cpp +++ b/neuralnetworks/utils/common/test/ResilientDeviceTest.cpp @@ -309,12 +309,12 @@ TEST(ResilientDeviceTest, prepareModel) { // setup call const auto [mockDevice, mockDeviceFactory, device] = setup(); const auto mockPreparedModel = std::make_shared<const nn::MockPreparedModel>(); - EXPECT_CALL(*mockDevice, prepareModel(_, _, _, _, _, _, _)) + EXPECT_CALL(*mockDevice, prepareModel(_, _, _, _, _, _, _, _, _)) .Times(1) .WillOnce(Return(mockPreparedModel)); // run test - const auto result = device->prepareModel({}, {}, {}, {}, {}, {}, {}); + const auto result = device->prepareModel({}, {}, {}, {}, {}, 
{}, {}, {}, {}); // verify result ASSERT_TRUE(result.has_value()) @@ -324,12 +324,12 @@ TEST(ResilientDeviceTest, prepareModel) { TEST(ResilientDeviceTest, prepareModelError) { // setup call const auto [mockDevice, mockDeviceFactory, device] = setup(); - EXPECT_CALL(*mockDevice, prepareModel(_, _, _, _, _, _, _)) + EXPECT_CALL(*mockDevice, prepareModel(_, _, _, _, _, _, _, _, _)) .Times(1) .WillOnce(kReturnGeneralFailure); // run test - const auto result = device->prepareModel({}, {}, {}, {}, {}, {}, {}); + const auto result = device->prepareModel({}, {}, {}, {}, {}, {}, {}, {}, {}); // verify result ASSERT_FALSE(result.has_value()); @@ -339,13 +339,13 @@ TEST(ResilientDeviceTest, prepareModelError) { TEST(ResilientDeviceTest, prepareModelDeadObjectFailedRecovery) { // setup call const auto [mockDevice, mockDeviceFactory, device] = setup(); - EXPECT_CALL(*mockDevice, prepareModel(_, _, _, _, _, _, _)) + EXPECT_CALL(*mockDevice, prepareModel(_, _, _, _, _, _, _, _, _)) .Times(1) .WillOnce(kReturnDeadObject); EXPECT_CALL(*mockDeviceFactory, Call(false)).Times(1).WillOnce(kReturnGeneralFailure); // run test - const auto result = device->prepareModel({}, {}, {}, {}, {}, {}, {}); + const auto result = device->prepareModel({}, {}, {}, {}, {}, {}, {}, {}, {}); // verify result ASSERT_FALSE(result.has_value()); @@ -355,18 +355,18 @@ TEST(ResilientDeviceTest, prepareModelDeadObjectFailedRecovery) { TEST(ResilientDeviceTest, prepareModelDeadObjectSuccessfulRecovery) { // setup call const auto [mockDevice, mockDeviceFactory, device] = setup(); - EXPECT_CALL(*mockDevice, prepareModel(_, _, _, _, _, _, _)) + EXPECT_CALL(*mockDevice, prepareModel(_, _, _, _, _, _, _, _, _)) .Times(1) .WillOnce(kReturnDeadObject); const auto recoveredMockDevice = createConfiguredMockDevice(); const auto mockPreparedModel = std::make_shared<const nn::MockPreparedModel>(); - EXPECT_CALL(*recoveredMockDevice, prepareModel(_, _, _, _, _, _, _)) + EXPECT_CALL(*recoveredMockDevice, prepareModel(_, _, 
_, _, _, _, _, _, _)) .Times(1) .WillOnce(Return(mockPreparedModel)); EXPECT_CALL(*mockDeviceFactory, Call(false)).Times(1).WillOnce(Return(recoveredMockDevice)); // run test - const auto result = device->prepareModel({}, {}, {}, {}, {}, {}, {}); + const auto result = device->prepareModel({}, {}, {}, {}, {}, {}, {}, {}, {}); // verify result ASSERT_TRUE(result.has_value()) @@ -679,7 +679,7 @@ TEST(ResilientDeviceTest, recoverCacheMismatchInvalidPrepareModel) { device->recover(mockDevice.get(), /*blocking=*/false); // run test - auto result = device->prepareModel({}, {}, {}, {}, {}, {}, {}); + auto result = device->prepareModel({}, {}, {}, {}, {}, {}, {}, {}, {}); // verify result ASSERT_TRUE(result.has_value()) diff --git a/neuralnetworks/utils/common/test/ResilientPreparedModelTest.cpp b/neuralnetworks/utils/common/test/ResilientPreparedModelTest.cpp index d396ca88df..276bfba4ef 100644 --- a/neuralnetworks/utils/common/test/ResilientPreparedModelTest.cpp +++ b/neuralnetworks/utils/common/test/ResilientPreparedModelTest.cpp @@ -104,12 +104,12 @@ TEST(ResilientPreparedModelTest, getPreparedModel) { TEST(ResilientPreparedModelTest, execute) { // setup call const auto [mockPreparedModel, mockPreparedModelFactory, preparedModel] = setup(); - EXPECT_CALL(*mockPreparedModel, execute(_, _, _, _)) + EXPECT_CALL(*mockPreparedModel, execute(_, _, _, _, _, _)) .Times(1) .WillOnce(Return(kNoExecutionError)); // run test - const auto result = preparedModel->execute({}, {}, {}, {}); + const auto result = preparedModel->execute({}, {}, {}, {}, {}, {}); // verify result ASSERT_TRUE(result.has_value()) @@ -119,10 +119,12 @@ TEST(ResilientPreparedModelTest, execute) { TEST(ResilientPreparedModelTest, executeError) { // setup call const auto [mockPreparedModel, mockPreparedModelFactory, preparedModel] = setup(); - EXPECT_CALL(*mockPreparedModel, execute(_, _, _, _)).Times(1).WillOnce(kReturnGeneralFailure); + EXPECT_CALL(*mockPreparedModel, execute(_, _, _, _, _, _)) + .Times(1) + 
.WillOnce(kReturnGeneralFailure); // run test - const auto result = preparedModel->execute({}, {}, {}, {}); + const auto result = preparedModel->execute({}, {}, {}, {}, {}, {}); // verify result ASSERT_FALSE(result.has_value()); @@ -132,12 +134,12 @@ TEST(ResilientPreparedModelTest, executeError) { TEST(ResilientPreparedModelTest, executeDeadObjectFailedRecovery) { // setup call const auto [mockPreparedModel, mockPreparedModelFactory, preparedModel] = setup(); - EXPECT_CALL(*mockPreparedModel, execute(_, _, _, _)).Times(1).WillOnce(kReturnDeadObject); + EXPECT_CALL(*mockPreparedModel, execute(_, _, _, _, _, _)).Times(1).WillOnce(kReturnDeadObject); constexpr auto ret = [] { return nn::error(nn::ErrorStatus::GENERAL_FAILURE); }; EXPECT_CALL(*mockPreparedModelFactory, Call()).Times(1).WillOnce(ret); // run test - const auto result = preparedModel->execute({}, {}, {}, {}); + const auto result = preparedModel->execute({}, {}, {}, {}, {}, {}); // verify result ASSERT_FALSE(result.has_value()); @@ -147,9 +149,9 @@ TEST(ResilientPreparedModelTest, executeDeadObjectFailedRecovery) { TEST(ResilientPreparedModelTest, executeDeadObjectSuccessfulRecovery) { // setup call const auto [mockPreparedModel, mockPreparedModelFactory, preparedModel] = setup(); - EXPECT_CALL(*mockPreparedModel, execute(_, _, _, _)).Times(1).WillOnce(kReturnDeadObject); + EXPECT_CALL(*mockPreparedModel, execute(_, _, _, _, _, _)).Times(1).WillOnce(kReturnDeadObject); const auto recoveredMockPreparedModel = createConfiguredMockPreparedModel(); - EXPECT_CALL(*recoveredMockPreparedModel, execute(_, _, _, _)) + EXPECT_CALL(*recoveredMockPreparedModel, execute(_, _, _, _, _, _)) .Times(1) .WillOnce(Return(kNoExecutionError)); EXPECT_CALL(*mockPreparedModelFactory, Call()) @@ -157,7 +159,7 @@ TEST(ResilientPreparedModelTest, executeDeadObjectSuccessfulRecovery) { .WillOnce(Return(recoveredMockPreparedModel)); // run test - const auto result = preparedModel->execute({}, {}, {}, {}); + const auto result = 
preparedModel->execute({}, {}, {}, {}, {}, {}); // verify result ASSERT_TRUE(result.has_value()) @@ -167,12 +169,12 @@ TEST(ResilientPreparedModelTest, executeDeadObjectSuccessfulRecovery) { TEST(ResilientPreparedModelTest, executeFenced) { // setup call const auto [mockPreparedModel, mockPreparedModelFactory, preparedModel] = setup(); - EXPECT_CALL(*mockPreparedModel, executeFenced(_, _, _, _, _, _)) + EXPECT_CALL(*mockPreparedModel, executeFenced(_, _, _, _, _, _, _, _)) .Times(1) .WillOnce(Return(kNoFencedExecutionError)); // run test - const auto result = preparedModel->executeFenced({}, {}, {}, {}, {}, {}); + const auto result = preparedModel->executeFenced({}, {}, {}, {}, {}, {}, {}, {}); // verify result ASSERT_TRUE(result.has_value()) @@ -182,12 +184,12 @@ TEST(ResilientPreparedModelTest, executeFenced) { TEST(ResilientPreparedModelTest, executeFencedError) { // setup call const auto [mockPreparedModel, mockPreparedModelFactory, preparedModel] = setup(); - EXPECT_CALL(*mockPreparedModel, executeFenced(_, _, _, _, _, _)) + EXPECT_CALL(*mockPreparedModel, executeFenced(_, _, _, _, _, _, _, _)) .Times(1) .WillOnce(kReturnGeneralFailure); // run test - const auto result = preparedModel->executeFenced({}, {}, {}, {}, {}, {}); + const auto result = preparedModel->executeFenced({}, {}, {}, {}, {}, {}, {}, {}); // verify result ASSERT_FALSE(result.has_value()); @@ -197,13 +199,13 @@ TEST(ResilientPreparedModelTest, executeFencedError) { TEST(ResilientPreparedModelTest, executeFencedDeadObjectFailedRecovery) { // setup call const auto [mockPreparedModel, mockPreparedModelFactory, preparedModel] = setup(); - EXPECT_CALL(*mockPreparedModel, executeFenced(_, _, _, _, _, _)) + EXPECT_CALL(*mockPreparedModel, executeFenced(_, _, _, _, _, _, _, _)) .Times(1) .WillOnce(kReturnDeadObject); EXPECT_CALL(*mockPreparedModelFactory, Call()).Times(1).WillOnce(kReturnGeneralFailure); // run test - const auto result = preparedModel->executeFenced({}, {}, {}, {}, {}, {}); + const 
auto result = preparedModel->executeFenced({}, {}, {}, {}, {}, {}, {}, {}); // verify result ASSERT_FALSE(result.has_value()); @@ -213,11 +215,11 @@ TEST(ResilientPreparedModelTest, executeFencedDeadObjectFailedRecovery) { TEST(ResilientPreparedModelTest, executeFencedDeadObjectSuccessfulRecovery) { // setup call const auto [mockPreparedModel, mockPreparedModelFactory, preparedModel] = setup(); - EXPECT_CALL(*mockPreparedModel, executeFenced(_, _, _, _, _, _)) + EXPECT_CALL(*mockPreparedModel, executeFenced(_, _, _, _, _, _, _, _)) .Times(1) .WillOnce(kReturnDeadObject); const auto recoveredMockPreparedModel = createConfiguredMockPreparedModel(); - EXPECT_CALL(*recoveredMockPreparedModel, executeFenced(_, _, _, _, _, _)) + EXPECT_CALL(*recoveredMockPreparedModel, executeFenced(_, _, _, _, _, _, _, _)) .Times(1) .WillOnce(Return(kNoFencedExecutionError)); EXPECT_CALL(*mockPreparedModelFactory, Call()) @@ -225,7 +227,7 @@ TEST(ResilientPreparedModelTest, executeFencedDeadObjectSuccessfulRecovery) { .WillOnce(Return(recoveredMockPreparedModel)); // run test - const auto result = preparedModel->executeFenced({}, {}, {}, {}, {}, {}); + const auto result = preparedModel->executeFenced({}, {}, {}, {}, {}, {}, {}, {}); // verify result ASSERT_TRUE(result.has_value()) @@ -235,12 +237,12 @@ TEST(ResilientPreparedModelTest, executeFencedDeadObjectSuccessfulRecovery) { TEST(ResilientPreparedModelTest, createReusableExecution) { // setup call const auto [mockPreparedModel, mockPreparedModelFactory, preparedModel] = setup(); - EXPECT_CALL(*mockPreparedModel, createReusableExecution(_, _, _)) + EXPECT_CALL(*mockPreparedModel, createReusableExecution(_, _, _, _, _)) .Times(1) .WillOnce(Return(kNoCreateReusableExecutionError)); // run test - const auto result = preparedModel->createReusableExecution({}, {}, {}); + const auto result = preparedModel->createReusableExecution({}, {}, {}, {}, {}); // verify result ASSERT_TRUE(result.has_value()) @@ -250,12 +252,12 @@ 
TEST(ResilientPreparedModelTest, createReusableExecution) { TEST(ResilientPreparedModelTest, createReusableExecutionError) { // setup call const auto [mockPreparedModel, mockPreparedModelFactory, preparedModel] = setup(); - EXPECT_CALL(*mockPreparedModel, createReusableExecution(_, _, _)) + EXPECT_CALL(*mockPreparedModel, createReusableExecution(_, _, _, _, _)) .Times(1) .WillOnce(kReturnGeneralFailure); // run test - const auto result = preparedModel->createReusableExecution({}, {}, {}); + const auto result = preparedModel->createReusableExecution({}, {}, {}, {}, {}); // verify result ASSERT_FALSE(result.has_value()); |