Diffstat (limited to 'neuralnetworks/aidl/utils/src')
 neuralnetworks/aidl/utils/src/Conversions.cpp   | 6
 neuralnetworks/aidl/utils/src/Device.cpp        | 4
 neuralnetworks/aidl/utils/src/InvalidDevice.cpp | 3
3 files changed, 9 insertions, 4 deletions
diff --git a/neuralnetworks/aidl/utils/src/Conversions.cpp b/neuralnetworks/aidl/utils/src/Conversions.cpp
index 9b897c43ac..83fda10ce2 100644
--- a/neuralnetworks/aidl/utils/src/Conversions.cpp
+++ b/neuralnetworks/aidl/utils/src/Conversions.cpp
@@ -614,7 +614,7 @@ struct overloaded : Ts... {
     using Ts::operator()...;
 };
 template <class... Ts>
-overloaded(Ts...)->overloaded<Ts...>;
+overloaded(Ts...) -> overloaded<Ts...>;
 
 #ifdef __ANDROID__
 nn::GeneralResult<common::NativeHandle> aidlHandleFromNativeHandle(
@@ -1190,4 +1190,8 @@ nn::GeneralResult<std::vector<int32_t>> toSigned(const std::vector<uint32_t>& ve
     return std::vector<int32_t>(vec.begin(), vec.end());
 }
 
+std::vector<uint8_t> toVec(const std::array<uint8_t, IDevice::BYTE_SIZE_OF_CACHE_TOKEN>& token) {
+    return std::vector<uint8_t>(token.begin(), token.end());
+}
+
 }  // namespace aidl::android::hardware::neuralnetworks::utils
diff --git a/neuralnetworks/aidl/utils/src/Device.cpp b/neuralnetworks/aidl/utils/src/Device.cpp
index f3f4fdbba1..b64a40dcaa 100644
--- a/neuralnetworks/aidl/utils/src/Device.cpp
+++ b/neuralnetworks/aidl/utils/src/Device.cpp
@@ -229,7 +229,6 @@ nn::GeneralResult<nn::SharedPreparedModel> Device::prepareModel(
     const auto aidlDeadline = NN_TRY(convert(deadline));
     auto aidlModelCache = NN_TRY(convert(modelCache));
     auto aidlDataCache = NN_TRY(convert(dataCache));
-    const auto aidlToken = NN_TRY(convert(token));
 
     const auto cb = ndk::SharedRefBase::make<PreparedModelCallback>(kFeatureLevel);
     const auto scoped = kDeathHandler.protectCallback(cb.get());
@@ -240,12 +239,13 @@ nn::GeneralResult<nn::SharedPreparedModel> Device::prepareModel(
         const auto ret = kDevice->prepareModelWithConfig(
                 aidlModel,
                 {aidlPreference, aidlPriority, aidlDeadline, std::move(aidlModelCache),
-                 std::move(aidlDataCache), aidlToken, std::move(aidlHints),
+                 std::move(aidlDataCache), token, std::move(aidlHints),
                  std::move(aidlExtensionPrefix)},
                 cb);
         HANDLE_ASTATUS(ret) << "prepareModel failed";
         return cb->get();
     }
+    const auto aidlToken = NN_TRY(convert(token));
     const auto ret = kDevice->prepareModel(aidlModel, aidlPreference, aidlPriority, aidlDeadline,
                                            aidlModelCache, aidlDataCache, aidlToken, cb);
     HANDLE_ASTATUS(ret) << "prepareModel failed";
diff --git a/neuralnetworks/aidl/utils/src/InvalidDevice.cpp b/neuralnetworks/aidl/utils/src/InvalidDevice.cpp
index 33270ff381..44f8ea9ff8 100644
--- a/neuralnetworks/aidl/utils/src/InvalidDevice.cpp
+++ b/neuralnetworks/aidl/utils/src/InvalidDevice.cpp
@@ -189,7 +189,8 @@ ndk::ScopedAStatus InvalidDevice::prepareModelWithConfig(
         }
     }
     return prepareModel(model, config.preference, config.priority, config.deadlineNs,
-                        config.modelCache, config.dataCache, config.cacheToken, callback);
+                        config.modelCache, config.dataCache, utils::toVec(config.cacheToken),
+                        callback);
 }
 
 ndk::ScopedAStatus InvalidDevice::prepareModelFromCache(
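
For context, a minimal standalone sketch of what the new utils::toVec helper does: it copies the fixed-size AIDL cache token (std::array<uint8_t, IDevice::BYTE_SIZE_OF_CACHE_TOKEN>) into the std::vector<uint8_t> form that the vector-based prepareModel path expects. The kByteSizeOfCacheToken constant and the main() driver below are illustrative stand-ins for this sketch, not part of the change; the real helper uses the AIDL constant from IDevice.

#include <array>
#include <cstddef>
#include <cstdint>
#include <vector>

// Stand-in for IDevice::BYTE_SIZE_OF_CACHE_TOKEN (assumed to be 32 here),
// used only to keep this example self-contained.
constexpr std::size_t kByteSizeOfCacheToken = 32;

// Mirrors the helper added to Conversions.cpp: copy the fixed-size token
// into a dynamically sized vector.
std::vector<std::uint8_t> toVec(
        const std::array<std::uint8_t, kByteSizeOfCacheToken>& token) {
    return std::vector<std::uint8_t>(token.begin(), token.end());
}

int main() {
    // e.g. config.cacheToken arriving from prepareModelWithConfig
    std::array<std::uint8_t, kByteSizeOfCacheToken> cacheToken{};
    // converted form passed on to the vector-based prepareModel overload
    std::vector<std::uint8_t> tokenVec = toVec(cacheToken);
    return tokenVec.size() == kByteSizeOfCacheToken ? 0 : 1;
}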