author    Haamed Gheibi <haamed@google.com>  2022-02-04 13:47:26 -0800
committer Haamed Gheibi <haamed@google.com>  2022-02-04 13:55:47 -0800
commit    f99b35c293439db0b7436b47b939eb8c7bf21b51 (patch)
tree      6cd9b0719554809447c845616317cca5409b93ae /neuralnetworks/utils/adapter/hidl/src
parent    a028272dee9220e6810cbdcfb2328c34f8afe4c2 (diff)
parent    332dead340bb196c6ba3f6978e8fb53966c74bf7 (diff)
Merge TP1A.220120.003
Change-Id: Ie5eba313ee102e452f5f96942ed2f3a7bb4e8f01
Diffstat (limited to 'neuralnetworks/utils/adapter/hidl/src')
-rw-r--r--  neuralnetworks/utils/adapter/hidl/src/Adapter.cpp         45
-rw-r--r--  neuralnetworks/utils/adapter/hidl/src/Buffer.cpp          83
-rw-r--r--  neuralnetworks/utils/adapter/hidl/src/Burst.cpp          259
-rw-r--r--  neuralnetworks/utils/adapter/hidl/src/Device.cpp         546
-rw-r--r--  neuralnetworks/utils/adapter/hidl/src/PreparedModel.cpp  433
5 files changed, 1366 insertions, 0 deletions
diff --git a/neuralnetworks/utils/adapter/hidl/src/Adapter.cpp b/neuralnetworks/utils/adapter/hidl/src/Adapter.cpp
new file mode 100644
index 0000000000..782e815a83
--- /dev/null
+++ b/neuralnetworks/utils/adapter/hidl/src/Adapter.cpp
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Adapter.h"
+
+#include "Device.h"
+
+#include <android/hardware/neuralnetworks/1.3/IDevice.h>
+#include <nnapi/IDevice.h>
+#include <nnapi/Types.h>
+
+#include <functional>
+#include <memory>
+#include <thread>
+
+// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface
+// lifetimes across processes and for protecting asynchronous calls across HIDL.
+
+namespace android::hardware::neuralnetworks::adapter {
+
+sp<V1_3::IDevice> adapt(nn::SharedDevice device, Executor executor) {
+ return sp<Device>::make(std::move(device), std::move(executor));
+}
+
+sp<V1_3::IDevice> adapt(nn::SharedDevice device) {
+ Executor defaultExecutor = [](Task task, nn::OptionalTimePoint /*deadline*/) {
+ std::thread(std::move(task)).detach();
+ };
+ return adapt(std::move(device), std::move(defaultExecutor));
+}
+
+} // namespace android::hardware::neuralnetworks::adapter
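
The two adapt() overloads above are this library's entire public surface: they wrap a canonical nn::SharedDevice in a V1_3::IDevice binder object, with the single-argument overload supplying a default executor that runs each Task on a detached thread. A minimal usage sketch follows; getCanonicalDevice() is a hypothetical vendor-specific factory (not a real API), while adapt(), registerAsService(), and the thread-pool helpers come from the code above and the standard HIDL runtime:

    #include "Adapter.h"
    #include <hidl/HidlTransportSupport.h>
    #include <nnapi/IDevice.h>

    using android::sp;
    using android::hardware::configureRpcThreadpool;
    using android::hardware::joinRpcThreadpool;
    namespace adapter = android::hardware::neuralnetworks::adapter;
    namespace V1_3 = android::hardware::neuralnetworks::V1_3;

    // Hypothetical: however the vendor obtains its canonical device.
    android::nn::SharedDevice getCanonicalDevice();

    int main() {
        android::nn::SharedDevice device = getCanonicalDevice();
        // Wrap the canonical device; the default executor detaches a thread per task.
        sp<V1_3::IDevice> hidlDevice = adapter::adapt(std::move(device));
        configureRpcThreadpool(1, /*callerWillJoin=*/true);
        if (hidlDevice->registerAsService("example") != android::OK) {
            return 1;  // registration failed
        }
        joinRpcThreadpool();
        return 0;
    }

The service name "example" is arbitrary here; a real service would use the name its clients expect.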
diff --git a/neuralnetworks/utils/adapter/hidl/src/Buffer.cpp b/neuralnetworks/utils/adapter/hidl/src/Buffer.cpp
new file mode 100644
index 0000000000..3a04bf6b79
--- /dev/null
+++ b/neuralnetworks/utils/adapter/hidl/src/Buffer.cpp
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Buffer.h"
+
+#include <android-base/logging.h>
+#include <android/hardware/neuralnetworks/1.3/IBuffer.h>
+#include <android/hardware/neuralnetworks/1.3/types.h>
+#include <nnapi/IBuffer.h>
+#include <nnapi/TypeUtils.h>
+#include <nnapi/Types.h>
+#include <nnapi/hal/1.3/Utils.h>
+#include <memory>
+
+// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface
+// lifetimes across processes and for protecting asynchronous calls across HIDL.
+
+namespace android::hardware::neuralnetworks::adapter {
+namespace {
+
+template <typename Type>
+auto convertInput(const Type& object) -> decltype(nn::convert(std::declval<Type>())) {
+ auto result = nn::convert(object);
+ if (!result.has_value()) {
+ result.error().code = nn::ErrorStatus::INVALID_ARGUMENT;
+ }
+ return result;
+}
+
+nn::GeneralResult<void> copyTo(const nn::SharedBuffer& buffer, const hidl_memory& dst) {
+ const auto memory = NN_TRY(convertInput(dst));
+ NN_TRY(buffer->copyTo(memory));
+ return {};
+}
+
+nn::GeneralResult<void> copyFrom(const nn::SharedBuffer& buffer, const hidl_memory& src,
+ const hidl_vec<uint32_t>& dimensions) {
+ const auto memory = NN_TRY(convertInput(src));
+ NN_TRY(buffer->copyFrom(memory, dimensions));
+ return {};
+}
+
+} // namespace
+
+Buffer::Buffer(nn::SharedBuffer buffer) : kBuffer(std::move(buffer)) {
+ CHECK(kBuffer != nullptr);
+}
+
+Return<V1_3::ErrorStatus> Buffer::copyTo(const hidl_memory& dst) {
+ auto result = adapter::copyTo(kBuffer, dst);
+ if (!result.has_value()) {
+ const auto [message, code] = std::move(result).error();
+ LOG(ERROR) << "adapter::Buffer::copyTo failed with " << code << ": " << message;
+ return V1_3::utils::convert(code).value();
+ }
+ return V1_3::ErrorStatus::NONE;
+}
+
+Return<V1_3::ErrorStatus> Buffer::copyFrom(const hidl_memory& src,
+ const hidl_vec<uint32_t>& dimensions) {
+ auto result = adapter::copyFrom(kBuffer, src, dimensions);
+ if (!result.has_value()) {
+ const auto [message, code] = std::move(result).error();
+ LOG(ERROR) << "adapter::Buffer::copyFrom failed with " << code << ": " << message;
+ return V1_3::utils::convert(code).value();
+ }
+ return V1_3::ErrorStatus::NONE;
+}
+
+} // namespace android::hardware::neuralnetworks::adapter
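
Buffer, like Device and PreparedModel below, funnels every HIDL argument through convertInput(), which reuses nn::convert() but downgrades any conversion failure to INVALID_ARGUMENT, so a malformed caller-supplied argument is reported as a bad argument rather than a generic failure. A self-contained sketch of that remapping idiom, assuming only the nn::GeneralResult and NN_ERROR machinery from <nnapi/Result.h> that the code above already uses:

    #include <nnapi/Result.h>
    #include <nnapi/Types.h>

    namespace nn = android::nn;

    // A fallible step that reports a generic failure (stand-in for nn::convert).
    nn::GeneralResult<int> half(int value) {
        if (value % 2 != 0) {
            return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "odd value " << value;
        }
        return value / 2;
    }

    // Same step, but any failure surfaces as INVALID_ARGUMENT while keeping the
    // original message -- the pattern convertInput() applies above.
    nn::GeneralResult<int> halfAsArgumentError(int value) {
        auto result = half(value);
        if (!result.has_value()) {
            result.error().code = nn::ErrorStatus::INVALID_ARGUMENT;
        }
        return result;
    }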
diff --git a/neuralnetworks/utils/adapter/hidl/src/Burst.cpp b/neuralnetworks/utils/adapter/hidl/src/Burst.cpp
new file mode 100644
index 0000000000..8b2e1dd465
--- /dev/null
+++ b/neuralnetworks/utils/adapter/hidl/src/Burst.cpp
@@ -0,0 +1,259 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Burst.h"
+
+#include <android-base/logging.h>
+#include <nnapi/IBurst.h>
+#include <nnapi/Result.h>
+#include <nnapi/TypeUtils.h>
+#include <nnapi/Types.h>
+#include <nnapi/Validation.h>
+#include <nnapi/hal/1.0/Conversions.h>
+#include <nnapi/hal/1.0/HandleError.h>
+#include <nnapi/hal/1.0/ProtectCallback.h>
+#include <nnapi/hal/1.2/BurstUtils.h>
+#include <nnapi/hal/1.2/Conversions.h>
+#include <nnapi/hal/TransferValue.h>
+
+#include <algorithm>
+#include <cstring>
+#include <limits>
+#include <map>
+#include <memory>
+#include <tuple>
+#include <utility>
+#include <vector>
+
+#include "Tracing.h"
+
+namespace android::hardware::neuralnetworks::adapter {
+namespace {
+
+constexpr V1_2::Timing kTiming = {std::numeric_limits<uint64_t>::max(),
+ std::numeric_limits<uint64_t>::max()};
+
+nn::GeneralResult<std::vector<nn::SharedMemory>> getMemoriesCallback(
+ V1_0::ErrorStatus status, const hidl_vec<hidl_memory>& memories) {
+ HANDLE_STATUS_HIDL(status) << "getting burst memories failed with " << toString(status);
+ std::vector<nn::SharedMemory> canonicalMemories;
+ canonicalMemories.reserve(memories.size());
+ for (const auto& memory : memories) {
+ canonicalMemories.push_back(NN_TRY(nn::convert(memory)));
+ }
+ return canonicalMemories;
+}
+
+} // anonymous namespace
+
+Burst::MemoryCache::MemoryCache(nn::SharedBurst burstExecutor,
+ sp<V1_2::IBurstCallback> burstCallback)
+ : kBurstExecutor(std::move(burstExecutor)), kBurstCallback(std::move(burstCallback)) {
+ CHECK(kBurstExecutor != nullptr);
+ CHECK(kBurstCallback != nullptr);
+}
+
+nn::GeneralResult<std::vector<std::pair<nn::SharedMemory, nn::IBurst::OptionalCacheHold>>>
+Burst::MemoryCache::getCacheEntries(const std::vector<int32_t>& slots) {
+ std::lock_guard guard(mMutex);
+ NN_TRY(ensureCacheEntriesArePresentLocked(slots));
+
+ std::vector<std::pair<nn::SharedMemory, nn::IBurst::OptionalCacheHold>> results;
+ results.reserve(slots.size());
+ for (int32_t slot : slots) {
+ results.push_back(NN_TRY(getCacheEntryLocked(slot)));
+ }
+
+ return results;
+}
+
+nn::GeneralResult<void> Burst::MemoryCache::ensureCacheEntriesArePresentLocked(
+ const std::vector<int32_t>& slots) {
+ const auto slotIsKnown = [this](int32_t slot)
+ REQUIRES(mMutex) { return mCache.count(slot) > 0; };
+
+ // find unique unknown slots
+ std::vector<int32_t> unknownSlots = slots;
+ std::sort(unknownSlots.begin(), unknownSlots.end());
+ auto unknownSlotsEnd = std::unique(unknownSlots.begin(), unknownSlots.end());
+ unknownSlotsEnd = std::remove_if(unknownSlots.begin(), unknownSlotsEnd, slotIsKnown);
+ unknownSlots.erase(unknownSlotsEnd, unknownSlots.end());
+
+ // quick-exit if all slots are known
+ if (unknownSlots.empty()) {
+ return {};
+ }
+
+ auto cb = neuralnetworks::utils::CallbackValue(getMemoriesCallback);
+
+ const auto ret = kBurstCallback->getMemories(unknownSlots, cb);
+ HANDLE_TRANSPORT_FAILURE(ret);
+
+ auto returnedMemories = NN_TRY(cb.take());
+
+ if (returnedMemories.size() != unknownSlots.size()) {
+ return NN_ERROR() << "Burst::MemoryCache::ensureCacheEntriesArePresentLocked: Error "
+ "retrieving memories -- count mismatch between requested memories ("
+ << unknownSlots.size() << ") and returned memories ("
+ << returnedMemories.size() << ")";
+ }
+
+ // add memories to unknown slots
+ for (size_t i = 0; i < unknownSlots.size(); ++i) {
+ addCacheEntryLocked(unknownSlots[i], std::move(returnedMemories[i]));
+ }
+
+ return {};
+}
+
+nn::GeneralResult<std::pair<nn::SharedMemory, nn::IBurst::OptionalCacheHold>>
+Burst::MemoryCache::getCacheEntryLocked(int32_t slot) {
+ if (const auto iter = mCache.find(slot); iter != mCache.end()) {
+ return iter->second;
+ }
+ return NN_ERROR() << "Burst::MemoryCache::getCacheEntryLocked failed because slot " << slot
+ << " is not present in the cache";
+}
+
+void Burst::MemoryCache::addCacheEntryLocked(int32_t slot, nn::SharedMemory memory) {
+ auto hold = kBurstExecutor->cacheMemory(memory);
+ mCache.emplace(slot, std::make_pair(std::move(memory), std::move(hold)));
+}
+
+void Burst::MemoryCache::removeCacheEntry(int32_t slot) {
+ std::lock_guard guard(mMutex);
+ mCache.erase(slot);
+}
+
+// Burst methods
+
+nn::GeneralResult<sp<Burst>> Burst::create(
+ const sp<V1_2::IBurstCallback>& callback,
+ const MQDescriptorSync<V1_2::FmqRequestDatum>& requestChannel,
+ const MQDescriptorSync<V1_2::FmqResultDatum>& resultChannel, nn::SharedBurst burstExecutor,
+ std::chrono::microseconds pollingTimeWindow) {
+ // check inputs
+ if (callback == nullptr || burstExecutor == nullptr) {
+ return NN_ERROR() << "Burst::create passed a nullptr";
+ }
+
+ // create FMQ objects
+ auto requestChannelReceiver =
+ NN_TRY(V1_2::utils::RequestChannelReceiver::create(requestChannel, pollingTimeWindow));
+ auto resultChannelSender = NN_TRY(V1_2::utils::ResultChannelSender::create(resultChannel));
+
+ // check FMQ objects
+ CHECK(requestChannelReceiver != nullptr);
+ CHECK(resultChannelSender != nullptr);
+
+ // make and return context
+ return sp<Burst>::make(PrivateConstructorTag{}, callback, std::move(requestChannelReceiver),
+ std::move(resultChannelSender), std::move(burstExecutor));
+}
+
+Burst::Burst(PrivateConstructorTag /*tag*/, const sp<V1_2::IBurstCallback>& callback,
+ std::unique_ptr<V1_2::utils::RequestChannelReceiver> requestChannel,
+ std::unique_ptr<V1_2::utils::ResultChannelSender> resultChannel,
+ nn::SharedBurst burstExecutor)
+ : mCallback(callback),
+ mRequestChannelReceiver(std::move(requestChannel)),
+ mResultChannelSender(std::move(resultChannel)),
+ mBurstExecutor(std::move(burstExecutor)),
+ mMemoryCache(mBurstExecutor, mCallback) {
+ // TODO: document the threading behavior of this class in detail
+ mWorker = std::thread([this] { task(); });
+}
+
+Burst::~Burst() {
+ // set teardown flag
+ mTeardown = true;
+ mRequestChannelReceiver->invalidate();
+
+ // wait for task thread to end
+ mWorker.join();
+}
+
+Return<void> Burst::freeMemory(int32_t slot) {
+ mMemoryCache.removeCacheEntry(slot);
+ return Void();
+}
+
+void Burst::task() {
+ // loop until the burst object is being destroyed
+ while (!mTeardown) {
+ // receive request
+ auto arguments = mRequestChannelReceiver->getBlocking();
+
+ // if the request packet was not properly received, return a generic error and skip the
+ // execution
+ //
+ // if the burst is being torn down, skip the execution so the "task" function can end
+ if (!arguments.has_value()) {
+ if (!mTeardown) {
+ mResultChannelSender->send(V1_0::ErrorStatus::GENERAL_FAILURE, {}, kTiming);
+ }
+ continue;
+ }
+
+ // unpack the arguments; types are Request, std::vector<int32_t>, and V1_2::MeasureTiming,
+ // respectively
+ const auto [requestWithoutPools, slotsOfPools, measure] = std::move(arguments).value();
+
+ auto result = execute(requestWithoutPools, slotsOfPools, measure);
+
+ // return result
+ if (result.has_value()) {
+ const auto& [outputShapes, timing] = result.value();
+ mResultChannelSender->send(V1_0::ErrorStatus::NONE, outputShapes, timing);
+ } else {
+ const auto& [message, code, outputShapes] = result.error();
+ LOG(ERROR) << "IBurst::execute failed with " << code << ": " << message;
+ mResultChannelSender->send(V1_2::utils::convert(code).value(),
+ V1_2::utils::convert(outputShapes).value(), kTiming);
+ }
+ }
+}
+
+nn::ExecutionResult<std::pair<hidl_vec<V1_2::OutputShape>, V1_2::Timing>> Burst::execute(
+ const V1_0::Request& requestWithoutPools, const std::vector<int32_t>& slotsOfPools,
+ V1_2::MeasureTiming measure) {
+ NNTRACE_FULL(NNTRACE_LAYER_IPC, NNTRACE_PHASE_EXECUTION,
+ "Burst getting memory, executing, and returning results");
+
+ // ensure executor with cache has required memory
+ const auto cacheEntries = NN_TRY(mMemoryCache.getCacheEntries(slotsOfPools));
+
+ // convert request, populating its pools
+ // This code performs an unvalidated convert because the request object is incomplete, and
+ // therefore invalid, without its memory pools. Validation is instead performed after the
+ // memory pools have been added to the request.
+ auto canonicalRequest = NN_TRY(nn::unvalidatedConvert(requestWithoutPools));
+ CHECK(canonicalRequest.pools.empty());
+ std::transform(cacheEntries.begin(), cacheEntries.end(),
+ std::back_inserter(canonicalRequest.pools),
+ [](const auto& cacheEntry) { return cacheEntry.first; });
+ NN_TRY(validate(canonicalRequest));
+
+ nn::MeasureTiming canonicalMeasure = NN_TRY(nn::convert(measure));
+
+ const auto [outputShapes, timing] =
+ NN_TRY(mBurstExecutor->execute(canonicalRequest, canonicalMeasure, {}, {}));
+
+ return std::make_pair(NN_TRY(V1_2::utils::convert(outputShapes)),
+ NN_TRY(V1_2::utils::convert(timing)));
+}
+
+} // namespace android::hardware::neuralnetworks::adapter
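
The subtlest part of the memory cache is ensureCacheEntriesArePresentLocked(), which first reduces the requested slots to the unique ones not yet cached, so that a single getMemories() round trip fetches only what is missing. A standalone sketch of that sort/unique/remove_if sequence, with a plain std::set standing in for the mutex-guarded mCache:

    #include <algorithm>
    #include <cstdint>
    #include <set>
    #include <vector>

    // Returns the unique values in 'slots' that are absent from 'known',
    // mirroring the dedup-then-filter sequence used above.
    std::vector<int32_t> findUnknownSlots(std::vector<int32_t> slots,
                                          const std::set<int32_t>& known) {
        std::sort(slots.begin(), slots.end());
        auto end = std::unique(slots.begin(), slots.end());   // drop duplicates
        end = std::remove_if(slots.begin(), end,              // drop cached slots
                             [&known](int32_t slot) { return known.count(slot) > 0; });
        slots.erase(end, slots.end());
        return slots;
    }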
diff --git a/neuralnetworks/utils/adapter/hidl/src/Device.cpp b/neuralnetworks/utils/adapter/hidl/src/Device.cpp
new file mode 100644
index 0000000000..4993a80a93
--- /dev/null
+++ b/neuralnetworks/utils/adapter/hidl/src/Device.cpp
@@ -0,0 +1,546 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Device.h"
+
+#include "Buffer.h"
+#include "PreparedModel.h"
+
+#include <android-base/logging.h>
+#include <android/hardware/neuralnetworks/1.0/IPreparedModelCallback.h>
+#include <android/hardware/neuralnetworks/1.0/types.h>
+#include <android/hardware/neuralnetworks/1.1/types.h>
+#include <android/hardware/neuralnetworks/1.2/IPreparedModelCallback.h>
+#include <android/hardware/neuralnetworks/1.2/types.h>
+#include <android/hardware/neuralnetworks/1.3/IDevice.h>
+#include <android/hardware/neuralnetworks/1.3/IPreparedModelCallback.h>
+#include <android/hardware/neuralnetworks/1.3/types.h>
+#include <nnapi/IBuffer.h>
+#include <nnapi/IDevice.h>
+#include <nnapi/IPreparedModel.h>
+#include <nnapi/Result.h>
+#include <nnapi/TypeUtils.h>
+#include <nnapi/Types.h>
+#include <nnapi/hal/1.0/Conversions.h>
+#include <nnapi/hal/1.0/Utils.h>
+#include <nnapi/hal/1.1/Conversions.h>
+#include <nnapi/hal/1.1/Utils.h>
+#include <nnapi/hal/1.2/Conversions.h>
+#include <nnapi/hal/1.2/Utils.h>
+#include <nnapi/hal/1.3/Conversions.h>
+#include <nnapi/hal/1.3/Utils.h>
+
+#include <memory>
+
+// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface
+// lifetimes across processes and for protecting asynchronous calls across HIDL.
+
+namespace android::hardware::neuralnetworks::adapter {
+namespace {
+
+template <typename Type>
+auto convertInput(const Type& object) -> decltype(nn::convert(std::declval<Type>())) {
+ auto result = nn::convert(object);
+ if (!result.has_value()) {
+ result.error().code = nn::ErrorStatus::INVALID_ARGUMENT;
+ }
+ return result;
+}
+
+using PrepareModelResult = nn::GeneralResult<nn::SharedPreparedModel>;
+
+sp<PreparedModel> adaptPreparedModel(nn::SharedPreparedModel preparedModel, Executor executor) {
+ if (preparedModel == nullptr) {
+ return nullptr;
+ }
+ return sp<PreparedModel>::make(std::move(preparedModel), std::move(executor));
+}
+
+void notify(V1_0::IPreparedModelCallback* callback, nn::ErrorStatus status,
+ const sp<PreparedModel>& hidlPreparedModel) {
+ if (callback != nullptr) {
+ const auto hidlStatus = V1_0::utils::convert(status).value();
+ const auto ret = callback->notify(hidlStatus, hidlPreparedModel);
+ if (!ret.isOk()) {
+ LOG(ERROR) << "V1_0::IPreparedModelCallback::notify failed with " << ret.description();
+ }
+ }
+}
+
+void notify(V1_2::IPreparedModelCallback* callback, nn::ErrorStatus status,
+ const sp<PreparedModel>& hidlPreparedModel) {
+ if (callback != nullptr) {
+ const auto hidlStatus = V1_2::utils::convert(status).value();
+ const auto ret = callback->notify_1_2(hidlStatus, hidlPreparedModel);
+ if (!ret.isOk()) {
+ LOG(ERROR) << "V1_2::IPreparedModelCallback::notify_1_2 failed with "
+ << ret.description();
+ }
+ }
+}
+
+void notify(V1_3::IPreparedModelCallback* callback, nn::ErrorStatus status,
+ const sp<PreparedModel>& hidlPreparedModel) {
+ if (callback != nullptr) {
+ const auto hidlStatus = V1_3::utils::convert(status).value();
+ const auto ret = callback->notify_1_3(hidlStatus, hidlPreparedModel);
+ if (!ret.isOk()) {
+ LOG(ERROR) << "V1_3::IPreparedModelCallback::notify_1_3 failed with "
+ << ret.description();
+ }
+ }
+}
+
+template <typename CallbackType>
+void notify(CallbackType* callback, PrepareModelResult result, Executor executor) {
+ if (!result.has_value()) {
+ const auto [message, status] = std::move(result).error();
+ LOG(ERROR) << message;
+ notify(callback, status, nullptr);
+ } else {
+ auto preparedModel = std::move(result).value();
+ auto hidlPreparedModel = adaptPreparedModel(std::move(preparedModel), std::move(executor));
+ notify(callback, nn::ErrorStatus::NONE, std::move(hidlPreparedModel));
+ }
+}
+
+template <typename ModelType>
+nn::GeneralResult<hidl_vec<bool>> getSupportedOperations(const nn::SharedDevice& device,
+ const ModelType& model) {
+ const auto nnModel = NN_TRY(convertInput(model));
+ return NN_TRY(device->getSupportedOperations(nnModel));
+}
+
+nn::GeneralResult<void> prepareModel(const nn::SharedDevice& device, const Executor& executor,
+ const V1_0::Model& model,
+ const sp<V1_0::IPreparedModelCallback>& callback) {
+ if (callback.get() == nullptr) {
+ return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) << "Invalid callback";
+ }
+
+ auto nnModel = NN_TRY(convertInput(model));
+
+ Task task = [device, nnModel = std::move(nnModel), executor, callback] {
+ auto result = device->prepareModel(nnModel, nn::ExecutionPreference::DEFAULT,
+ nn::Priority::DEFAULT, {}, {}, {}, {});
+ notify(callback.get(), std::move(result), executor);
+ };
+ executor(std::move(task), {});
+
+ return {};
+}
+
+nn::GeneralResult<void> prepareModel_1_1(const nn::SharedDevice& device, const Executor& executor,
+ const V1_1::Model& model,
+ V1_1::ExecutionPreference preference,
+ const sp<V1_0::IPreparedModelCallback>& callback) {
+ if (callback.get() == nullptr) {
+ return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) << "Invalid callback";
+ }
+
+ auto nnModel = NN_TRY(convertInput(model));
+ const auto nnPreference = NN_TRY(convertInput(preference));
+
+ Task task = [device, nnModel = std::move(nnModel), nnPreference, executor, callback] {
+ auto result =
+ device->prepareModel(nnModel, nnPreference, nn::Priority::DEFAULT, {}, {}, {}, {});
+ notify(callback.get(), std::move(result), executor);
+ };
+ executor(std::move(task), {});
+
+ return {};
+}
+
+nn::GeneralResult<void> prepareModel_1_2(const nn::SharedDevice& device, const Executor& executor,
+ const V1_2::Model& model,
+ V1_1::ExecutionPreference preference,
+ const hidl_vec<hidl_handle>& modelCache,
+ const hidl_vec<hidl_handle>& dataCache,
+ const CacheToken& token,
+ const sp<V1_2::IPreparedModelCallback>& callback) {
+ if (callback.get() == nullptr) {
+ return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) << "Invalid callback";
+ }
+
+ auto nnModel = NN_TRY(convertInput(model));
+ const auto nnPreference = NN_TRY(convertInput(preference));
+ auto nnModelCache = NN_TRY(convertInput(modelCache));
+ auto nnDataCache = NN_TRY(convertInput(dataCache));
+ const auto nnToken = nn::CacheToken(token);
+
+ Task task = [device, nnModel = std::move(nnModel), nnPreference,
+ nnModelCache = std::move(nnModelCache), nnDataCache = std::move(nnDataCache),
+ nnToken, executor, callback] {
+ auto result = device->prepareModel(nnModel, nnPreference, nn::Priority::DEFAULT, {},
+ nnModelCache, nnDataCache, nnToken);
+ notify(callback.get(), std::move(result), executor);
+ };
+ executor(std::move(task), {});
+
+ return {};
+}
+
+nn::GeneralResult<void> prepareModel_1_3(
+ const nn::SharedDevice& device, const Executor& executor, const V1_3::Model& model,
+ V1_1::ExecutionPreference preference, V1_3::Priority priority,
+ const V1_3::OptionalTimePoint& deadline, const hidl_vec<hidl_handle>& modelCache,
+ const hidl_vec<hidl_handle>& dataCache, const CacheToken& token,
+ const sp<V1_3::IPreparedModelCallback>& callback) {
+ if (callback.get() == nullptr) {
+ return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) << "Invalid callback";
+ }
+
+ auto nnModel = NN_TRY(convertInput(model));
+ const auto nnPreference = NN_TRY(convertInput(preference));
+ const auto nnPriority = NN_TRY(convertInput(priority));
+ const auto nnDeadline = NN_TRY(convertInput(deadline));
+ auto nnModelCache = NN_TRY(convertInput(modelCache));
+ auto nnDataCache = NN_TRY(convertInput(dataCache));
+ const auto nnToken = nn::CacheToken(token);
+
+ Task task = [device, nnModel = std::move(nnModel), nnPreference, nnPriority, nnDeadline,
+ nnModelCache = std::move(nnModelCache), nnDataCache = std::move(nnDataCache),
+ nnToken, executor, callback] {
+ auto result = device->prepareModel(nnModel, nnPreference, nnPriority, nnDeadline,
+ nnModelCache, nnDataCache, nnToken);
+ notify(callback.get(), std::move(result), executor);
+ };
+ executor(std::move(task), nnDeadline);
+
+ return {};
+}
+
+nn::GeneralResult<void> prepareModelFromCache(const nn::SharedDevice& device,
+ const Executor& executor,
+ const hidl_vec<hidl_handle>& modelCache,
+ const hidl_vec<hidl_handle>& dataCache,
+ const CacheToken& token,
+ const sp<V1_2::IPreparedModelCallback>& callback) {
+ if (callback.get() == nullptr) {
+ return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) << "Invalid callback";
+ }
+
+ auto nnModelCache = NN_TRY(convertInput(modelCache));
+ auto nnDataCache = NN_TRY(convertInput(dataCache));
+ const auto nnToken = nn::CacheToken(token);
+
+ Task task = [device, nnModelCache = std::move(nnModelCache),
+ nnDataCache = std::move(nnDataCache), nnToken, executor, callback] {
+ auto result = device->prepareModelFromCache({}, nnModelCache, nnDataCache, nnToken);
+ notify(callback.get(), std::move(result), executor);
+ };
+ executor(std::move(task), {});
+
+ return {};
+}
+
+nn::GeneralResult<void> prepareModelFromCache_1_3(
+ const nn::SharedDevice& device, const Executor& executor,
+ const V1_3::OptionalTimePoint& deadline, const hidl_vec<hidl_handle>& modelCache,
+ const hidl_vec<hidl_handle>& dataCache, const CacheToken& token,
+ const sp<V1_3::IPreparedModelCallback>& callback) {
+ if (callback.get() == nullptr) {
+ return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) << "Invalid callback";
+ }
+
+ const auto nnDeadline = NN_TRY(convertInput(deadline));
+ auto nnModelCache = NN_TRY(convertInput(modelCache));
+ auto nnDataCache = NN_TRY(convertInput(dataCache));
+ const auto nnToken = nn::CacheToken(token);
+
+ auto task = [device, nnDeadline, nnModelCache = std::move(nnModelCache),
+ nnDataCache = std::move(nnDataCache), nnToken, executor, callback] {
+ auto result = device->prepareModelFromCache(nnDeadline, nnModelCache, nnDataCache, nnToken);
+ notify(callback.get(), std::move(result), executor);
+ };
+ executor(std::move(task), nnDeadline);
+
+ return {};
+}
+
+nn::GeneralResult<nn::SharedPreparedModel> downcast(const sp<V1_3::IPreparedModel>& preparedModel) {
+ if (preparedModel == nullptr) {
+ return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) << "preparedModel is nullptr";
+ }
+ if (preparedModel->isRemote()) {
+ return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) << "Cannot convert remote models";
+ }
+
+ // This static_cast is safe because adapter::PreparedModel is the only class that implements
+ // the IPreparedModel interface in the adapter service code.
+ const auto* casted = static_cast<const PreparedModel*>(preparedModel.get());
+ return casted->getUnderlyingPreparedModel();
+}
+
+nn::GeneralResult<std::vector<nn::SharedPreparedModel>> downcastAll(
+ const hidl_vec<sp<V1_3::IPreparedModel>>& preparedModels) {
+ std::vector<nn::SharedPreparedModel> canonical;
+ canonical.reserve(preparedModels.size());
+ for (const auto& preparedModel : preparedModels) {
+ canonical.push_back(NN_TRY(downcast(preparedModel)));
+ }
+ return canonical;
+}
+
+nn::GeneralResult<std::pair<sp<V1_3::IBuffer>, uint32_t>> allocate(
+ const nn::SharedDevice& device, const V1_3::BufferDesc& desc,
+ const hidl_vec<sp<V1_3::IPreparedModel>>& preparedModels,
+ const hidl_vec<V1_3::BufferRole>& inputRoles,
+ const hidl_vec<V1_3::BufferRole>& outputRoles) {
+ auto nnDesc = NN_TRY(convertInput(desc));
+ auto nnPreparedModels = NN_TRY(downcastAll(preparedModels));
+ auto nnInputRoles = NN_TRY(convertInput(inputRoles));
+ auto nnOutputRoles = NN_TRY(convertInput(outputRoles));
+
+ auto buffer = NN_TRY(device->allocate(nnDesc, nnPreparedModels, nnInputRoles, nnOutputRoles));
+
+ const nn::Request::MemoryDomainToken token = buffer->getToken();
+ auto hidlBuffer = sp<Buffer>::make(std::move(buffer));
+ return std::make_pair(std::move(hidlBuffer), static_cast<uint32_t>(token));
+}
+
+} // namespace
+
+Device::Device(nn::SharedDevice device, Executor executor)
+ : kDevice(std::move(device)), kExecutor(std::move(executor)) {
+ CHECK(kDevice != nullptr);
+ CHECK(kExecutor != nullptr);
+}
+
+Return<void> Device::getCapabilities(getCapabilities_cb cb) {
+ const auto capabilities = V1_0::utils::convert(kDevice->getCapabilities()).value();
+ cb(V1_0::ErrorStatus::NONE, capabilities);
+ return Void();
+}
+
+Return<void> Device::getCapabilities_1_1(getCapabilities_1_1_cb cb) {
+ const auto capabilities = V1_1::utils::convert(kDevice->getCapabilities()).value();
+ cb(V1_0::ErrorStatus::NONE, capabilities);
+ return Void();
+}
+
+Return<void> Device::getCapabilities_1_2(getCapabilities_1_2_cb cb) {
+ const auto capabilities = V1_2::utils::convert(kDevice->getCapabilities()).value();
+ cb(V1_0::ErrorStatus::NONE, capabilities);
+ return Void();
+}
+
+Return<void> Device::getCapabilities_1_3(getCapabilities_1_3_cb cb) {
+ const auto capabilities = V1_3::utils::convert(kDevice->getCapabilities()).value();
+ cb(V1_3::ErrorStatus::NONE, capabilities);
+ return Void();
+}
+
+Return<void> Device::getVersionString(getVersionString_cb cb) {
+ cb(V1_0::ErrorStatus::NONE, kDevice->getVersionString());
+ return Void();
+}
+
+Return<void> Device::getType(getType_cb cb) {
+ const auto maybeDeviceType = V1_2::utils::convert(kDevice->getType());
+ if (!maybeDeviceType.has_value()) {
+ const auto& [message, code] = maybeDeviceType.error();
+ LOG(ERROR) << "adapter::Device::getType failed with " << code << ": " << message;
+ cb(V1_2::utils::convert(code).value(), {});
+ } else {
+ cb(V1_0::ErrorStatus::NONE, maybeDeviceType.value());
+ }
+ return Void();
+}
+
+Return<void> Device::getSupportedExtensions(getSupportedExtensions_cb cb) {
+ const auto maybeSupportedExtensions = V1_2::utils::convert(kDevice->getSupportedExtensions());
+ if (!maybeSupportedExtensions.has_value()) {
+ const auto& [message, code] = maybeSupportedExtensions.error();
+ LOG(ERROR) << "adapter::Device::getSupportedExtensions failed with " << code << ": "
+ << message;
+ cb(V1_2::utils::convert(code).value(), {});
+ } else {
+ cb(V1_0::ErrorStatus::NONE, maybeSupportedExtensions.value());
+ }
+ return Void();
+}
+
+Return<void> Device::getSupportedOperations(const V1_0::Model& model,
+ getSupportedOperations_cb cb) {
+ const auto result = adapter::getSupportedOperations(kDevice, model);
+ if (!result.has_value()) {
+ const auto& [message, code] = result.error();
+ LOG(ERROR) << "adapter::Device::getSupportedOperations_1_0 failed with " << code << ": "
+ << message;
+ cb(V1_0::utils::convert(code).value(), {});
+ } else {
+ cb(V1_0::ErrorStatus::NONE, result.value());
+ }
+ return Void();
+}
+
+Return<void> Device::getSupportedOperations_1_1(const V1_1::Model& model,
+ getSupportedOperations_1_1_cb cb) {
+ const auto result = adapter::getSupportedOperations(kDevice, model);
+ if (!result.has_value()) {
+ const auto& [message, code] = result.error();
+ LOG(ERROR) << "adapter::Device::getSupportedOperations_1_1 failed with " << code << ": "
+ << message;
+ cb(V1_1::utils::convert(code).value(), {});
+ } else {
+ cb(V1_0::ErrorStatus::NONE, result.value());
+ }
+ return Void();
+}
+
+Return<void> Device::getSupportedOperations_1_2(const V1_2::Model& model,
+ getSupportedOperations_1_2_cb cb) {
+ const auto result = adapter::getSupportedOperations(kDevice, model);
+ if (!result.has_value()) {
+ const auto& [message, code] = result.error();
+ LOG(ERROR) << "adapter::Device::getSupportedOperations_1_2 failed with " << code << ": "
+ << message;
+ cb(V1_2::utils::convert(code).value(), {});
+ } else {
+ cb(V1_0::ErrorStatus::NONE, result.value());
+ }
+ return Void();
+}
+
+Return<void> Device::getSupportedOperations_1_3(const V1_3::Model& model,
+ getSupportedOperations_1_3_cb cb) {
+ const auto result = adapter::getSupportedOperations(kDevice, model);
+ if (!result.has_value()) {
+ const auto& [message, code] = result.error();
+ LOG(ERROR) << "adapter::Device::getSupportedOperations_1_3 failed with " << code << ": "
+ << message;
+ cb(V1_3::utils::convert(code).value(), {});
+ } else {
+ cb(V1_3::ErrorStatus::NONE, result.value());
+ }
+ return Void();
+}
+
+Return<void> Device::getNumberOfCacheFilesNeeded(getNumberOfCacheFilesNeeded_cb cb) {
+ const auto [numModelCache, numDataCache] = kDevice->getNumberOfCacheFilesNeeded();
+ cb(V1_0::ErrorStatus::NONE, numModelCache, numDataCache);
+ return Void();
+}
+
+Return<V1_0::ErrorStatus> Device::prepareModel(const V1_0::Model& model,
+ const sp<V1_0::IPreparedModelCallback>& callback) {
+ auto result = adapter::prepareModel(kDevice, kExecutor, model, callback);
+ if (!result.has_value()) {
+ auto [message, code] = std::move(result).error();
+ LOG(ERROR) << "adapter::Device::prepareModel failed with " << code << ": " << message;
+ notify(callback.get(), code, nullptr);
+ return V1_0::utils::convert(code).value();
+ }
+ return V1_0::ErrorStatus::NONE;
+}
+
+Return<V1_0::ErrorStatus> Device::prepareModel_1_1(
+ const V1_1::Model& model, V1_1::ExecutionPreference preference,
+ const sp<V1_0::IPreparedModelCallback>& callback) {
+ auto result = adapter::prepareModel_1_1(kDevice, kExecutor, model, preference, callback);
+ if (!result.has_value()) {
+ auto [message, code] = std::move(result).error();
+ LOG(ERROR) << "adapter::Device::prepareModel_1_1 failed with " << code << ": " << message;
+ notify(callback.get(), code, nullptr);
+ return V1_1::utils::convert(code).value();
+ }
+ return V1_0::ErrorStatus::NONE;
+}
+
+Return<V1_0::ErrorStatus> Device::prepareModel_1_2(
+ const V1_2::Model& model, V1_1::ExecutionPreference preference,
+ const hidl_vec<hidl_handle>& modelCache, const hidl_vec<hidl_handle>& dataCache,
+ const CacheToken& token, const sp<V1_2::IPreparedModelCallback>& callback) {
+ auto result = adapter::prepareModel_1_2(kDevice, kExecutor, model, preference, modelCache,
+ dataCache, token, callback);
+ if (!result.has_value()) {
+ auto [message, code] = std::move(result).error();
+ LOG(ERROR) << "adapter::Device::prepareModel_1_2 failed with " << code << ": " << message;
+ notify(callback.get(), code, nullptr);
+ return V1_2::utils::convert(code).value();
+ }
+ return V1_0::ErrorStatus::NONE;
+}
+
+Return<V1_3::ErrorStatus> Device::prepareModel_1_3(
+ const V1_3::Model& model, V1_1::ExecutionPreference preference, V1_3::Priority priority,
+ const V1_3::OptionalTimePoint& deadline, const hidl_vec<hidl_handle>& modelCache,
+ const hidl_vec<hidl_handle>& dataCache, const CacheToken& token,
+ const sp<V1_3::IPreparedModelCallback>& callback) {
+ auto result = adapter::prepareModel_1_3(kDevice, kExecutor, model, preference, priority,
+ deadline, modelCache, dataCache, token, callback);
+ if (!result.has_value()) {
+ auto [message, code] = std::move(result).error();
+ LOG(ERROR) << "adapter::Device::prepareModel_1_3 failed with " << code << ": " << message;
+ notify(callback.get(), code, nullptr);
+ return V1_3::utils::convert(code).value();
+ }
+ return V1_3::ErrorStatus::NONE;
+}
+
+Return<V1_0::ErrorStatus> Device::prepareModelFromCache(
+ const hidl_vec<hidl_handle>& modelCache, const hidl_vec<hidl_handle>& dataCache,
+ const CacheToken& token, const sp<V1_2::IPreparedModelCallback>& callback) {
+ auto result = adapter::prepareModelFromCache(kDevice, kExecutor, modelCache, dataCache, token,
+ callback);
+ if (!result.has_value()) {
+ auto [message, code] = std::move(result).error();
+ LOG(ERROR) << "adapter::Device::prepareModelFromCache failed with " << code << ": "
+ << message;
+ notify(callback.get(), code, nullptr);
+ return V1_2::utils::convert(code).value();
+ }
+ return V1_0::ErrorStatus::NONE;
+}
+
+Return<V1_3::ErrorStatus> Device::prepareModelFromCache_1_3(
+ const V1_3::OptionalTimePoint& deadline, const hidl_vec<hidl_handle>& modelCache,
+ const hidl_vec<hidl_handle>& dataCache, const CacheToken& token,
+ const sp<V1_3::IPreparedModelCallback>& callback) {
+ auto result = adapter::prepareModelFromCache_1_3(kDevice, kExecutor, deadline, modelCache,
+ dataCache, token, callback);
+ if (!result.has_value()) {
+ auto [message, code] = std::move(result).error();
+ LOG(ERROR) << "adapter::Device::prepareModelFromCache_1_3 failed with " << code << ": "
+ << message;
+ notify(callback.get(), code, nullptr);
+ return V1_3::utils::convert(code).value();
+ }
+ return V1_3::ErrorStatus::NONE;
+}
+
+Return<V1_0::DeviceStatus> Device::getStatus() {
+ return V1_0::DeviceStatus::AVAILABLE;
+}
+
+Return<void> Device::allocate(const V1_3::BufferDesc& desc,
+ const hidl_vec<sp<V1_3::IPreparedModel>>& preparedModels,
+ const hidl_vec<V1_3::BufferRole>& inputRoles,
+ const hidl_vec<V1_3::BufferRole>& outputRoles, allocate_cb cb) {
+ auto result = adapter::allocate(kDevice, desc, preparedModels, inputRoles, outputRoles);
+ if (!result.has_value()) {
+ const auto [message, code] = std::move(result).error();
+ LOG(ERROR) << "adapter::Device::allocate failed with " << code << ": " << message;
+ cb(V1_3::utils::convert(code).value(), nullptr, /*token=*/0);
+ return Void();
+ }
+ auto [buffer, token] = std::move(result).value();
+ cb(V1_3::ErrorStatus::NONE, buffer, token);
+ return Void();
+}
+
+} // namespace android::hardware::neuralnetworks::adapter
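
Every prepareModel* variant above follows one shape: validate the callback, convert the HIDL arguments to canonical types, capture everything in a Task, and hand the Task plus deadline to the Executor, reporting failures both through the callback and the synchronous return status. The Executor is pluggable. Below is a sketch of a serial alternative to the default detached-thread executor; it assumes, as the std::thread usage in Adapter.cpp suggests, that Task is a std::function<void()>-style callable, and it simply ignores deadlines:

    #include <condition_variable>
    #include <functional>
    #include <mutex>
    #include <queue>
    #include <thread>

    // A minimal FIFO worker. Wrapped in a lambda (see note below), it can serve
    // as the Executor passed to adapt() instead of spawning detached threads.
    class SerialWorker {
      public:
        SerialWorker() : mThread([this] { loop(); }) {}
        ~SerialWorker() {
            {
                std::lock_guard guard(mMutex);
                mDone = true;
            }
            mCondition.notify_one();
            mThread.join();
        }
        void post(std::function<void()> task) {
            {
                std::lock_guard guard(mMutex);
                mTasks.push(std::move(task));
            }
            mCondition.notify_one();
        }

      private:
        void loop() {
            for (;;) {
                std::function<void()> task;
                {
                    std::unique_lock lock(mMutex);
                    mCondition.wait(lock, [this] { return mDone || !mTasks.empty(); });
                    if (mTasks.empty()) return;  // mDone set and queue drained
                    task = std::move(mTasks.front());
                    mTasks.pop();
                }
                task();  // run outside the lock
            }
        }

        std::mutex mMutex;
        std::condition_variable mCondition;
        std::queue<std::function<void()>> mTasks;
        bool mDone = false;
        std::thread mThread;  // declared last so loop() sees initialized members
    };

Since Executor appears to be a copyable std::function type, the worker would be shared into the executor rather than moved, e.g. adapt(device, [worker = std::make_shared<SerialWorker>()](auto task, auto /*deadline*/) { worker->post(std::move(task)); }).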
diff --git a/neuralnetworks/utils/adapter/hidl/src/PreparedModel.cpp b/neuralnetworks/utils/adapter/hidl/src/PreparedModel.cpp
new file mode 100644
index 0000000000..71060d5ca7
--- /dev/null
+++ b/neuralnetworks/utils/adapter/hidl/src/PreparedModel.cpp
@@ -0,0 +1,433 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "PreparedModel.h"
+
+#include "Burst.h"
+
+#include <android-base/logging.h>
+#include <android/hardware/neuralnetworks/1.0/IExecutionCallback.h>
+#include <android/hardware/neuralnetworks/1.0/types.h>
+#include <android/hardware/neuralnetworks/1.2/IBurstCallback.h>
+#include <android/hardware/neuralnetworks/1.2/IExecutionCallback.h>
+#include <android/hardware/neuralnetworks/1.2/types.h>
+#include <android/hardware/neuralnetworks/1.3/IExecutionCallback.h>
+#include <android/hardware/neuralnetworks/1.3/IFencedExecutionCallback.h>
+#include <android/hardware/neuralnetworks/1.3/IPreparedModel.h>
+#include <android/hardware/neuralnetworks/1.3/types.h>
+#include <nnapi/IPreparedModel.h>
+#include <nnapi/TypeUtils.h>
+#include <nnapi/Types.h>
+#include <nnapi/Validation.h>
+#include <nnapi/hal/1.0/Utils.h>
+#include <nnapi/hal/1.2/Utils.h>
+#include <nnapi/hal/1.3/Conversions.h>
+#include <nnapi/hal/1.3/Utils.h>
+
+#include <memory>
+#include <thread>
+
+// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface
+// lifetimes across processes and for protecting asynchronous calls across HIDL.
+
+namespace android::hardware::neuralnetworks::adapter {
+namespace {
+
+template <typename Type>
+auto convertInput(const Type& object) -> decltype(nn::convert(std::declval<Type>())) {
+ auto result = nn::convert(object);
+ if (!result.has_value()) {
+ result.error().code = nn::ErrorStatus::INVALID_ARGUMENT;
+ }
+ return result;
+}
+
+nn::GeneralResult<nn::Version> validateRequestForModel(const nn::Request& request,
+ const nn::Model& model) {
+ nn::GeneralResult<nn::Version> version = nn::validateRequestForModel(request, model);
+ if (!version.ok()) {
+ version.error().code = nn::ErrorStatus::INVALID_ARGUMENT;
+ }
+ return version;
+}
+
+class FencedExecutionCallback final : public V1_3::IFencedExecutionCallback {
+ public:
+ explicit FencedExecutionCallback(const nn::ExecuteFencedInfoCallback& callback)
+ : kCallback(callback) {
+ CHECK(callback != nullptr);
+ }
+
+ Return<void> getExecutionInfo(getExecutionInfo_cb cb) override {
+ const auto result = kCallback();
+ if (!result.has_value()) {
+ const auto& [message, code] = result.error();
+ const auto status =
+ V1_3::utils::convert(code).value_or(V1_3::ErrorStatus::GENERAL_FAILURE);
+ LOG(ERROR) << message;
+ cb(status, V1_2::utils::kNoTiming, V1_2::utils::kNoTiming);
+ return Void();
+ }
+ const auto [timingLaunched, timingFenced] = result.value();
+ const auto hidlTimingLaunched = V1_3::utils::convert(timingLaunched).value();
+ const auto hidlTimingFenced = V1_3::utils::convert(timingFenced).value();
+ cb(V1_3::ErrorStatus::NONE, hidlTimingLaunched, hidlTimingFenced);
+ return Void();
+ }
+
+ private:
+ const nn::ExecuteFencedInfoCallback kCallback;
+};
+
+using ExecutionResult = nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>;
+
+void notify(V1_0::IExecutionCallback* callback, nn::ErrorStatus status,
+ const std::vector<nn::OutputShape>& /*outputShapes*/, const nn::Timing& /*timing*/) {
+ if (callback != nullptr) {
+ const auto hidlStatus = V1_0::utils::convert(status).value();
+ const auto ret = callback->notify(hidlStatus);
+ if (!ret.isOk()) {
+ LOG(ERROR) << "V1_0::IExecutionCallback::notify failed with " << ret.description();
+ }
+ }
+}
+
+void notify(V1_2::IExecutionCallback* callback, nn::ErrorStatus status,
+ const std::vector<nn::OutputShape>& outputShapes, const nn::Timing& timing) {
+ if (callback != nullptr) {
+ const auto hidlStatus = V1_2::utils::convert(status).value();
+ const auto hidlOutputShapes = V1_2::utils::convert(outputShapes).value();
+ const auto hidlTiming = V1_2::utils::convert(timing).value();
+ const auto ret = callback->notify_1_2(hidlStatus, hidlOutputShapes, hidlTiming);
+ if (!ret.isOk()) {
+ LOG(ERROR) << "V1_2::IExecutionCallback::notify_1_2 failed with " << ret.description();
+ }
+ }
+}
+
+void notify(V1_3::IExecutionCallback* callback, nn::ErrorStatus status,
+ const std::vector<nn::OutputShape>& outputShapes, const nn::Timing& timing) {
+ if (callback != nullptr) {
+ const auto hidlStatus = V1_3::utils::convert(status).value();
+ const auto hidlOutputShapes = V1_3::utils::convert(outputShapes).value();
+ const auto hidlTiming = V1_3::utils::convert(timing).value();
+ const auto ret = callback->notify_1_3(hidlStatus, hidlOutputShapes, hidlTiming);
+ if (!ret.isOk()) {
+ LOG(ERROR) << "V1_3::IExecutionCallback::notify_1_3 failed with " << ret.description();
+ }
+ }
+}
+
+template <typename CallbackType>
+void notify(CallbackType* callback, ExecutionResult result) {
+ if (!result.has_value()) {
+ const auto [message, status, outputShapes] = std::move(result).error();
+ LOG(ERROR) << message;
+ notify(callback, status, outputShapes, {});
+ } else {
+ const auto [outputShapes, timing] = std::move(result).value();
+ notify(callback, nn::ErrorStatus::NONE, outputShapes, timing);
+ }
+}
+
+nn::GeneralResult<void> execute(const nn::SharedPreparedModel& preparedModel,
+ const Executor& executor, const V1_0::Request& request,
+ const sp<V1_0::IExecutionCallback>& callback) {
+ if (callback.get() == nullptr) {
+ return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) << "Invalid callback";
+ }
+
+ auto nnRequest = NN_TRY(convertInput(request));
+
+ const std::any resource = preparedModel->getUnderlyingResource();
+ if (const auto* model = std::any_cast<const nn::Model*>(&resource)) {
+ CHECK(*model != nullptr);
+ NN_TRY(adapter::validateRequestForModel(nnRequest, **model));
+ }
+
+ Task task = [preparedModel, nnRequest = std::move(nnRequest), callback] {
+ auto result = preparedModel->execute(nnRequest, nn::MeasureTiming::NO, {}, {});
+ notify(callback.get(), std::move(result));
+ };
+ executor(std::move(task), {});
+
+ return {};
+}
+
+nn::GeneralResult<void> execute_1_2(const nn::SharedPreparedModel& preparedModel,
+ const Executor& executor, const V1_0::Request& request,
+ V1_2::MeasureTiming measure,
+ const sp<V1_2::IExecutionCallback>& callback) {
+ if (callback.get() == nullptr) {
+ return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) << "Invalid callback";
+ }
+
+ auto nnRequest = NN_TRY(convertInput(request));
+ const auto nnMeasure = NN_TRY(convertInput(measure));
+
+ const std::any resource = preparedModel->getUnderlyingResource();
+ if (const auto* model = std::any_cast<const nn::Model*>(&resource)) {
+ CHECK(*model != nullptr);
+ NN_TRY(adapter::validateRequestForModel(nnRequest, **model));
+ }
+
+ Task task = [preparedModel, nnRequest = std::move(nnRequest), nnMeasure, callback] {
+ auto result = preparedModel->execute(nnRequest, nnMeasure, {}, {});
+ notify(callback.get(), std::move(result));
+ };
+ executor(std::move(task), {});
+
+ return {};
+}
+
+nn::GeneralResult<void> execute_1_3(const nn::SharedPreparedModel& preparedModel,
+ const Executor& executor, const V1_3::Request& request,
+ V1_2::MeasureTiming measure,
+ const V1_3::OptionalTimePoint& deadline,
+ const V1_3::OptionalTimeoutDuration& loopTimeoutDuration,
+ const sp<V1_3::IExecutionCallback>& callback) {
+ if (callback.get() == nullptr) {
+ return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) << "Invalid callback";
+ }
+
+ auto nnRequest = NN_TRY(convertInput(request));
+ const auto nnMeasure = NN_TRY(convertInput(measure));
+ const auto nnDeadline = NN_TRY(convertInput(deadline));
+ const auto nnLoopTimeoutDuration = NN_TRY(convertInput(loopTimeoutDuration));
+
+ const std::any resource = preparedModel->getUnderlyingResource();
+ if (const auto* model = std::any_cast<const nn::Model*>(&resource)) {
+ CHECK(*model != nullptr);
+ NN_TRY(adapter::validateRequestForModel(nnRequest, **model));
+ }
+
+ Task task = [preparedModel, nnRequest = std::move(nnRequest), nnMeasure, nnDeadline,
+ nnLoopTimeoutDuration, callback] {
+ auto result =
+ preparedModel->execute(nnRequest, nnMeasure, nnDeadline, nnLoopTimeoutDuration);
+ notify(callback.get(), std::move(result));
+ };
+ executor(std::move(task), nnDeadline);
+
+ return {};
+}
+
+nn::ExecutionResult<std::pair<hidl_vec<V1_2::OutputShape>, V1_2::Timing>> executeSynchronously(
+ const nn::SharedPreparedModel& preparedModel, const V1_0::Request& request,
+ V1_2::MeasureTiming measure) {
+ const auto nnRequest = NN_TRY(convertInput(request));
+ const auto nnMeasure = NN_TRY(convertInput(measure));
+
+ const auto [outputShapes, timing] =
+ NN_TRY(preparedModel->execute(nnRequest, nnMeasure, {}, {}));
+
+ auto hidlOutputShapes = NN_TRY(V1_2::utils::convert(outputShapes));
+ const auto hidlTiming = NN_TRY(V1_2::utils::convert(timing));
+ return std::make_pair(std::move(hidlOutputShapes), hidlTiming);
+}
+
+nn::ExecutionResult<std::pair<hidl_vec<V1_2::OutputShape>, V1_2::Timing>> executeSynchronously_1_3(
+ const nn::SharedPreparedModel& preparedModel, const V1_3::Request& request,
+ V1_2::MeasureTiming measure, const V1_3::OptionalTimePoint& deadline,
+ const V1_3::OptionalTimeoutDuration& loopTimeoutDuration) {
+ const auto nnRequest = NN_TRY(convertInput(request));
+ const auto nnMeasure = NN_TRY(convertInput(measure));
+ const auto nnDeadline = NN_TRY(convertInput(deadline));
+ const auto nnLoopTimeoutDuration = NN_TRY(convertInput(loopTimeoutDuration));
+
+ const auto [outputShapes, timing] =
+ NN_TRY(preparedModel->execute(nnRequest, nnMeasure, nnDeadline, nnLoopTimeoutDuration));
+
+ auto hidlOutputShapes = NN_TRY(V1_3::utils::convert(outputShapes));
+ const auto hidlTiming = NN_TRY(V1_3::utils::convert(timing));
+ return std::make_pair(std::move(hidlOutputShapes), hidlTiming);
+}
+
+nn::GeneralResult<std::vector<nn::SyncFence>> convertSyncFences(
+ const hidl_vec<hidl_handle>& handles) {
+ auto nnHandles = NN_TRY(convertInput(handles));
+ std::vector<nn::SyncFence> syncFences;
+ syncFences.reserve(handles.size());
+ for (auto&& handle : nnHandles) {
+ if (auto syncFence = nn::SyncFence::create(std::move(handle)); !syncFence.ok()) {
+ return nn::error(nn::ErrorStatus::INVALID_ARGUMENT) << std::move(syncFence).error();
+ } else {
+ syncFences.push_back(std::move(syncFence).value());
+ }
+ }
+ return syncFences;
+}
+
+nn::GeneralResult<sp<V1_2::IBurstContext>> configureExecutionBurst(
+ const nn::SharedPreparedModel& preparedModel, const sp<V1_2::IBurstCallback>& callback,
+ const MQDescriptorSync<V1_2::FmqRequestDatum>& requestChannel,
+ const MQDescriptorSync<V1_2::FmqResultDatum>& resultChannel) {
+ auto burstExecutor = NN_TRY(preparedModel->configureExecutionBurst());
+ return Burst::create(callback, requestChannel, resultChannel, std::move(burstExecutor),
+ V1_2::utils::getBurstServerPollingTimeWindow());
+}
+
+nn::GeneralResult<std::pair<hidl_handle, sp<V1_3::IFencedExecutionCallback>>> executeFenced(
+ const nn::SharedPreparedModel& preparedModel, const V1_3::Request& request,
+ const hidl_vec<hidl_handle>& waitFor, V1_2::MeasureTiming measure,
+ const V1_3::OptionalTimePoint& deadline,
+ const V1_3::OptionalTimeoutDuration& loopTimeoutDuration,
+ const V1_3::OptionalTimeoutDuration& duration) {
+ const auto nnRequest = NN_TRY(convertInput(request));
+ const auto nnWaitFor = NN_TRY(convertSyncFences(waitFor));
+ const auto nnMeasure = NN_TRY(convertInput(measure));
+ const auto nnDeadline = NN_TRY(convertInput(deadline));
+ const auto nnLoopTimeoutDuration = NN_TRY(convertInput(loopTimeoutDuration));
+ const auto nnDuration = NN_TRY(convertInput(duration));
+
+ auto [syncFence, executeFencedCallback] = NN_TRY(preparedModel->executeFenced(
+ nnRequest, nnWaitFor, nnMeasure, nnDeadline, nnLoopTimeoutDuration, nnDuration));
+
+ auto hidlSyncFence = NN_TRY(V1_3::utils::convert(syncFence.getSharedHandle()));
+ auto hidlExecuteFencedCallback = sp<FencedExecutionCallback>::make(executeFencedCallback);
+ return std::make_pair(std::move(hidlSyncFence), std::move(hidlExecuteFencedCallback));
+}
+
+} // namespace
+
+PreparedModel::PreparedModel(nn::SharedPreparedModel preparedModel, Executor executor)
+ : kPreparedModel(std::move(preparedModel)), kExecutor(std::move(executor)) {
+ CHECK(kPreparedModel != nullptr);
+ CHECK(kExecutor != nullptr);
+}
+
+nn::SharedPreparedModel PreparedModel::getUnderlyingPreparedModel() const {
+ return kPreparedModel;
+}
+
+Return<V1_0::ErrorStatus> PreparedModel::execute(const V1_0::Request& request,
+ const sp<V1_0::IExecutionCallback>& callback) {
+ auto result = adapter::execute(kPreparedModel, kExecutor, request, callback);
+ if (!result.has_value()) {
+ auto [message, code] = std::move(result).error();
+ LOG(ERROR) << "adapter::PreparedModel::execute failed with " << code << ": " << message;
+ notify(callback.get(), code, {}, {});
+ return V1_0::utils::convert(code).value();
+ }
+ return V1_0::ErrorStatus::NONE;
+}
+
+Return<V1_0::ErrorStatus> PreparedModel::execute_1_2(const V1_0::Request& request,
+ V1_2::MeasureTiming measure,
+ const sp<V1_2::IExecutionCallback>& callback) {
+ auto result = adapter::execute_1_2(kPreparedModel, kExecutor, request, measure, callback);
+ if (!result.has_value()) {
+ auto [message, code] = std::move(result).error();
+ LOG(ERROR) << "adapter::PreparedModel::execute_1_2 failed with " << code << ": " << message;
+ notify(callback.get(), code, {}, {});
+ return V1_2::utils::convert(code).value();
+ }
+ return V1_0::ErrorStatus::NONE;
+}
+
+Return<V1_3::ErrorStatus> PreparedModel::execute_1_3(
+ const V1_3::Request& request, V1_2::MeasureTiming measure,
+ const V1_3::OptionalTimePoint& deadline,
+ const V1_3::OptionalTimeoutDuration& loopTimeoutDuration,
+ const sp<V1_3::IExecutionCallback>& callback) {
+ auto result = adapter::execute_1_3(kPreparedModel, kExecutor, request, measure, deadline,
+ loopTimeoutDuration, callback);
+ if (!result.has_value()) {
+ auto [message, code] = std::move(result).error();
+ LOG(ERROR) << "adapter::PreparedModel::execute_1_3 failed with " << code << ": " << message;
+ notify(callback.get(), code, {}, {});
+ return V1_3::utils::convert(code).value();
+ }
+ return V1_3::ErrorStatus::NONE;
+}
+
+Return<void> PreparedModel::executeSynchronously(const V1_0::Request& request,
+ V1_2::MeasureTiming measure,
+ executeSynchronously_cb cb) {
+ auto result = adapter::executeSynchronously(kPreparedModel, request, measure);
+ if (!result.has_value()) {
+ auto [message, code, outputShapes] = std::move(result).error();
+ LOG(ERROR) << "adapter::PreparedModel::executeSynchronously failed with " << code << ": "
+ << message;
+ cb(V1_2::utils::convert(code).value(), V1_2::utils::convert(outputShapes).value(),
+ V1_2::utils::kNoTiming);
+ return Void();
+ }
+ auto [outputShapes, timing] = std::move(result).value();
+ cb(V1_0::ErrorStatus::NONE, outputShapes, timing);
+ return Void();
+}
+
+Return<void> PreparedModel::executeSynchronously_1_3(
+ const V1_3::Request& request, V1_2::MeasureTiming measure,
+ const V1_3::OptionalTimePoint& deadline,
+ const V1_3::OptionalTimeoutDuration& loopTimeoutDuration, executeSynchronously_1_3_cb cb) {
+ auto result = adapter::executeSynchronously_1_3(kPreparedModel, request, measure, deadline,
+ loopTimeoutDuration);
+ if (!result.has_value()) {
+ auto [message, code, outputShapes] = std::move(result).error();
+ LOG(ERROR) << "adapter::PreparedModel::executeSynchronously_1_3 failed with " << code
+ << ": " << message;
+ cb(V1_3::utils::convert(code).value(), V1_3::utils::convert(outputShapes).value(),
+ V1_2::utils::kNoTiming);
+ return Void();
+ }
+ auto [outputShapes, timing] = std::move(result).value();
+ cb(V1_3::ErrorStatus::NONE, outputShapes, timing);
+ return Void();
+}
+
+Return<void> PreparedModel::configureExecutionBurst(
+ const sp<V1_2::IBurstCallback>& callback,
+ const MQDescriptorSync<V1_2::FmqRequestDatum>& requestChannel,
+ const MQDescriptorSync<V1_2::FmqResultDatum>& resultChannel,
+ configureExecutionBurst_cb cb) {
+ auto result = adapter::configureExecutionBurst(kPreparedModel, callback, requestChannel,
+ resultChannel);
+ if (!result.has_value()) {
+ auto [message, code] = std::move(result).error();
+ LOG(ERROR) << "adapter::PreparedModel::configureExecutionBurst failed with " << code << ": "
+ << message;
+ cb(V1_2::utils::convert(code).value(), nullptr);
+ return Void();
+ }
+ auto burstContext = std::move(result).value();
+ cb(V1_0::ErrorStatus::NONE, std::move(burstContext));
+ return Void();
+}
+
+Return<void> PreparedModel::executeFenced(const V1_3::Request& request,
+ const hidl_vec<hidl_handle>& waitFor,
+ V1_2::MeasureTiming measure,
+ const V1_3::OptionalTimePoint& deadline,
+ const V1_3::OptionalTimeoutDuration& loopTimeoutDuration,
+ const V1_3::OptionalTimeoutDuration& duration,
+ executeFenced_cb callback) {
+ auto result = adapter::executeFenced(kPreparedModel, request, waitFor, measure, deadline,
+ loopTimeoutDuration, duration);
+ if (!result.has_value()) {
+ auto [message, code] = std::move(result).error();
+ LOG(ERROR) << "adapter::PreparedModel::executeFenced failed with " << code << ": "
+ << message;
+ callback(V1_3::utils::convert(code).value(), {}, nullptr);
+ return Void();
+ }
+ auto [syncFence, executeFencedCallback] = std::move(result).value();
+ callback(V1_3::ErrorStatus::NONE, syncFence, executeFencedCallback);
+ return Void();
+}
+
+} // namespace android::hardware::neuralnetworks::adapter
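
One pattern worth noting throughout PreparedModel (and Device above) is the pairing of per-version notify() overloads with a single template notify() entry point: the template unpacks the result once, and ordinary overload resolution selects the correct HIDL callback method for whichever callback type the caller holds. A minimal standalone sketch of the idiom, with hypothetical CallbackV1/CallbackV2 structs standing in for the generated HIDL callback interfaces:

    #include <iostream>

    // Hypothetical stand-ins for two generations of a HIDL callback interface.
    struct CallbackV1 {
        void notify(int status) { std::cout << "notify(" << status << ")\n"; }
    };
    struct CallbackV2 {
        void notify_1_2(int status) { std::cout << "notify_1_2(" << status << ")\n"; }
    };

    // Per-version overloads, one per callback generation.
    void notify(CallbackV1* callback, int status) {
        if (callback != nullptr) callback->notify(status);
    }
    void notify(CallbackV2* callback, int status) {
        if (callback != nullptr) callback->notify_1_2(status);
    }

    // Single template entry point: unpack the result once, then let overload
    // resolution pick the matching per-version notifier at compile time.
    template <typename CallbackType>
    void notifyResult(CallbackType* callback, bool success) {
        notify(callback, success ? 0 : -1);
    }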