From fadeb8a92046c6941db721145de3514cf6015079 Mon Sep 17 00:00:00 2001
From: Michael Butler
Date: Sun, 7 Feb 2021 00:11:13 -0800
Subject: Change NNAPI Memory to ref-counted SharedMemory -- hal

Bug: 179906132
Test: mma
Test: NeuralNetworksTest_static
Test: presubmit
Change-Id: I6435db906a2efe4938da18149a1fcd6d24730a95
Merged-In: I6435db906a2efe4938da18149a1fcd6d24730a95
(cherry picked from commit 79a16ebb6f42c21a21202f7b63ce372f2df15137)
---
 neuralnetworks/utils/common/src/CommonUtils.cpp | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'neuralnetworks/utils/common/src/CommonUtils.cpp')

diff --git a/neuralnetworks/utils/common/src/CommonUtils.cpp b/neuralnetworks/utils/common/src/CommonUtils.cpp
index c04c8dfa8b..90215630c9 100644
--- a/neuralnetworks/utils/common/src/CommonUtils.cpp
+++ b/neuralnetworks/utils/common/src/CommonUtils.cpp
@@ -203,13 +203,13 @@ nn::GeneralResult<std::reference_wrapper<const nn::Request>> flushDataFromPointe
 nn::GeneralResult<void> unflushDataFromSharedToPointer(
         const nn::Request& request, const std::optional<nn::Request>& maybeRequestInShared) {
     if (!maybeRequestInShared.has_value() || maybeRequestInShared->pools.empty() ||
-        !std::holds_alternative<nn::Memory>(maybeRequestInShared->pools.back())) {
+        !std::holds_alternative<nn::SharedMemory>(maybeRequestInShared->pools.back())) {
         return {};
     }
     const auto& requestInShared = *maybeRequestInShared;
 
     // Map the memory.
-    const auto& outputMemory = std::get<nn::Memory>(requestInShared.pools.back());
+    const auto& outputMemory = std::get<nn::SharedMemory>(requestInShared.pools.back());
     const auto [pointer, size, context] = NN_TRY(map(outputMemory));
     const uint8_t* constantPointer =
             std::visit([](const auto& o) { return static_cast<const uint8_t*>(o); }, pointer);
-- cgit v1.2.3
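[Sketch for context: in the canonical NN types, nn::SharedMemory is a
std::shared_ptr<const nn::Memory>, so the request-pool check above now matches
a ref-counted pointer rather than a by-value nn::Memory, and copying a pool no
longer duplicates file descriptors. The stand-in types below are simplified for
illustration; the real definitions live in nnapi/Types.h.]

    #include <memory>
    #include <variant>
    #include <vector>

    // Illustrative stand-ins only; the real Memory carries handle/size/name fields.
    struct Memory {};
    using SharedMemory = std::shared_ptr<const Memory>;  // ref-counted, per the patch
    using RequestPool = std::variant<SharedMemory, int /* other pool kinds elided */>;

    // Mirrors the holds_alternative check in unflushDataFromSharedToPointer():
    // only unflush when the last pool is a shared memory object.
    bool lastPoolIsSharedMemory(const std::vector<RequestPool>& pools) {
        return !pools.empty() && std::holds_alternative<SharedMemory>(pools.back());
    }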
From ab2f482af37540942e2d1702e062a29575e8178d Mon Sep 17 00:00:00 2001
From: Michael Butler
Date: Mon, 8 Feb 2021 00:05:07 -0800
Subject: Store AHWB in NN canonical memory type -- hal

Prior to this CL, the canonical memory type only held a SharedHandle,
which mirrors the behavior of native_handle_t/hidl_handle. This means
memory types including AHardwareBuffer were stored as this SharedHandle
type. With this CL, the canonical memory type is stored directly as
AHardwareBuffer to avoid using non-NDK AHardwareBuffer calls in the NN
runtime.

Bug: 179906132
Test: mma
Test: NeuralNetworksTest_static
Test: presubmit
Change-Id: I394071c193d15ac0c90ac47e5a2a9a79c635db6c
Merged-In: I394071c193d15ac0c90ac47e5a2a9a79c635db6c
(cherry picked from commit bbe43d950e981cfb5c06622c8f80b57ab60b0497)
---
 neuralnetworks/utils/common/src/CommonUtils.cpp | 121 ++++++++++++++++++++----
 1 file changed, 104 insertions(+), 17 deletions(-)

(limited to 'neuralnetworks/utils/common/src/CommonUtils.cpp')

diff --git a/neuralnetworks/utils/common/src/CommonUtils.cpp b/neuralnetworks/utils/common/src/CommonUtils.cpp
index 90215630c9..7a5035f6fc 100644
--- a/neuralnetworks/utils/common/src/CommonUtils.cpp
+++ b/neuralnetworks/utils/common/src/CommonUtils.cpp
@@ -20,11 +20,14 @@
 
 #include <android-base/logging.h>
 #include <android-base/unique_fd.h>
+#include <android/hardware_buffer.h>
+#include <cutils/native_handle.h>
 #include <hidl/HidlSupport.h>
 #include <nnapi/Result.h>
 #include <nnapi/SharedMemory.h>
 #include <nnapi/TypeUtils.h>
 #include <nnapi/Types.h>
+#include <vndk/hardware_buffer.h>
 #include <nnapi/Validation.h>
 
 #include <algorithm>
@@ -248,44 +251,128 @@ std::vector<uint32_t> countNumberOfConsumers(size_t numberOfOperands,
     return nn::countNumberOfConsumers(numberOfOperands, operations);
 }
 
-nn::GeneralResult<hidl_handle> hidlHandleFromSharedHandle(const nn::SharedHandle& handle) {
-    if (handle == nullptr) {
-        return {};
+nn::GeneralResult<hidl_memory> createHidlMemoryFromSharedMemory(const nn::SharedMemory& memory) {
+    if (memory == nullptr) {
+        return NN_ERROR() << "Memory must be non-empty";
+    }
+    if (const auto* handle = std::get_if<nn::Handle>(&memory->handle)) {
+        return hidl_memory(memory->name, NN_TRY(hidlHandleFromSharedHandle(*handle)), memory->size);
+    }
+
+    const auto* ahwb = std::get<nn::HardwareBufferHandle>(memory->handle).get();
+    AHardwareBuffer_Desc bufferDesc;
+    AHardwareBuffer_describe(ahwb, &bufferDesc);
+
+    if (bufferDesc.format == AHARDWAREBUFFER_FORMAT_BLOB) {
+        CHECK_EQ(memory->size, bufferDesc.width);
+        CHECK_EQ(memory->name, "hardware_buffer_blob");
+    } else {
+        CHECK_EQ(memory->size, 0u);
+        CHECK_EQ(memory->name, "hardware_buffer");
     }
 
+    const native_handle_t* nativeHandle = AHardwareBuffer_getNativeHandle(ahwb);
+    const hidl_handle hidlHandle(nativeHandle);
+    hidl_handle handle(hidlHandle);
+
+    return hidl_memory(memory->name, std::move(handle), memory->size);
+}
+
+static uint32_t roundUpToMultiple(uint32_t value, uint32_t multiple) {
+    return (value + multiple - 1) / multiple * multiple;
+}
+
+nn::GeneralResult<nn::SharedMemory> createSharedMemoryFromHidlMemory(const hidl_memory& memory) {
+    CHECK_LE(memory.size(), std::numeric_limits<uint32_t>::max());
+
+    if (memory.name() != "hardware_buffer_blob") {
+        return std::make_shared<const nn::Memory>(nn::Memory{
+                .handle = NN_TRY(sharedHandleFromNativeHandle(memory.handle())),
+                .size = static_cast<uint32_t>(memory.size()),
+                .name = memory.name(),
+        });
+    }
+
+    const auto size = memory.size();
+    const auto format = AHARDWAREBUFFER_FORMAT_BLOB;
+    const auto usage = AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN | AHARDWAREBUFFER_USAGE_CPU_WRITE_OFTEN;
+    const uint32_t width = size;
+    const uint32_t height = 1;  // height is always 1 for BLOB mode AHardwareBuffer.
+    const uint32_t layers = 1;  // layers is always 1 for BLOB mode AHardwareBuffer.
+
+    // AHardwareBuffer_createFromHandle() might fail because an allocator
+    // expects a specific stride value. In that case, we try to guess it by
+    // aligning the width to small powers of 2.
+    // TODO(b/174120849): Avoid stride assumptions.
+    AHardwareBuffer* hardwareBuffer = nullptr;
+    status_t status = UNKNOWN_ERROR;
+    for (uint32_t alignment : {1, 4, 32, 64, 128, 2, 8, 16}) {
+        const uint32_t stride = roundUpToMultiple(width, alignment);
+        AHardwareBuffer_Desc desc{
+                .width = width,
+                .height = height,
+                .layers = layers,
+                .format = format,
+                .usage = usage,
+                .stride = stride,
+        };
+        status = AHardwareBuffer_createFromHandle(&desc, memory.handle(),
+                                                  AHARDWAREBUFFER_CREATE_FROM_HANDLE_METHOD_CLONE,
+                                                  &hardwareBuffer);
+        if (status == NO_ERROR) {
+            break;
+        }
+    }
+    if (status != NO_ERROR) {
+        return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
+               << "Can't create AHardwareBuffer from handle. Error: " << status;
+    }
+
+    return std::make_shared<const nn::Memory>(nn::Memory{
+            .handle = nn::HardwareBufferHandle(hardwareBuffer, /*takeOwnership=*/true),
+            .size = static_cast<uint32_t>(memory.size()),
+            .name = memory.name(),
+    });
+}
+
+nn::GeneralResult<hidl_handle> hidlHandleFromSharedHandle(const nn::Handle& handle) {
     std::vector<base::unique_fd> fds;
-    fds.reserve(handle->fds.size());
-    for (const auto& fd : handle->fds) {
-        int dupFd = dup(fd);
+    fds.reserve(handle.fds.size());
+    for (const auto& fd : handle.fds) {
+        const int dupFd = dup(fd);
         if (dupFd == -1) {
             return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "Failed to dup the fd";
         }
         fds.emplace_back(dupFd);
     }
 
-    native_handle_t* nativeHandle = native_handle_create(handle->fds.size(), handle->ints.size());
+    constexpr size_t kIntMax = std::numeric_limits<int>::max();
+    CHECK_LE(handle.fds.size(), kIntMax);
+    CHECK_LE(handle.ints.size(), kIntMax);
+    native_handle_t* nativeHandle = native_handle_create(static_cast<int>(handle.fds.size()),
+                                                         static_cast<int>(handle.ints.size()));
     if (nativeHandle == nullptr) {
         return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "Failed to create native_handle";
     }
     for (size_t i = 0; i < fds.size(); ++i) {
         nativeHandle->data[i] = fds[i].release();
     }
-    std::copy(handle->ints.begin(), handle->ints.end(), &nativeHandle->data[nativeHandle->numFds]);
+    std::copy(handle.ints.begin(), handle.ints.end(), &nativeHandle->data[nativeHandle->numFds]);
 
     hidl_handle hidlHandle;
     hidlHandle.setTo(nativeHandle, /*shouldOwn=*/true);
     return hidlHandle;
 }
 
-nn::GeneralResult<nn::SharedHandle> sharedHandleFromNativeHandle(const native_handle_t* handle) {
+nn::GeneralResult<nn::Handle> sharedHandleFromNativeHandle(const native_handle_t* handle) {
     if (handle == nullptr) {
-        return nullptr;
+        return NN_ERROR() << "sharedHandleFromNativeHandle failed because handle is nullptr";
     }
 
     std::vector<base::unique_fd> fds;
     fds.reserve(handle->numFds);
     for (int i = 0; i < handle->numFds; ++i) {
-        int dupFd = dup(handle->data[i]);
+        const int dupFd = dup(handle->data[i]);
         if (dupFd == -1) {
             return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "Failed to dup the fd";
         }
@@ -295,18 +382,18 @@ nn::GeneralResult<nn::SharedHandle> sharedHandleFromNativeHandle(const native_ha
     std::vector<int> ints(&handle->data[handle->numFds],
                           &handle->data[handle->numFds + handle->numInts]);
 
-    return std::make_shared<const nn::Handle>(nn::Handle{
-            .fds = std::move(fds),
-            .ints = std::move(ints),
-    });
+    return nn::Handle{.fds = std::move(fds), .ints = std::move(ints)};
 }
 
 nn::GeneralResult<hidl_vec<hidl_handle>> convertSyncFences(
         const std::vector<nn::SyncFence>& syncFences) {
     hidl_vec<hidl_handle> handles(syncFences.size());
     for (size_t i = 0; i < syncFences.size(); ++i) {
-        handles[i] =
-                NN_TRY(hal::utils::hidlHandleFromSharedHandle(syncFences[i].getSharedHandle()));
+        const auto& handle = syncFences[i].getSharedHandle();
+        if (handle == nullptr) {
+            return NN_ERROR() << "convertSyncFences failed because sync fence is empty";
+        }
+        handles[i] = NN_TRY(hidlHandleFromSharedHandle(*handle));
     }
     return handles;
 }
-- cgit v1.2.3
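[Sketch for context: the second patch assumes the canonical nn::Memory handle
is a variant that can carry an AHardwareBuffer directly, owned through an RAII
wrapper. The class below is a hypothetical stand-in for nn::HardwareBufferHandle,
written only to illustrate the ownership transfer in the takeOwnership=true
case above; it is not the real definition.]

    #include <android/hardware_buffer.h>

    class HardwareBufferHandleSketch {
      public:
        // With takeOwnership == true the wrapper adopts the caller's reference,
        // matching HardwareBufferHandle(hardwareBuffer, /*takeOwnership=*/true)
        // in the patch; otherwise it takes its own reference on the buffer.
        HardwareBufferHandleSketch(AHardwareBuffer* handle, bool takeOwnership)
            : mHandle(handle) {
            if (!takeOwnership) AHardwareBuffer_acquire(mHandle);
        }
        ~HardwareBufferHandleSketch() {
            if (mHandle != nullptr) AHardwareBuffer_release(mHandle);
        }
        HardwareBufferHandleSketch(const HardwareBufferHandleSketch&) = delete;
        HardwareBufferHandleSketch& operator=(const HardwareBufferHandleSketch&) = delete;

        AHardwareBuffer* get() const { return mHandle; }  // as used via .get() above

      private:
        AHardwareBuffer* mHandle = nullptr;
    };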
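[Worked example: roundUpToMultiple() below is copied verbatim from the patch;
the values are illustrative. For a 100-byte BLOB buffer, the retry loop in
createSharedMemoryFromHidlMemory() would try strides 100, 100, 128, 128,
128, ... as the alignment candidates {1, 4, 32, 64, 128, 2, 8, 16} are applied.]

    #include <cassert>
    #include <cstdint>

    // Same arithmetic as the helper in the patch: round value up to the
    // nearest multiple.
    static uint32_t roundUpToMultiple(uint32_t value, uint32_t multiple) {
        return (value + multiple - 1) / multiple * multiple;
    }

    int main() {
        assert(roundUpToMultiple(100, 1) == 100);    // already aligned
        assert(roundUpToMultiple(100, 4) == 100);    // 100 is a multiple of 4
        assert(roundUpToMultiple(100, 32) == 128);   // next multiple of 32
        assert(roundUpToMultiple(100, 64) == 128);   // next multiple of 64
        assert(roundUpToMultiple(100, 128) == 128);  // next multiple of 128
        return 0;
    }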