author    Michael Butler <butlermichael@google.com>  2021-03-25 15:27:38 -0700
committer Michael Butler <butlermichael@google.com>  2021-04-09 02:10:37 +0000
commit    bed23d9c3310c1e9a00fa0f49a9b6209b83e23b0
tree      70b885b6474f0f99710645ef66443b44d2e2c714
parent    8c4f0fcfc0387b73ee78b88a82fbec405d011a0a
Update NN utility code and VTS tests with new Memory type
This CL fixes the compiler errors that arose from changing the Memory
representation of the NN AIDL HAL, and updates the conversion and utility
code to work with the new Memory type. This change also makes
libaidlcommonsupport available to apex modules at min sdk level 29.

Bug: 183118727
Test: mma
Test: VtsHalNeuralnetworksTargetTest
Change-Id: Ief565473b4d82e0bb43785fc3b8275b16bd26cf6
Merged-In: Ief565473b4d82e0bb43785fc3b8275b16bd26cf6
(cherry picked from commit b0fcb3927d848e9721f05a458b5d6d4d2cb8079d)
-rw-r--r--  common/support/Android.bp                                   5
-rw-r--r--  neuralnetworks/aidl/utils/Android.bp                        4
-rw-r--r--  neuralnetworks/aidl/utils/src/Conversions.cpp             283
-rw-r--r--  neuralnetworks/aidl/utils/src/Utils.cpp                    62
-rw-r--r--  neuralnetworks/aidl/vts/functional/Android.bp               2
-rw-r--r--  neuralnetworks/aidl/vts/functional/MemoryDomainTests.cpp   21
-rw-r--r--  neuralnetworks/aidl/vts/functional/ValidateModel.cpp       16
-rw-r--r--  neuralnetworks/utils/common/src/CommonUtils.cpp           128
8 files changed, 348 insertions(+), 173 deletions(-)
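
For orientation before the per-file hunks: the new Memory type referenced in the commit message is a tagged AIDL union with ashmem, mappableFile, and hardwareBuffer alternatives, and the updated utility code branches on the tag rather than on a name string. A minimal sketch of that dispatch, using the generated getTag()/get<Tag>() accessors that appear throughout the diff (payloadSize is a hypothetical helper and the include path is the assumed NDK-backend convention):

    #include <aidl/android/hardware/neuralnetworks/Memory.h>
    #include <cstdint>

    using aidl::android::hardware::neuralnetworks::Memory;

    // Hypothetical helper, not part of this CL: report the payload size that each
    // union alternative describes, mirroring the VTS getSize() helper further down.
    int64_t payloadSize(const Memory& memory) {
        switch (memory.getTag()) {
            case Memory::Tag::ashmem:
                return memory.get<Memory::Tag::ashmem>().size;
            case Memory::Tag::mappableFile:
                return memory.get<Memory::Tag::mappableFile>().length;
            case Memory::Tag::hardwareBuffer:
                return 0;  // size is carried by the AHardwareBuffer description instead
        }
        return 0;
    }
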
diff --git a/common/support/Android.bp b/common/support/Android.bp
index 8aea306dae..730798d840 100644
--- a/common/support/Android.bp
+++ b/common/support/Android.bp
@@ -18,6 +18,11 @@ cc_library_static {
"android.hardware.common-V2-ndk_platform",
"libcutils",
],
+ apex_available: [
+ "//apex_available:platform",
+ "com.android.neuralnetworks",
+ ],
+ min_sdk_version: "29",
}
cc_test {
diff --git a/neuralnetworks/aidl/utils/Android.bp b/neuralnetworks/aidl/utils/Android.bp
index ad961cfe99..0ccc711ecf 100644
--- a/neuralnetworks/aidl/utils/Android.bp
+++ b/neuralnetworks/aidl/utils/Android.bp
@@ -31,6 +31,8 @@ cc_library_static {
export_include_dirs: ["include"],
cflags: ["-Wthread-safety"],
static_libs: [
+ "android.hardware.graphics.common-V2-ndk_platform",
+ "libaidlcommonsupport",
"libarect",
"neuralnetworks_types",
"neuralnetworks_utils_hal_common",
@@ -51,7 +53,9 @@ cc_test {
],
static_libs: [
"android.hardware.common-V2-ndk_platform",
+ "android.hardware.graphics.common-V2-ndk_platform",
"android.hardware.neuralnetworks-V1-ndk_platform",
+ "libaidlcommonsupport",
"libgmock",
"libneuralnetworks_common",
"neuralnetworks_types",
diff --git a/neuralnetworks/aidl/utils/src/Conversions.cpp b/neuralnetworks/aidl/utils/src/Conversions.cpp
index d5f7f81663..93ac51c233 100644
--- a/neuralnetworks/aidl/utils/src/Conversions.cpp
+++ b/neuralnetworks/aidl/utils/src/Conversions.cpp
@@ -16,8 +16,13 @@
#include "Conversions.h"
+#include <aidl/android/hardware/common/Ashmem.h>
+#include <aidl/android/hardware/common/MappableFile.h>
#include <aidl/android/hardware/common/NativeHandle.h>
+#include <aidl/android/hardware/graphics/common/HardwareBuffer.h>
+#include <aidlcommonsupport/NativeHandle.h>
#include <android-base/logging.h>
+#include <android-base/mapped_file.h>
#include <android-base/unique_fd.h>
#include <android/binder_auto_utils.h>
#include <android/hardware_buffer.h>
@@ -125,28 +130,17 @@ struct NativeHandleDeleter {
using UniqueNativeHandle = std::unique_ptr<native_handle_t, NativeHandleDeleter>;
-static GeneralResult<UniqueNativeHandle> nativeHandleFromAidlHandle(const NativeHandle& handle) {
- std::vector<base::unique_fd> fds;
- fds.reserve(handle.fds.size());
- for (const auto& fd : handle.fds) {
- auto duplicatedFd = NN_TRY(dupFd(fd.get()));
- fds.emplace_back(duplicatedFd.release());
- }
-
- constexpr size_t kIntMax = std::numeric_limits<int>::max();
- CHECK_LE(handle.fds.size(), kIntMax);
- CHECK_LE(handle.ints.size(), kIntMax);
- native_handle_t* nativeHandle = native_handle_create(static_cast<int>(handle.fds.size()),
- static_cast<int>(handle.ints.size()));
- if (nativeHandle == nullptr) {
- return NN_ERROR() << "Failed to create native_handle";
+GeneralResult<UniqueNativeHandle> nativeHandleFromAidlHandle(const NativeHandle& handle) {
+ auto nativeHandle = UniqueNativeHandle(dupFromAidl(handle));
+ if (nativeHandle.get() == nullptr) {
+ return NN_ERROR() << "android::dupFromAidl failed to convert the common::NativeHandle to a "
+ "native_handle_t";
}
- for (size_t i = 0; i < fds.size(); ++i) {
- nativeHandle->data[i] = fds[i].release();
+ if (!std::all_of(nativeHandle->data + 0, nativeHandle->data + nativeHandle->numFds,
+ [](int fd) { return fd >= 0; })) {
+ return NN_ERROR() << "android::dupFromAidl returned an invalid native_handle_t";
}
- std::copy(handle.ints.begin(), handle.ints.end(), &nativeHandle->data[nativeHandle->numFds]);
-
- return UniqueNativeHandle(nativeHandle);
+ return nativeHandle;
}
} // anonymous namespace
@@ -353,67 +347,66 @@ GeneralResult<MeasureTiming> unvalidatedConvert(bool measureTiming) {
return measureTiming ? MeasureTiming::YES : MeasureTiming::NO;
}
-static uint32_t roundUpToMultiple(uint32_t value, uint32_t multiple) {
- return (value + multiple - 1) / multiple * multiple;
-}
-
GeneralResult<SharedMemory> unvalidatedConvert(const aidl_hal::Memory& memory) {
- VERIFY_NON_NEGATIVE(memory.size) << "Memory size must not be negative";
- if (memory.size > std::numeric_limits<size_t>::max()) {
- return NN_ERROR() << "Memory: size must be <= std::numeric_limits<size_t>::max()";
- }
-
- if (memory.name != "hardware_buffer_blob") {
- return std::make_shared<const Memory>(Memory{
- .handle = NN_TRY(unvalidatedConvertHelper(memory.handle)),
- .size = static_cast<size_t>(memory.size),
- .name = memory.name,
- });
- }
-
- const auto size = static_cast<uint32_t>(memory.size);
- const auto format = AHARDWAREBUFFER_FORMAT_BLOB;
- const auto usage = AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN | AHARDWAREBUFFER_USAGE_CPU_WRITE_OFTEN;
- const uint32_t width = size;
- const uint32_t height = 1; // height is always 1 for BLOB mode AHardwareBuffer.
- const uint32_t layers = 1; // layers is always 1 for BLOB mode AHardwareBuffer.
-
- const UniqueNativeHandle handle = NN_TRY(nativeHandleFromAidlHandle(memory.handle));
- const native_handle_t* nativeHandle = handle.get();
-
- // AHardwareBuffer_createFromHandle() might fail because an allocator
- // expects a specific stride value. In that case, we try to guess it by
- // aligning the width to small powers of 2.
- // TODO(b/174120849): Avoid stride assumptions.
- AHardwareBuffer* hardwareBuffer = nullptr;
- status_t status = UNKNOWN_ERROR;
- for (uint32_t alignment : {1, 4, 32, 64, 128, 2, 8, 16}) {
- const uint32_t stride = roundUpToMultiple(width, alignment);
- AHardwareBuffer_Desc desc{
- .width = width,
- .height = height,
- .layers = layers,
- .format = format,
- .usage = usage,
- .stride = stride,
- };
- status = AHardwareBuffer_createFromHandle(&desc, nativeHandle,
- AHARDWAREBUFFER_CREATE_FROM_HANDLE_METHOD_CLONE,
- &hardwareBuffer);
- if (status == NO_ERROR) {
- break;
+ using Tag = aidl_hal::Memory::Tag;
+ switch (memory.getTag()) {
+ case Tag::ashmem: {
+ const auto& ashmem = memory.get<Tag::ashmem>();
+ VERIFY_NON_NEGATIVE(ashmem.size) << "Memory size must not be negative";
+ if (ashmem.size > std::numeric_limits<size_t>::max()) {
+ return NN_ERROR() << "Memory: size must be <= std::numeric_limits<size_t>::max()";
+ }
+
+ auto handle = Memory::Ashmem{
+ .fd = NN_TRY(dupFd(ashmem.fd.get())),
+ .size = static_cast<size_t>(ashmem.size),
+ };
+ return std::make_shared<const Memory>(Memory{.handle = std::move(handle)});
+ }
+ case Tag::mappableFile: {
+ const auto& mappableFile = memory.get<Tag::mappableFile>();
+ VERIFY_NON_NEGATIVE(mappableFile.length) << "Memory size must not be negative";
+ VERIFY_NON_NEGATIVE(mappableFile.offset) << "Memory offset must not be negative";
+ if (mappableFile.length > std::numeric_limits<size_t>::max()) {
+ return NN_ERROR() << "Memory: size must be <= std::numeric_limits<size_t>::max()";
+ }
+ if (mappableFile.offset > std::numeric_limits<size_t>::max()) {
+ return NN_ERROR() << "Memory: offset must be <= std::numeric_limits<size_t>::max()";
+ }
+
+ const size_t size = static_cast<size_t>(mappableFile.length);
+ const int prot = mappableFile.prot;
+ const int fd = mappableFile.fd.get();
+ const size_t offset = static_cast<size_t>(mappableFile.offset);
+
+ return createSharedMemoryFromFd(size, prot, fd, offset);
+ }
+ case Tag::hardwareBuffer: {
+ const auto& hardwareBuffer = memory.get<Tag::hardwareBuffer>();
+
+ const UniqueNativeHandle handle =
+ NN_TRY(nativeHandleFromAidlHandle(hardwareBuffer.handle));
+ const native_handle_t* nativeHandle = handle.get();
+
+ const AHardwareBuffer_Desc desc{
+ .width = static_cast<uint32_t>(hardwareBuffer.description.width),
+ .height = static_cast<uint32_t>(hardwareBuffer.description.height),
+ .layers = static_cast<uint32_t>(hardwareBuffer.description.layers),
+ .format = static_cast<uint32_t>(hardwareBuffer.description.format),
+ .usage = static_cast<uint64_t>(hardwareBuffer.description.usage),
+ .stride = static_cast<uint32_t>(hardwareBuffer.description.stride),
+ };
+ AHardwareBuffer* ahwb = nullptr;
+ const status_t status = AHardwareBuffer_createFromHandle(
+ &desc, nativeHandle, AHARDWAREBUFFER_CREATE_FROM_HANDLE_METHOD_CLONE, &ahwb);
+ if (status != NO_ERROR) {
+ return NN_ERROR() << "createFromHandle failed";
+ }
+
+ return createSharedMemoryFromAHWB(ahwb, /*takeOwnership=*/true);
}
}
- if (status != NO_ERROR) {
- return NN_ERROR(ErrorStatus::GENERAL_FAILURE)
- << "Can't create AHardwareBuffer from handle. Error: " << status;
- }
-
- return std::make_shared<const Memory>(Memory{
- .handle = HardwareBufferHandle(hardwareBuffer, /*takeOwnership=*/true),
- .size = static_cast<size_t>(memory.size),
- .name = memory.name,
- });
+ return NN_ERROR() << "Unrecognized Memory::Tag: " << memory.getTag();
}
GeneralResult<Timing> unvalidatedConvert(const aidl_hal::Timing& timing) {
@@ -645,20 +638,95 @@ struct overloaded : Ts... {
template <class... Ts>
overloaded(Ts...)->overloaded<Ts...>;
-static nn::GeneralResult<common::NativeHandle> aidlHandleFromNativeHandle(
- const native_handle_t& handle) {
- common::NativeHandle aidlNativeHandle;
+nn::GeneralResult<common::NativeHandle> aidlHandleFromNativeHandle(
+ const native_handle_t& nativeHandle) {
+ auto handle = ::android::dupToAidl(&nativeHandle);
+ if (!std::all_of(handle.fds.begin(), handle.fds.end(),
+ [](const ndk::ScopedFileDescriptor& fd) { return fd.get() >= 0; })) {
+ return NN_ERROR() << "android::dupToAidl returned an invalid common::NativeHandle";
+ }
+ return handle;
+}
- aidlNativeHandle.fds.reserve(handle.numFds);
- for (int i = 0; i < handle.numFds; ++i) {
- auto duplicatedFd = NN_TRY(nn::dupFd(handle.data[i]));
- aidlNativeHandle.fds.emplace_back(duplicatedFd.release());
+nn::GeneralResult<Memory> unvalidatedConvert(const nn::Memory::Ashmem& memory) {
+ if constexpr (std::numeric_limits<size_t>::max() > std::numeric_limits<int64_t>::max()) {
+ if (memory.size > std::numeric_limits<int64_t>::max()) {
+ return (
+ NN_ERROR()
+ << "Memory::Ashmem: size must be <= std::numeric_limits<int64_t>::max()")
+ .
+ operator nn::GeneralResult<Memory>();
+ }
}
- aidlNativeHandle.ints = std::vector<int>(&handle.data[handle.numFds],
- &handle.data[handle.numFds + handle.numInts]);
+ auto fd = NN_TRY(nn::dupFd(memory.fd));
+ auto handle = common::Ashmem{
+ .fd = ndk::ScopedFileDescriptor(fd.release()),
+ .size = static_cast<int64_t>(memory.size),
+ };
+ return Memory::make<Memory::Tag::ashmem>(std::move(handle));
+}
- return aidlNativeHandle;
+nn::GeneralResult<Memory> unvalidatedConvert(const nn::Memory::Fd& memory) {
+ if constexpr (std::numeric_limits<size_t>::max() > std::numeric_limits<int64_t>::max()) {
+ if (memory.size > std::numeric_limits<int64_t>::max()) {
+ return (NN_ERROR() << "Memory::Fd: size must be <= std::numeric_limits<int64_t>::max()")
+ .
+ operator nn::GeneralResult<Memory>();
+ }
+ if (memory.offset > std::numeric_limits<int64_t>::max()) {
+ return (
+ NN_ERROR()
+ << "Memory::Fd: offset must be <= std::numeric_limits<int64_t>::max()")
+ .
+ operator nn::GeneralResult<Memory>();
+ }
+ }
+
+ auto fd = NN_TRY(nn::dupFd(memory.fd));
+ auto handle = common::MappableFile{
+ .length = static_cast<int64_t>(memory.size),
+ .prot = memory.prot,
+ .fd = ndk::ScopedFileDescriptor(fd.release()),
+ .offset = static_cast<int64_t>(memory.offset),
+ };
+ return Memory::make<Memory::Tag::mappableFile>(std::move(handle));
+}
+
+nn::GeneralResult<Memory> unvalidatedConvert(const nn::Memory::HardwareBuffer& memory) {
+ const native_handle_t* nativeHandle = AHardwareBuffer_getNativeHandle(memory.handle.get());
+ if (nativeHandle == nullptr) {
+ return (NN_ERROR() << "unvalidatedConvert failed because AHardwareBuffer_getNativeHandle "
+ "returned nullptr")
+ .
+ operator nn::GeneralResult<Memory>();
+ }
+
+ auto handle = NN_TRY(aidlHandleFromNativeHandle(*nativeHandle));
+
+ AHardwareBuffer_Desc desc;
+ AHardwareBuffer_describe(memory.handle.get(), &desc);
+
+ const auto description = graphics::common::HardwareBufferDescription{
+ .width = static_cast<int32_t>(desc.width),
+ .height = static_cast<int32_t>(desc.height),
+ .layers = static_cast<int32_t>(desc.layers),
+ .format = static_cast<graphics::common::PixelFormat>(desc.format),
+ .usage = static_cast<graphics::common::BufferUsage>(desc.usage),
+ .stride = static_cast<int32_t>(desc.stride),
+ };
+
+ auto hardwareBuffer = graphics::common::HardwareBuffer{
+ .description = std::move(description),
+ .handle = std::move(handle),
+ };
+ return Memory::make<Memory::Tag::hardwareBuffer>(std::move(hardwareBuffer));
+}
+
+nn::GeneralResult<Memory> unvalidatedConvert(const nn::Memory::Unknown& /*memory*/) {
+ return (NN_ERROR() << "Unable to convert Unknown memory type")
+ .
+ operator nn::GeneralResult<Memory>();
}
} // namespace
@@ -693,41 +761,12 @@ nn::GeneralResult<common::NativeHandle> unvalidatedConvert(const nn::SharedHandl
}
nn::GeneralResult<Memory> unvalidatedConvert(const nn::SharedMemory& memory) {
- CHECK(memory != nullptr);
- if (memory->size > std::numeric_limits<int64_t>::max()) {
- return NN_ERROR() << "Memory size doesn't fit into int64_t.";
+ if (memory == nullptr) {
+ return (NN_ERROR() << "Unable to convert nullptr memory")
+ .
+ operator nn::GeneralResult<Memory>();
}
- if (const auto* handle = std::get_if<nn::Handle>(&memory->handle)) {
- return Memory{
- .handle = NN_TRY(unvalidatedConvert(*handle)),
- .size = static_cast<int64_t>(memory->size),
- .name = memory->name,
- };
- }
-
- const auto* ahwb = std::get<nn::HardwareBufferHandle>(memory->handle).get();
- AHardwareBuffer_Desc bufferDesc;
- AHardwareBuffer_describe(ahwb, &bufferDesc);
-
- if (bufferDesc.format == AHARDWAREBUFFER_FORMAT_BLOB) {
- CHECK_EQ(memory->size, bufferDesc.width);
- CHECK_EQ(memory->name, "hardware_buffer_blob");
- } else {
- CHECK_EQ(memory->size, 0u);
- CHECK_EQ(memory->name, "hardware_buffer");
- }
-
- const native_handle_t* nativeHandle = AHardwareBuffer_getNativeHandle(ahwb);
- if (nativeHandle == nullptr) {
- return NN_ERROR() << "unvalidatedConvert failed because AHardwareBuffer_getNativeHandle "
- "returned nullptr";
- }
-
- return Memory{
- .handle = NN_TRY(aidlHandleFromNativeHandle(*nativeHandle)),
- .size = static_cast<int64_t>(memory->size),
- .name = memory->name,
- };
+ return std::visit([](const auto& x) { return unvalidatedConvert(x); }, memory->handle);
}
nn::GeneralResult<ErrorStatus> unvalidatedConvert(const nn::ErrorStatus& errorStatus) {
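
A note on the size checks added above: converting the canonical size_t sizes and offsets to the int64_t fields of the AIDL types can only overflow when size_t is wider than int64_t, so the checks sit behind if constexpr and compile away on 32-bit builds. A standalone sketch of the same idiom, with toAidlSize as a hypothetical helper:

    #include <cstddef>
    #include <cstdint>
    #include <limits>
    #include <optional>

    // Hypothetical helper showing the same compile-time guard: the runtime range
    // check is only emitted when size_t can actually hold values above INT64_MAX.
    std::optional<int64_t> toAidlSize(size_t size) {
        if constexpr (std::numeric_limits<size_t>::max() >
                      static_cast<uint64_t>(std::numeric_limits<int64_t>::max())) {
            if (size > static_cast<size_t>(std::numeric_limits<int64_t>::max())) {
                return std::nullopt;  // would overflow the AIDL int64_t field
            }
        }
        return static_cast<int64_t>(size);
    }
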
diff --git a/neuralnetworks/aidl/utils/src/Utils.cpp b/neuralnetworks/aidl/utils/src/Utils.cpp
index 95516c854b..03407be4ce 100644
--- a/neuralnetworks/aidl/utils/src/Utils.cpp
+++ b/neuralnetworks/aidl/utils/src/Utils.cpp
@@ -16,12 +16,20 @@
#include "Utils.h"
+#include <aidl/android/hardware/common/Ashmem.h>
+#include <aidl/android/hardware/common/MappableFile.h>
+#include <aidl/android/hardware/graphics/common/HardwareBuffer.h>
+#include <android/binder_auto_utils.h>
#include <android/binder_status.h>
#include <nnapi/Result.h>
+#include <nnapi/SharedMemory.h>
namespace aidl::android::hardware::neuralnetworks::utils {
namespace {
+nn::GeneralResult<ndk::ScopedFileDescriptor> clone(const ndk::ScopedFileDescriptor& fd);
+using utils::clone;
+
template <typename Type>
nn::GeneralResult<std::vector<Type>> cloneVec(const std::vector<Type>& arguments) {
std::vector<Type> clonedObjects;
@@ -37,24 +45,52 @@ nn::GeneralResult<std::vector<Type>> clone(const std::vector<Type>& arguments) {
return cloneVec(arguments);
}
+nn::GeneralResult<ndk::ScopedFileDescriptor> clone(const ndk::ScopedFileDescriptor& fd) {
+ auto duplicatedFd = NN_TRY(nn::dupFd(fd.get()));
+ return ndk::ScopedFileDescriptor(duplicatedFd.release());
+}
+
+nn::GeneralResult<common::NativeHandle> clone(const common::NativeHandle& handle) {
+ return common::NativeHandle{
+ .fds = NN_TRY(cloneVec(handle.fds)),
+ .ints = handle.ints,
+ };
+}
+
} // namespace
nn::GeneralResult<Memory> clone(const Memory& memory) {
- common::NativeHandle nativeHandle;
- nativeHandle.ints = memory.handle.ints;
- nativeHandle.fds.reserve(memory.handle.fds.size());
- for (const auto& fd : memory.handle.fds) {
- const int newFd = dup(fd.get());
- if (newFd < 0) {
- return NN_ERROR() << "Couldn't dup a file descriptor";
+ switch (memory.getTag()) {
+ case Memory::Tag::ashmem: {
+ const auto& ashmem = memory.get<Memory::Tag::ashmem>();
+ auto handle = common::Ashmem{
+ .fd = NN_TRY(clone(ashmem.fd)),
+ .size = ashmem.size,
+ };
+ return Memory::make<Memory::Tag::ashmem>(std::move(handle));
+ }
+ case Memory::Tag::mappableFile: {
+ const auto& memFd = memory.get<Memory::Tag::mappableFile>();
+ auto handle = common::MappableFile{
+ .length = memFd.length,
+ .prot = memFd.prot,
+ .fd = NN_TRY(clone(memFd.fd)),
+ .offset = memFd.offset,
+ };
+ return Memory::make<Memory::Tag::mappableFile>(std::move(handle));
+ }
+ case Memory::Tag::hardwareBuffer: {
+ const auto& hardwareBuffer = memory.get<Memory::Tag::hardwareBuffer>();
+ auto handle = graphics::common::HardwareBuffer{
+ .description = hardwareBuffer.description,
+ .handle = NN_TRY(clone(hardwareBuffer.handle)),
+ };
+ return Memory::make<Memory::Tag::hardwareBuffer>(std::move(handle));
}
- nativeHandle.fds.emplace_back(newFd);
}
- return Memory{
- .handle = std::move(nativeHandle),
- .size = memory.size,
- .name = memory.name,
- };
+ return (NN_ERROR() << "Unrecognized Memory::Tag: " << memory.getTag())
+ .
+ operator nn::GeneralResult<Memory>();
}
nn::GeneralResult<RequestMemoryPool> clone(const RequestMemoryPool& requestPool) {
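
The clone(Memory) overload above has to deep-copy per tag because ndk::ScopedFileDescriptor is not copyable; each alternative dups its descriptor and copies the plain fields. A hedged usage sketch, assuming the same headers and namespace as Utils.cpp (duplicatePool is a hypothetical caller):

    // Hypothetical caller of the clone() overload added above: on success the copy
    // owns freshly dup()'d file descriptors, so the union tags must still match.
    nn::GeneralResult<Memory> duplicatePool(const Memory& original) {
        Memory copy = NN_TRY(clone(original));
        CHECK(copy.getTag() == original.getTag());
        return copy;
    }
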
diff --git a/neuralnetworks/aidl/vts/functional/Android.bp b/neuralnetworks/aidl/vts/functional/Android.bp
index 7804c2a765..d5b150a934 100644
--- a/neuralnetworks/aidl/vts/functional/Android.bp
+++ b/neuralnetworks/aidl/vts/functional/Android.bp
@@ -50,9 +50,11 @@ cc_test {
],
static_libs: [
"android.hardware.common-V2-ndk_platform",
+ "android.hardware.graphics.common-V2-ndk_platform",
"android.hardware.neuralnetworks-V1-ndk_platform",
"android.hidl.allocator@1.0",
"android.hidl.memory@1.0",
+ "libaidlcommonsupport",
"libgmock",
"libhidlmemory",
"libneuralnetworks_generated_test_harness",
diff --git a/neuralnetworks/aidl/vts/functional/MemoryDomainTests.cpp b/neuralnetworks/aidl/vts/functional/MemoryDomainTests.cpp
index 596f8ae58e..e8313f19eb 100644
--- a/neuralnetworks/aidl/vts/functional/MemoryDomainTests.cpp
+++ b/neuralnetworks/aidl/vts/functional/MemoryDomainTests.cpp
@@ -16,6 +16,7 @@
#define LOG_TAG "neuralnetworks_aidl_hal_test"
+#include <aidl/android/hardware/graphics/common/PixelFormat.h>
#include <android-base/logging.h>
#include <android/binder_auto_utils.h>
#include <android/binder_interface_utils.h>
@@ -659,10 +660,26 @@ class MemoryDomainCopyTestBase : public MemoryDomainTestBase {
return allocateBuffer(preparedModel, inputIndexes, outputIndexes, {});
}
+ size_t getSize(const Memory& memory) {
+ switch (memory.getTag()) {
+ case Memory::Tag::ashmem:
+ return memory.get<Memory::Tag::ashmem>().size;
+ case Memory::Tag::mappableFile:
+ return memory.get<Memory::Tag::mappableFile>().length;
+ case Memory::Tag::hardwareBuffer: {
+ const auto& hardwareBuffer = memory.get<Memory::Tag::hardwareBuffer>();
+ const bool isBlob =
+ hardwareBuffer.description.format == graphics::common::PixelFormat::BLOB;
+ return isBlob ? hardwareBuffer.description.width : 0;
+ }
+ }
+ return 0;
+ }
+
Memory allocateSharedMemory(uint32_t size) {
const auto sharedMemory = nn::createSharedMemory(size).value();
auto memory = utils::convert(sharedMemory).value();
- EXPECT_EQ(memory.size, size);
+ EXPECT_EQ(getSize(memory), size);
return memory;
}
@@ -690,7 +707,7 @@ class MemoryDomainCopyTestBase : public MemoryDomainTestBase {
void initializeDeviceMemory(const std::shared_ptr<IBuffer>& buffer) {
Memory memory = allocateSharedMemory(kTestOperandDataSize);
- ASSERT_EQ(memory.size, kTestOperandDataSize);
+ ASSERT_EQ(getSize(memory), kTestOperandDataSize);
testCopyFrom(buffer, memory, utils::toSigned(kTestOperand.dimensions).value(),
ErrorStatus::NONE);
}
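
The getSize() helper above reports description.width as the byte size only for BLOB-format hardware buffers, matching the BLOB-mode convention the replaced Conversions.cpp code relied on (height and layers fixed at 1). A small sketch of that descriptor shape, assuming only the public AHardwareBuffer NDK API (makeBlobDesc is a hypothetical helper):

    #include <android/hardware_buffer.h>
    #include <cstdint>

    // Hypothetical helper: a BLOB-mode AHardwareBuffer encodes its byte length as
    // the width of a 1x1-layer buffer, which is why width doubles as the size.
    AHardwareBuffer_Desc makeBlobDesc(uint32_t sizeInBytes) {
        return AHardwareBuffer_Desc{
                .width = sizeInBytes,  // byte length of the blob
                .height = 1,           // always 1 for BLOB mode
                .layers = 1,           // always 1 for BLOB mode
                .format = AHARDWAREBUFFER_FORMAT_BLOB,
                .usage = AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN |
                         AHARDWAREBUFFER_USAGE_CPU_WRITE_OFTEN,
        };
    }
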
diff --git a/neuralnetworks/aidl/vts/functional/ValidateModel.cpp b/neuralnetworks/aidl/vts/functional/ValidateModel.cpp
index 94d3daf6bb..698c054941 100644
--- a/neuralnetworks/aidl/vts/functional/ValidateModel.cpp
+++ b/neuralnetworks/aidl/vts/functional/ValidateModel.cpp
@@ -259,12 +259,16 @@ template <>
size_t sizeForBinder(const Memory& memory) {
// This is just a guess.
- size_t size = 0;
- const NativeHandle& handle = memory.handle;
- size += sizeof(decltype(handle.fds)::value_type) * handle.fds.size();
- size += sizeof(decltype(handle.ints)::value_type) * handle.ints.size();
- size += sizeForBinder(memory.name);
- size += sizeof(memory);
+ size_t size = sizeof(Memory);
+
+ // Only hardwareBuffer type memory has dynamic memory that needs to be accounted for (in the
+    // form of a NativeHandle type). The other types of memory (MappableFile, Ashmem) use a
+ // single file descriptor (with metadata) instead.
+ if (memory.getTag() == Memory::Tag::hardwareBuffer) {
+ const NativeHandle& handle = memory.get<Memory::Tag::hardwareBuffer>().handle;
+ size += sizeof(decltype(handle.fds)::value_type) * handle.fds.size();
+ size += sizeof(decltype(handle.ints)::value_type) * handle.ints.size();
+ }
return size;
}
diff --git a/neuralnetworks/utils/common/src/CommonUtils.cpp b/neuralnetworks/utils/common/src/CommonUtils.cpp
index 924ecb2d1b..4d26795d89 100644
--- a/neuralnetworks/utils/common/src/CommonUtils.cpp
+++ b/neuralnetworks/utils/common/src/CommonUtils.cpp
@@ -89,6 +89,59 @@ void copyPointersToSharedMemory(nn::Model::Subgraph* subgraph,
});
}
+nn::GeneralResult<hidl_handle> createNativeHandleFrom(base::unique_fd fd,
+ const std::vector<int32_t>& ints) {
+ constexpr size_t kIntMax = std::numeric_limits<int>::max();
+ CHECK_LE(ints.size(), kIntMax);
+ native_handle_t* nativeHandle = native_handle_create(1, static_cast<int>(ints.size()));
+ if (nativeHandle == nullptr) {
+ return NN_ERROR() << "Failed to create native_handle";
+ }
+
+ nativeHandle->data[0] = fd.release();
+ std::copy(ints.begin(), ints.end(), nativeHandle->data + 1);
+
+ hidl_handle handle;
+ handle.setTo(nativeHandle, /*shouldOwn=*/true);
+ return handle;
+}
+
+nn::GeneralResult<hidl_memory> createHidlMemoryFrom(const nn::Memory::Ashmem& memory) {
+ auto fd = NN_TRY(nn::dupFd(memory.fd));
+ auto handle = NN_TRY(createNativeHandleFrom(std::move(fd), {}));
+ return hidl_memory("ashmem", std::move(handle), memory.size);
+}
+
+nn::GeneralResult<hidl_memory> createHidlMemoryFrom(const nn::Memory::Fd& memory) {
+ auto fd = NN_TRY(nn::dupFd(memory.fd));
+
+ const auto [lowOffsetBits, highOffsetBits] = nn::getIntsFromOffset(memory.offset);
+ const std::vector<int> ints = {memory.prot, lowOffsetBits, highOffsetBits};
+
+ auto handle = NN_TRY(createNativeHandleFrom(std::move(fd), ints));
+ return hidl_memory("mmap_fd", std::move(handle), memory.size);
+}
+
+nn::GeneralResult<hidl_memory> createHidlMemoryFrom(const nn::Memory::HardwareBuffer& memory) {
+ const auto* ahwb = memory.handle.get();
+ AHardwareBuffer_Desc bufferDesc;
+ AHardwareBuffer_describe(ahwb, &bufferDesc);
+
+ const bool isBlob = bufferDesc.format == AHARDWAREBUFFER_FORMAT_BLOB;
+ const size_t size = isBlob ? bufferDesc.width : 0;
+ const char* const name = isBlob ? "hardware_buffer_blob" : "hardware_buffer";
+
+ const native_handle_t* nativeHandle = AHardwareBuffer_getNativeHandle(ahwb);
+ const hidl_handle hidlHandle(nativeHandle);
+ hidl_handle copiedHandle(hidlHandle);
+
+ return hidl_memory(name, std::move(copiedHandle), size);
+}
+
+nn::GeneralResult<hidl_memory> createHidlMemoryFrom(const nn::Memory::Unknown& memory) {
+ return hidl_memory(memory.name, NN_TRY(hidlHandleFromSharedHandle(memory.handle)), memory.size);
+}
+
} // anonymous namespace
nn::Capabilities::OperandPerformanceTable makeQuantized8PerformanceConsistentWithP(
@@ -255,27 +308,7 @@ nn::GeneralResult<hidl_memory> createHidlMemoryFromSharedMemory(const nn::Shared
if (memory == nullptr) {
return NN_ERROR() << "Memory must be non-empty";
}
- if (const auto* handle = std::get_if<nn::Handle>(&memory->handle)) {
- return hidl_memory(memory->name, NN_TRY(hidlHandleFromSharedHandle(*handle)), memory->size);
- }
-
- const auto* ahwb = std::get<nn::HardwareBufferHandle>(memory->handle).get();
- AHardwareBuffer_Desc bufferDesc;
- AHardwareBuffer_describe(ahwb, &bufferDesc);
-
- if (bufferDesc.format == AHARDWAREBUFFER_FORMAT_BLOB) {
- CHECK_EQ(memory->size, bufferDesc.width);
- CHECK_EQ(memory->name, "hardware_buffer_blob");
- } else {
- CHECK_EQ(memory->size, 0u);
- CHECK_EQ(memory->name, "hardware_buffer");
- }
-
- const native_handle_t* nativeHandle = AHardwareBuffer_getNativeHandle(ahwb);
- const hidl_handle hidlHandle(nativeHandle);
- hidl_handle handle(hidlHandle);
-
- return hidl_memory(memory->name, std::move(handle), memory->size);
+ return std::visit([](const auto& x) { return createHidlMemoryFrom(x); }, memory->handle);
}
static uint32_t roundUpToMultiple(uint32_t value, uint32_t multiple) {
@@ -283,14 +316,53 @@ static uint32_t roundUpToMultiple(uint32_t value, uint32_t multiple) {
}
nn::GeneralResult<nn::SharedMemory> createSharedMemoryFromHidlMemory(const hidl_memory& memory) {
- CHECK_LE(memory.size(), std::numeric_limits<uint32_t>::max());
+ CHECK_LE(memory.size(), std::numeric_limits<size_t>::max());
+ if (!memory.valid()) {
+ return NN_ERROR() << "Unable to convert invalid hidl_memory";
+ }
+
+ if (memory.name() == "ashmem") {
+ if (memory.handle()->numFds != 1) {
+ return NN_ERROR() << "Unable to convert invalid ashmem memory object with "
+ << memory.handle()->numFds << " numFds, but expected 1";
+ }
+ if (memory.handle()->numInts != 0) {
+ return NN_ERROR() << "Unable to convert invalid ashmem memory object with "
+ << memory.handle()->numInts << " numInts, but expected 0";
+ }
+ auto handle = nn::Memory::Ashmem{
+ .fd = NN_TRY(nn::dupFd(memory.handle()->data[0])),
+ .size = static_cast<size_t>(memory.size()),
+ };
+ return std::make_shared<const nn::Memory>(nn::Memory{.handle = std::move(handle)});
+ }
+
+ if (memory.name() == "mmap_fd") {
+ if (memory.handle()->numFds != 1) {
+ return NN_ERROR() << "Unable to convert invalid mmap_fd memory object with "
+ << memory.handle()->numFds << " numFds, but expected 1";
+ }
+ if (memory.handle()->numInts != 3) {
+ return NN_ERROR() << "Unable to convert invalid mmap_fd memory object with "
+ << memory.handle()->numInts << " numInts, but expected 3";
+ }
+
+ const int fd = memory.handle()->data[0];
+ const int prot = memory.handle()->data[1];
+ const int lower = memory.handle()->data[2];
+ const int higher = memory.handle()->data[3];
+ const size_t offset = nn::getOffsetFromInts(lower, higher);
+
+ return nn::createSharedMemoryFromFd(static_cast<size_t>(memory.size()), prot, fd, offset);
+ }
if (memory.name() != "hardware_buffer_blob") {
- return std::make_shared<const nn::Memory>(nn::Memory{
+ auto handle = nn::Memory::Unknown{
.handle = NN_TRY(sharedHandleFromNativeHandle(memory.handle())),
- .size = static_cast<uint32_t>(memory.size()),
+ .size = static_cast<size_t>(memory.size()),
.name = memory.name(),
- });
+ };
+ return std::make_shared<const nn::Memory>(nn::Memory{.handle = std::move(handle)});
}
const auto size = memory.size();
@@ -328,11 +400,7 @@ nn::GeneralResult<nn::SharedMemory> createSharedMemoryFromHidlMemory(const hidl_
<< "Can't create AHardwareBuffer from handle. Error: " << status;
}
- return std::make_shared<const nn::Memory>(nn::Memory{
- .handle = nn::HardwareBufferHandle(hardwareBuffer, /*takeOwnership=*/true),
- .size = static_cast<uint32_t>(memory.size()),
- .name = memory.name(),
- });
+ return nn::createSharedMemoryFromAHWB(hardwareBuffer, /*takeOwnership=*/true);
}
nn::GeneralResult<hidl_handle> hidlHandleFromSharedHandle(const nn::Handle& handle) {
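
Finally, the "mmap_fd" branch above reads prot from data[1] and rebuilds the 64-bit offset from data[2] (lower bits) and data[3] (higher bits) with nn::getOffsetFromInts, the inverse of the nn::getIntsFromOffset split used when the handle is built. A sketch of one such low/high packing, illustrative only and not the library source:

    #include <cstdint>
    #include <utility>

    // Illustrative split/join of a 64-bit offset across two 32-bit handle ints,
    // matching the lower/higher layout read back above (assumed packing).
    std::pair<int32_t, int32_t> splitOffset(uint64_t offset) {
        const auto lower = static_cast<int32_t>(static_cast<uint32_t>(offset));
        const auto higher = static_cast<int32_t>(static_cast<uint32_t>(offset >> 32));
        return {lower, higher};
    }

    uint64_t joinOffset(int32_t lower, int32_t higher) {
        return (static_cast<uint64_t>(static_cast<uint32_t>(higher)) << 32) |
               static_cast<uint32_t>(lower);
    }
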