Diffstat (limited to 'neuralnetworks/aidl/utils/src/Utils.cpp')
 neuralnetworks/aidl/utils/src/Utils.cpp | 26 ++++++++++++++++----------
 1 file changed, 16 insertions(+), 10 deletions(-)
diff --git a/neuralnetworks/aidl/utils/src/Utils.cpp b/neuralnetworks/aidl/utils/src/Utils.cpp
index 03407be4ce..f9b4f6edbc 100644
--- a/neuralnetworks/aidl/utils/src/Utils.cpp
+++ b/neuralnetworks/aidl/utils/src/Utils.cpp
@@ -51,8 +51,9 @@ nn::GeneralResult<ndk::ScopedFileDescriptor> clone(const ndk::ScopedFileDescript
}
nn::GeneralResult<common::NativeHandle> clone(const common::NativeHandle& handle) {
+ auto fds = NN_TRY(cloneVec(handle.fds));
return common::NativeHandle{
- .fds = NN_TRY(cloneVec(handle.fds)),
+ .fds = std::move(fds),
.ints = handle.ints,
};
}
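The recurring change in this patch is the same in every hunk: an NN_TRY(...) call that used to sit inside a designated initializer is hoisted into a named local, and the resulting value is then moved into the struct. Since NN_TRY expands to a statement expression that can return early from the enclosing function, keeping that early return out of the middle of aggregate initialization is presumably the motivation. The following self-contained sketch illustrates the pattern; the TRY macro, Handle, cloneFd, and the use of std::optional are stand-ins for NN_TRY, the AIDL types, and nn::GeneralResult, not code from the patched file.

```cpp
// build: clang++ -std=c++20 hoist_try.cpp   (statement expressions are a GNU/Clang extension)
#include <iostream>
#include <optional>
#include <string>
#include <utility>

// Hypothetical stand-in for NN_TRY: evaluate an optional-returning expression and
// return early from the *enclosing* function when it holds no value.
#define TRY(expr)                                      \
    ({                                                 \
        auto _result = (expr);                         \
        if (!_result.has_value()) return std::nullopt; \
        std::move(*_result);                           \
    })

struct Handle {
    std::string fd;
    int ints;
};

std::optional<std::string> cloneFd(const std::string& fd) {
    if (fd.empty()) return std::nullopt;  // simulate a failed dup()
    return fd;
}

// The pattern adopted by the patch: hoist the fallible TRY step into a named local
// first, then move the value into the designated initializer, so the macro's early
// return never fires in the middle of aggregate initialization.
std::optional<Handle> cloneHandle(const Handle& handle) {
    auto fd = TRY(cloneFd(handle.fd));
    return Handle{
            .fd = std::move(fd),
            .ints = handle.ints,
    };
}

int main() {
    const auto cloned = cloneHandle(Handle{.fd = "fd0", .ints = 7});
    std::cout << (cloned.has_value() ? cloned->fd : "clone failed") << "\n";
    return 0;
}
```

The same hoist-then-move shape appears in every subsequent hunk of the diff (Ashmem, MappableFile, HardwareBuffer, Request, and Model).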
@@ -63,32 +64,35 @@ nn::GeneralResult<Memory> clone(const Memory& memory) {
switch (memory.getTag()) {
case Memory::Tag::ashmem: {
const auto& ashmem = memory.get<Memory::Tag::ashmem>();
+ auto fd = NN_TRY(clone(ashmem.fd));
auto handle = common::Ashmem{
- .fd = NN_TRY(clone(ashmem.fd)),
+ .fd = std::move(fd),
.size = ashmem.size,
};
return Memory::make<Memory::Tag::ashmem>(std::move(handle));
}
case Memory::Tag::mappableFile: {
const auto& memFd = memory.get<Memory::Tag::mappableFile>();
+ auto fd = NN_TRY(clone(memFd.fd));
auto handle = common::MappableFile{
.length = memFd.length,
.prot = memFd.prot,
- .fd = NN_TRY(clone(memFd.fd)),
+ .fd = std::move(fd),
.offset = memFd.offset,
};
return Memory::make<Memory::Tag::mappableFile>(std::move(handle));
}
case Memory::Tag::hardwareBuffer: {
const auto& hardwareBuffer = memory.get<Memory::Tag::hardwareBuffer>();
- auto handle = graphics::common::HardwareBuffer{
+ auto handle = NN_TRY(clone(hardwareBuffer.handle));
+ auto ahwbHandle = graphics::common::HardwareBuffer{
.description = hardwareBuffer.description,
- .handle = NN_TRY(clone(hardwareBuffer.handle)),
+ .handle = std::move(handle),
};
- return Memory::make<Memory::Tag::hardwareBuffer>(std::move(handle));
+ return Memory::make<Memory::Tag::hardwareBuffer>(std::move(ahwbHandle));
}
}
- return (NN_ERROR() << "Unrecognized Memory::Tag: " << memory.getTag())
+ return (NN_ERROR() << "Unrecognized Memory::Tag: " << underlyingType(memory.getTag()))
.
operator nn::GeneralResult<Memory>();
}
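The error path also changes: memory.getTag() is now wrapped in underlyingType(...) before being streamed into NN_ERROR(). A plausible reading is that the tag is a scoped enum with no operator<< of its own, so it has to be converted to its underlying integer before it can be printed. Below is a minimal sketch of such a helper, assuming it behaves like a plain static_cast to std::underlying_type_t; the Tag enum here is illustrative, not the real AIDL Memory::Tag.

```cpp
// build: clang++ -std=c++17 underlying_type.cpp
#include <cstdint>
#include <iostream>
#include <type_traits>

// Sketch of what an underlyingType-style helper presumably does: cast a scoped enum
// to its underlying integral type so that it can be streamed with operator<<.
template <typename E>
constexpr std::underlying_type_t<E> underlyingType(E value) {
    return static_cast<std::underlying_type_t<E>>(value);
}

enum class Tag : int32_t { ashmem = 0, mappableFile = 1, hardwareBuffer = 2 };

int main() {
    const Tag tag = Tag::hardwareBuffer;
    // std::cout << tag;  // would not compile: no operator<< is defined for a scoped enum
    std::cout << "Unrecognized Memory::Tag: " << underlyingType(tag) << "\n";  // prints 2
    return 0;
}
```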
@@ -103,25 +107,27 @@ nn::GeneralResult<RequestMemoryPool> clone(const RequestMemoryPool& requestPool)
}
// Using explicit type conversion because std::variant inside the RequestMemoryPool confuses the
// compiler.
- return (NN_ERROR() << "Unrecognized request pool tag: " << requestPool.getTag())
+ return (NN_ERROR() << "Unrecognized request pool tag: " << underlyingType(requestPool.getTag()))
.
operator nn::GeneralResult<RequestMemoryPool>();
}
nn::GeneralResult<Request> clone(const Request& request) {
+ auto pools = NN_TRY(clone(request.pools));
return Request{
.inputs = request.inputs,
.outputs = request.outputs,
- .pools = NN_TRY(clone(request.pools)),
+ .pools = std::move(pools),
};
}
nn::GeneralResult<Model> clone(const Model& model) {
+ auto pools = NN_TRY(clone(model.pools));
return Model{
.main = model.main,
.referenced = model.referenced,
.operandValues = model.operandValues,
- .pools = NN_TRY(clone(model.pools)),
+ .pools = std::move(pools),
.relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16,
.extensionNameToPrefix = model.extensionNameToPrefix,
};