Diffstat (limited to 'neuralnetworks/1.3/utils/src/Conversions.cpp')
-rw-r--r-- | neuralnetworks/1.3/utils/src/Conversions.cpp | 164
1 file changed, 106 insertions, 58 deletions
diff --git a/neuralnetworks/1.3/utils/src/Conversions.cpp b/neuralnetworks/1.3/utils/src/Conversions.cpp
index e8a4f55afd..4eeb414dc8 100644
--- a/neuralnetworks/1.3/utils/src/Conversions.cpp
+++ b/neuralnetworks/1.3/utils/src/Conversions.cpp
@@ -28,7 +28,6 @@
 #include <nnapi/hal/1.0/Conversions.h>
 #include <nnapi/hal/1.2/Conversions.h>
 #include <nnapi/hal/CommonUtils.h>
-#include <nnapi/hal/HandleError.h>
 
 #include <algorithm>
 #include <chrono>
@@ -131,32 +130,38 @@ GeneralResult<Capabilities> unvalidatedConvert(const hal::V1_3::Capabilities& ca
     }
 
     auto operandPerformance = NN_TRY(unvalidatedConvert(capabilities.operandPerformance));
-    auto table = NN_TRY(hal::utils::makeGeneralFailure(
-            Capabilities::OperandPerformanceTable::create(std::move(operandPerformance)),
-            nn::ErrorStatus::GENERAL_FAILURE));
-
+    auto table =
+            NN_TRY(Capabilities::OperandPerformanceTable::create(std::move(operandPerformance)));
+
+    const auto relaxedFloat32toFloat16PerformanceScalar =
+            NN_TRY(unvalidatedConvert(capabilities.relaxedFloat32toFloat16PerformanceScalar));
+    const auto relaxedFloat32toFloat16PerformanceTensor =
+            NN_TRY(unvalidatedConvert(capabilities.relaxedFloat32toFloat16PerformanceTensor));
+    const auto ifPerformance = NN_TRY(unvalidatedConvert(capabilities.ifPerformance));
+    const auto whilePerformance = NN_TRY(unvalidatedConvert(capabilities.whilePerformance));
     return Capabilities{
-            .relaxedFloat32toFloat16PerformanceScalar = NN_TRY(
-                    unvalidatedConvert(capabilities.relaxedFloat32toFloat16PerformanceScalar)),
-            .relaxedFloat32toFloat16PerformanceTensor = NN_TRY(
-                    unvalidatedConvert(capabilities.relaxedFloat32toFloat16PerformanceTensor)),
+            .relaxedFloat32toFloat16PerformanceScalar = relaxedFloat32toFloat16PerformanceScalar,
+            .relaxedFloat32toFloat16PerformanceTensor = relaxedFloat32toFloat16PerformanceTensor,
             .operandPerformance = std::move(table),
-            .ifPerformance = NN_TRY(unvalidatedConvert(capabilities.ifPerformance)),
-            .whilePerformance = NN_TRY(unvalidatedConvert(capabilities.whilePerformance)),
+            .ifPerformance = ifPerformance,
+            .whilePerformance = whilePerformance,
     };
 }
 
 GeneralResult<Capabilities::OperandPerformance> unvalidatedConvert(
         const hal::V1_3::Capabilities::OperandPerformance& operandPerformance) {
+    const auto type = NN_TRY(unvalidatedConvert(operandPerformance.type));
+    const auto info = NN_TRY(unvalidatedConvert(operandPerformance.info));
     return Capabilities::OperandPerformance{
-            .type = NN_TRY(unvalidatedConvert(operandPerformance.type)),
-            .info = NN_TRY(unvalidatedConvert(operandPerformance.info)),
+            .type = type,
+            .info = info,
     };
 }
 
 GeneralResult<Operation> unvalidatedConvert(const hal::V1_3::Operation& operation) {
+    const auto type = NN_TRY(unvalidatedConvert(operation.type));
     return Operation{
-            .type = NN_TRY(unvalidatedConvert(operation.type)),
+            .type = type,
             .inputs = operation.inputs,
             .outputs = operation.outputs,
     };
@@ -168,25 +173,34 @@ GeneralResult<Operand::LifeTime> unvalidatedConvert(
 }
 
 GeneralResult<Operand> unvalidatedConvert(const hal::V1_3::Operand& operand) {
+    const auto type = NN_TRY(unvalidatedConvert(operand.type));
+    const auto lifetime = NN_TRY(unvalidatedConvert(operand.lifetime));
+    const auto location = NN_TRY(unvalidatedConvert(operand.location));
+    auto extraParams = NN_TRY(unvalidatedConvert(operand.extraParams));
     return Operand{
-            .type = NN_TRY(unvalidatedConvert(operand.type)),
+            .type = type,
             .dimensions = operand.dimensions,
             .scale = operand.scale,
             .zeroPoint = operand.zeroPoint,
-            .lifetime = NN_TRY(unvalidatedConvert(operand.lifetime)),
-            .location = NN_TRY(unvalidatedConvert(operand.location)),
-            .extraParams = NN_TRY(unvalidatedConvert(operand.extraParams)),
+            .lifetime = lifetime,
+            .location = location,
+            .extraParams = std::move(extraParams),
     };
 }
 
 GeneralResult<Model> unvalidatedConvert(const hal::V1_3::Model& model) {
+    auto main = NN_TRY(unvalidatedConvert(model.main));
+    auto referenced = NN_TRY(unvalidatedConvert(model.referenced));
+    auto operandValues = NN_TRY(unvalidatedConvert(model.operandValues));
+    auto pools = NN_TRY(unvalidatedConvert(model.pools));
+    auto extensionNameToPrefix = NN_TRY(unvalidatedConvert(model.extensionNameToPrefix));
     return Model{
-            .main = NN_TRY(unvalidatedConvert(model.main)),
-            .referenced = NN_TRY(unvalidatedConvert(model.referenced)),
-            .operandValues = NN_TRY(unvalidatedConvert(model.operandValues)),
-            .pools = NN_TRY(unvalidatedConvert(model.pools)),
+            .main = std::move(main),
+            .referenced = std::move(referenced),
+            .operandValues = std::move(operandValues),
+            .pools = std::move(pools),
             .relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16,
-            .extensionNameToPrefix = NN_TRY(unvalidatedConvert(model.extensionNameToPrefix)),
+            .extensionNameToPrefix = std::move(extensionNameToPrefix),
     };
 }
 
@@ -195,7 +209,7 @@ GeneralResult<Model::Subgraph> unvalidatedConvert(const hal::V1_3::Subgraph& sub
 
     // Verify number of consumers.
     const auto numberOfConsumers =
-            NN_TRY(hal::utils::countNumberOfConsumers(subgraph.operands.size(), operations));
+            NN_TRY(countNumberOfConsumers(subgraph.operands.size(), operations));
     CHECK(subgraph.operands.size() == numberOfConsumers.size());
     for (size_t i = 0; i < subgraph.operands.size(); ++i) {
         if (subgraph.operands[i].numberOfConsumers != numberOfConsumers[i]) {
@@ -206,8 +220,9 @@ GeneralResult<Model::Subgraph> unvalidatedConvert(const hal::V1_3::Subgraph& sub
         }
     }
 
+    auto operands = NN_TRY(unvalidatedConvert(subgraph.operands));
     return Model::Subgraph{
-            .operands = NN_TRY(unvalidatedConvert(subgraph.operands)),
+            .operands = std::move(operands),
             .operations = std::move(operations),
             .inputIndexes = subgraph.inputIndexes,
             .outputIndexes = subgraph.outputIndexes,
@@ -227,10 +242,13 @@ GeneralResult<BufferRole> unvalidatedConvert(const hal::V1_3::BufferRole& buffer
 }
 
 GeneralResult<Request> unvalidatedConvert(const hal::V1_3::Request& request) {
+    auto inputs = NN_TRY(unvalidatedConvert(request.inputs));
+    auto outputs = NN_TRY(unvalidatedConvert(request.outputs));
+    auto pools = NN_TRY(unvalidatedConvert(request.pools));
     return Request{
-            .inputs = NN_TRY(unvalidatedConvert(request.inputs)),
-            .outputs = NN_TRY(unvalidatedConvert(request.outputs)),
-            .pools = NN_TRY(unvalidatedConvert(request.pools)),
+            .inputs = std::move(inputs),
+            .outputs = std::move(outputs),
+            .pools = std::move(pools),
     };
 }
 
@@ -239,7 +257,7 @@ GeneralResult<Request::MemoryPool> unvalidatedConvert(
     using Discriminator = hal::V1_3::Request::MemoryPool::hidl_discriminator;
     switch (memoryPool.getDiscriminator()) {
         case Discriminator::hidlMemory:
-            return hal::utils::createSharedMemoryFromHidlMemory(memoryPool.hidlMemory());
+            return unvalidatedConvert(memoryPool.hidlMemory());
         case Discriminator::token:
             return static_cast<Request::MemoryDomainToken>(memoryPool.token());
     }
@@ -381,7 +399,7 @@ nn::GeneralResult<hidl_vec<uint8_t>> unvalidatedConvert(
 }
 
 nn::GeneralResult<hidl_handle> unvalidatedConvert(const nn::SharedHandle& handle) {
-    return V1_2::utils::unvalidatedConvert(handle);
+    return V1_0::utils::unvalidatedConvert(handle);
 }
 
 nn::GeneralResult<hidl_memory> unvalidatedConvert(const nn::SharedMemory& memory) {
@@ -398,7 +416,7 @@ nn::GeneralResult<V1_2::Operand::ExtraParams> unvalidatedConvert(
 }
 
 nn::GeneralResult<V1_2::Model::ExtensionNameAndPrefix> unvalidatedConvert(
-        const nn::Model::ExtensionNameAndPrefix& extensionNameAndPrefix) {
+        const nn::ExtensionNameAndPrefix& extensionNameAndPrefix) {
     return V1_2::utils::unvalidatedConvert(extensionNameAndPrefix);
 }
 
@@ -465,37 +483,45 @@ nn::GeneralResult<Priority> unvalidatedConvert(const nn::Priority& priority) {
 }
 
 nn::GeneralResult<Capabilities> unvalidatedConvert(const nn::Capabilities& capabilities) {
-    std::vector<nn::Capabilities::OperandPerformance> operandPerformance;
-    operandPerformance.reserve(capabilities.operandPerformance.asVector().size());
+    std::vector<nn::Capabilities::OperandPerformance> filteredOperandPerformances;
+    filteredOperandPerformances.reserve(capabilities.operandPerformance.asVector().size());
     std::copy_if(capabilities.operandPerformance.asVector().begin(),
                  capabilities.operandPerformance.asVector().end(),
-                 std::back_inserter(operandPerformance),
+                 std::back_inserter(filteredOperandPerformances),
                  [](const nn::Capabilities::OperandPerformance& operandPerformance) {
                      return compliantVersion(operandPerformance.type).has_value();
                  });
 
+    const auto relaxedFloat32toFloat16PerformanceScalar =
+            NN_TRY(unvalidatedConvert(capabilities.relaxedFloat32toFloat16PerformanceScalar));
+    const auto relaxedFloat32toFloat16PerformanceTensor =
+            NN_TRY(unvalidatedConvert(capabilities.relaxedFloat32toFloat16PerformanceTensor));
+    auto operandPerformance = NN_TRY(unvalidatedConvert(filteredOperandPerformances));
+    const auto ifPerformance = NN_TRY(unvalidatedConvert(capabilities.ifPerformance));
+    const auto whilePerformance = NN_TRY(unvalidatedConvert(capabilities.whilePerformance));
     return Capabilities{
-            .relaxedFloat32toFloat16PerformanceScalar = NN_TRY(
-                    unvalidatedConvert(capabilities.relaxedFloat32toFloat16PerformanceScalar)),
-            .relaxedFloat32toFloat16PerformanceTensor = NN_TRY(
-                    unvalidatedConvert(capabilities.relaxedFloat32toFloat16PerformanceTensor)),
-            .operandPerformance = NN_TRY(unvalidatedConvert(operandPerformance)),
-            .ifPerformance = NN_TRY(unvalidatedConvert(capabilities.ifPerformance)),
-            .whilePerformance = NN_TRY(unvalidatedConvert(capabilities.whilePerformance)),
+            .relaxedFloat32toFloat16PerformanceScalar = relaxedFloat32toFloat16PerformanceScalar,
+            .relaxedFloat32toFloat16PerformanceTensor = relaxedFloat32toFloat16PerformanceTensor,
+            .operandPerformance = std::move(operandPerformance),
+            .ifPerformance = ifPerformance,
+            .whilePerformance = whilePerformance,
     };
 }
 
 nn::GeneralResult<Capabilities::OperandPerformance> unvalidatedConvert(
         const nn::Capabilities::OperandPerformance& operandPerformance) {
+    const auto type = NN_TRY(unvalidatedConvert(operandPerformance.type));
+    const auto info = NN_TRY(unvalidatedConvert(operandPerformance.info));
     return Capabilities::OperandPerformance{
-            .type = NN_TRY(unvalidatedConvert(operandPerformance.type)),
-            .info = NN_TRY(unvalidatedConvert(operandPerformance.info)),
+            .type = type,
+            .info = info,
     };
 }
 
 nn::GeneralResult<Operation> unvalidatedConvert(const nn::Operation& operation) {
+    const auto type = NN_TRY(unvalidatedConvert(operation.type));
     return Operation{
-            .type = NN_TRY(unvalidatedConvert(operation.type)),
+            .type = type,
             .inputs = operation.inputs,
             .outputs = operation.outputs,
     };
@@ -511,15 +537,19 @@ nn::GeneralResult<OperandLifeTime> unvalidatedConvert(
 }
 
 nn::GeneralResult<Operand> unvalidatedConvert(const nn::Operand& operand) {
+    const auto type = NN_TRY(unvalidatedConvert(operand.type));
+    const auto lifetime = NN_TRY(unvalidatedConvert(operand.lifetime));
+    const auto location = NN_TRY(unvalidatedConvert(operand.location));
+    auto extraParams = NN_TRY(unvalidatedConvert(operand.extraParams));
     return Operand{
-            .type = NN_TRY(unvalidatedConvert(operand.type)),
+            .type = type,
             .dimensions = operand.dimensions,
             .numberOfConsumers = 0,
             .scale = operand.scale,
             .zeroPoint = operand.zeroPoint,
-            .lifetime = NN_TRY(unvalidatedConvert(operand.lifetime)),
-            .location = NN_TRY(unvalidatedConvert(operand.location)),
-            .extraParams = NN_TRY(unvalidatedConvert(operand.extraParams)),
+            .lifetime = lifetime,
+            .location = location,
+            .extraParams = std::move(extraParams),
     };
 }
 
@@ -529,13 +559,18 @@ nn::GeneralResult<Model> unvalidatedConvert(const nn::Model& model) {
                << "Model cannot be unvalidatedConverted because it contains pointer-based memory";
     }
 
+    auto main = NN_TRY(unvalidatedConvert(model.main));
+    auto referenced = NN_TRY(unvalidatedConvert(model.referenced));
+    auto operandValues = NN_TRY(unvalidatedConvert(model.operandValues));
+    auto pools = NN_TRY(unvalidatedConvert(model.pools));
+    auto extensionNameToPrefix = NN_TRY(unvalidatedConvert(model.extensionNameToPrefix));
     return Model{
-            .main = NN_TRY(unvalidatedConvert(model.main)),
-            .referenced = NN_TRY(unvalidatedConvert(model.referenced)),
-            .operandValues = NN_TRY(unvalidatedConvert(model.operandValues)),
-            .pools = NN_TRY(unvalidatedConvert(model.pools)),
+            .main = std::move(main),
+            .referenced = std::move(referenced),
+            .operandValues = std::move(operandValues),
+            .pools = std::move(pools),
             .relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16,
-            .extensionNameToPrefix = NN_TRY(unvalidatedConvert(model.extensionNameToPrefix)),
+            .extensionNameToPrefix = std::move(extensionNameToPrefix),
     };
 }
 
@@ -544,15 +579,16 @@ nn::GeneralResult<Subgraph> unvalidatedConvert(const nn::Model::Subgraph& subgra
 
     // Update number of consumers.
     const auto numberOfConsumers =
-            NN_TRY(hal::utils::countNumberOfConsumers(operands.size(), subgraph.operations));
+            NN_TRY(countNumberOfConsumers(operands.size(), subgraph.operations));
     CHECK(operands.size() == numberOfConsumers.size());
     for (size_t i = 0; i < operands.size(); ++i) {
         operands[i].numberOfConsumers = numberOfConsumers[i];
     }
 
+    auto operations = NN_TRY(unvalidatedConvert(subgraph.operations));
     return Subgraph{
             .operands = std::move(operands),
-            .operations = NN_TRY(unvalidatedConvert(subgraph.operations)),
+            .operations = std::move(operations),
             .inputIndexes = subgraph.inputIndexes,
             .outputIndexes = subgraph.outputIndexes,
     };
@@ -576,10 +612,13 @@ nn::GeneralResult<Request> unvalidatedConvert(const nn::Request& request) {
                << "Request cannot be unvalidatedConverted because it contains pointer-based memory";
     }
 
+    auto inputs = NN_TRY(unvalidatedConvert(request.inputs));
+    auto outputs = NN_TRY(unvalidatedConvert(request.outputs));
+    auto pools = NN_TRY(unvalidatedConvert(request.pools));
     return Request{
-            .inputs = NN_TRY(unvalidatedConvert(request.inputs)),
-            .outputs = NN_TRY(unvalidatedConvert(request.outputs)),
-            .pools = NN_TRY(unvalidatedConvert(request.pools)),
+            .inputs = std::move(inputs),
+            .outputs = std::move(outputs),
+            .pools = std::move(pools),
     };
 }
 
@@ -728,4 +767,13 @@ nn::GeneralResult<V1_2::Timing> convert(const nn::Timing& timing) {
     return V1_2::utils::convert(timing);
 }
 
+nn::GeneralResult<hidl_vec<hidl_handle>> convertSyncFences(
+        const std::vector<nn::SyncFence>& syncFences) {
+    std::vector<nn::SharedHandle> handles;
+    handles.reserve(syncFences.size());
+    std::transform(syncFences.begin(), syncFences.end(), std::back_inserter(handles),
+                   [](const nn::SyncFence& syncFence) { return syncFence.getSharedHandle(); });
+    return convert(handles);
+}
+
 } // namespace android::hardware::neuralnetworks::V1_3::utils
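
Most of the hunks above apply one mechanical pattern: every fallible NN_TRY(...) conversion is hoisted out of the braced (designated) initializer list into a named local, and the aggregate is then built from those already-converted values, so the macro's early return no longer fires in the middle of aggregate initialization. The sketch below shows that before/after shape in isolation; the Result alias, TRY macro, and Operand struct are simplified stand-ins introduced only for illustration (not the NNAPI types), and TRY mimics NN_TRY with a GCC/Clang statement-expression.

// Minimal, self-contained sketch of the refactoring pattern used throughout this diff.
// "Result", "TRY", and the structs below are illustrative stand-ins, not NNAPI types;
// the real NN_TRY propagates an error object instead of returning std::nullopt.
#include <iostream>
#include <optional>
#include <string>
#include <utility>

template <typename T>
using Result = std::optional<T>;  // stand-in for nn::GeneralResult<T>

// Evaluate an expression yielding Result<T>; on failure, early-return from the
// enclosing function, otherwise unwrap the value (GCC/Clang statement-expression).
#define TRY(expr)                                      \
    ({                                                 \
        auto result_ = (expr);                         \
        if (!result_.has_value()) return std::nullopt; \
        std::move(result_).value();                    \
    })

struct Operand {
    int type;
    std::string location;
};

Result<int> convertType(int halType) {
    if (halType < 0) return std::nullopt;  // simulated conversion failure
    return halType;
}

Result<std::string> convertLocation(int halLocation) {
    if (halLocation < 0) return std::nullopt;
    return "pool:" + std::to_string(halLocation);
}

// Old shape: TRY invoked directly inside the designated-initializer list, so the
// macro's early return fires in the middle of aggregate initialization.
Result<Operand> convertOld(int halType, int halLocation) {
    return Operand{
            .type = TRY(convertType(halType)),
            .location = TRY(convertLocation(halLocation)),
    };
}

// New shape (as in this diff): each conversion lands in a named local first,
// then the aggregate is built from already-converted values.
Result<Operand> convertNew(int halType, int halLocation) {
    const auto type = TRY(convertType(halType));
    auto location = TRY(convertLocation(halLocation));
    return Operand{
            .type = type,
            .location = std::move(location),
    };
}

int main() {
    std::cout << convertNew(3, 7).has_value() << ' '    // 1: both conversions succeed
              << convertNew(3, -1).has_value() << '\n';  // 0: failure propagates early
}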
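
The only new entry point in this diff, convertSyncFences, follows a project-then-convert idiom: each nn::SyncFence is mapped to its nn::SharedHandle (via getSharedHandle(), as in the hunk above), and the existing vector convert() overload does the actual conversion. A stand-alone sketch of the same idiom is below; "Fence", "Handle", and convertHandles() are placeholder names for illustration, not the NNAPI classes.

// Sketch of the project-then-convert idiom behind convertSyncFences: map each
// element to the type an existing bulk conversion already accepts, then convert
// the whole vector at once instead of duplicating per-element logic.
#include <algorithm>
#include <iterator>
#include <memory>
#include <string>
#include <vector>

using Handle = std::shared_ptr<std::string>;  // stand-in for nn::SharedHandle

struct Fence {
    Handle handle;
    Handle getSharedHandle() const { return handle; }  // mirrors nn::SyncFence::getSharedHandle()
};

// Stand-in for the existing convert() overload that already handles a vector of handles.
std::vector<std::string> convertHandles(const std::vector<Handle>& handles) {
    std::vector<std::string> converted;
    converted.reserve(handles.size());
    for (const auto& handle : handles) converted.push_back(handle ? *handle : std::string{});
    return converted;
}

// Project each fence down to its handle, then delegate to the bulk conversion.
std::vector<std::string> convertFences(const std::vector<Fence>& fences) {
    std::vector<Handle> handles;
    handles.reserve(fences.size());
    std::transform(fences.begin(), fences.end(), std::back_inserter(handles),
                   [](const Fence& fence) { return fence.getSharedHandle(); });
    return convertHandles(handles);
}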