path: root/neuralnetworks/1.3/utils/src
Diffstat (limited to 'neuralnetworks/1.3/utils/src')
-rw-r--r--   neuralnetworks/1.3/utils/src/Buffer.cpp          6
-rw-r--r--   neuralnetworks/1.3/utils/src/Callbacks.cpp      13
-rw-r--r--   neuralnetworks/1.3/utils/src/Conversions.cpp   164
-rw-r--r--   neuralnetworks/1.3/utils/src/Device.cpp         18
-rw-r--r--   neuralnetworks/1.3/utils/src/Execution.cpp       5
-rw-r--r--   neuralnetworks/1.3/utils/src/PreparedModel.cpp  66
6 files changed, 157 insertions(+), 115 deletions(-)
diff --git a/neuralnetworks/1.3/utils/src/Buffer.cpp b/neuralnetworks/1.3/utils/src/Buffer.cpp
index ada526573b..34925eacfc 100644
--- a/neuralnetworks/1.3/utils/src/Buffer.cpp
+++ b/neuralnetworks/1.3/utils/src/Buffer.cpp
@@ -25,7 +25,7 @@
#include <nnapi/Result.h>
#include <nnapi/Types.h>
#include <nnapi/hal/1.0/Conversions.h>
-#include <nnapi/hal/HandleError.h>
+#include <nnapi/hal/1.0/HandleError.h>
#include "Conversions.h"
#include "Utils.h"
@@ -66,7 +66,7 @@ nn::GeneralResult<void> Buffer::copyTo(const nn::SharedMemory& dst) const {
const auto ret = kBuffer->copyTo(hidlDst);
const auto status = HANDLE_TRANSPORT_FAILURE(ret);
- HANDLE_HAL_STATUS(status) << "IBuffer::copyTo failed with " << toString(status);
+ HANDLE_STATUS_HIDL(status) << "IBuffer::copyTo failed with " << toString(status);
return {};
}
@@ -78,7 +78,7 @@ nn::GeneralResult<void> Buffer::copyFrom(const nn::SharedMemory& src,
const auto ret = kBuffer->copyFrom(hidlSrc, hidlDimensions);
const auto status = HANDLE_TRANSPORT_FAILURE(ret);
- HANDLE_HAL_STATUS(status) << "IBuffer::copyFrom failed with " << toString(status);
+ HANDLE_STATUS_HIDL(status) << "IBuffer::copyFrom failed with " << toString(status);
return {};
}
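
Judging from the call sites above, HANDLE_STATUS_HIDL(status) << "..." keeps the same contract as the old HANDLE_HAL_STATUS: when the HIDL status is anything other than NONE, the enclosing function returns early with a canonical error, and the streamed text becomes part of that error's message. Below is a minimal self-contained sketch of that early-return idiom, with simplified types and an invented macro name; it is not the real AOSP definition.

#include <optional>
#include <string>

// Simplified stand-ins; none of these are the real AOSP definitions.
enum class ErrorStatus { NONE, GENERAL_FAILURE, DEVICE_UNAVAILABLE };

struct Error {
    ErrorStatus code;
    std::string message;
    Error& operator<<(const std::string& extra) {
        message += extra;
        return *this;
    }
};

// VoidResult: nullopt means success, an Error means failure.
using VoidResult = std::optional<Error>;

// Invented macro name, sketching the early-return idiom used above: when
// status != NONE the enclosing function returns an Error, and the caller's
// trailing "<< ..." streams extra context into that Error before the return.
#define HANDLE_STATUS_SKETCH(status)   \
    if ((status) != ErrorStatus::NONE) \
    return Error{(status), {}}

VoidResult copyToSketch(ErrorStatus status) {
    HANDLE_STATUS_SKETCH(status) << "IBuffer::copyTo failed";
    return std::nullopt;  // success
}
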
diff --git a/neuralnetworks/1.3/utils/src/Callbacks.cpp b/neuralnetworks/1.3/utils/src/Callbacks.cpp
index 8e9fb833b1..f0638626f6 100644
--- a/neuralnetworks/1.3/utils/src/Callbacks.cpp
+++ b/neuralnetworks/1.3/utils/src/Callbacks.cpp
@@ -30,13 +30,13 @@
#include <nnapi/Types.h>
#include <nnapi/hal/1.0/Callbacks.h>
#include <nnapi/hal/1.0/Conversions.h>
+#include <nnapi/hal/1.0/HandleError.h>
#include <nnapi/hal/1.0/PreparedModel.h>
+#include <nnapi/hal/1.0/ProtectCallback.h>
#include <nnapi/hal/1.2/Callbacks.h>
#include <nnapi/hal/1.2/Conversions.h>
#include <nnapi/hal/1.2/PreparedModel.h>
#include <nnapi/hal/CommonUtils.h>
-#include <nnapi/hal/HandleError.h>
-#include <nnapi/hal/ProtectCallback.h>
#include <nnapi/hal/TransferValue.h>
#include <utility>
@@ -71,13 +71,13 @@ convertExecutionGeneralResultsHelper(const hidl_vec<V1_2::OutputShape>& outputSh
nn::GeneralResult<std::vector<bool>> supportedOperationsCallback(
ErrorStatus status, const hidl_vec<bool>& supportedOperations) {
- HANDLE_HAL_STATUS(status) << "get supported operations failed with " << toString(status);
+ HANDLE_STATUS_HIDL(status) << "get supported operations failed with " << toString(status);
return supportedOperations;
}
nn::GeneralResult<nn::SharedPreparedModel> prepareModelCallback(
ErrorStatus status, const sp<IPreparedModel>& preparedModel) {
- HANDLE_HAL_STATUS(status) << "model preparation failed with " << toString(status);
+ HANDLE_STATUS_HIDL(status) << "model preparation failed with " << toString(status);
return NN_TRY(PreparedModel::create(preparedModel, /*executeSynchronously=*/true));
}
@@ -90,9 +90,8 @@ nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> executi
return NN_ERROR(nn::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE, std::move(canonicalOutputShapes))
<< "execution failed with " << toString(status);
}
- HANDLE_HAL_STATUS(status) << "execution failed with " << toString(status);
- return hal::utils::makeExecutionFailure(
- convertExecutionGeneralResultsHelper(outputShapes, timing));
+ HANDLE_STATUS_HIDL(status) << "execution failed with " << toString(status);
+ return convertExecutionGeneralResultsHelper(outputShapes, timing);
}
Return<void> PreparedModelCallback::notify(V1_0::ErrorStatus status,
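
The other change in this file drops the hal::utils::makeExecutionFailure() adapter when returning a general conversion result from the execution callback. That only works if a general error can flow into an execution result directly, presumably via an implicit conversion between the error types. A small self-contained sketch of that kind of converting-constructor arrangement, with placeholder types rather than the real nn::GeneralError / nn::ExecutionError:

#include <string>
#include <utility>
#include <vector>

// Placeholder error types, not the real nn::GeneralError / nn::ExecutionError.
struct GeneralError {
    std::string message;
    int code = 0;
};

struct ExecutionError {
    // Converting constructor: an execution error can be built straight from a
    // general error, so a general failure can propagate into an execution
    // result without a makeExecutionFailure()-style adapter.
    ExecutionError(GeneralError e) : message(std::move(e.message)), code(e.code) {}
    std::string message;
    int code = 0;
    std::vector<int> outputShapes;  // extra payload only execution errors carry
};

// With the conversion in place, a helper that fails with a GeneralError can be
// returned from a function whose error type is ExecutionError.
ExecutionError propagate(GeneralError e) {
    return e;  // implicit GeneralError -> ExecutionError conversion
}
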
diff --git a/neuralnetworks/1.3/utils/src/Conversions.cpp b/neuralnetworks/1.3/utils/src/Conversions.cpp
index e8a4f55afd..4eeb414dc8 100644
--- a/neuralnetworks/1.3/utils/src/Conversions.cpp
+++ b/neuralnetworks/1.3/utils/src/Conversions.cpp
@@ -28,7 +28,6 @@
#include <nnapi/hal/1.0/Conversions.h>
#include <nnapi/hal/1.2/Conversions.h>
#include <nnapi/hal/CommonUtils.h>
-#include <nnapi/hal/HandleError.h>
#include <algorithm>
#include <chrono>
@@ -131,32 +130,38 @@ GeneralResult<Capabilities> unvalidatedConvert(const hal::V1_3::Capabilities& ca
}
auto operandPerformance = NN_TRY(unvalidatedConvert(capabilities.operandPerformance));
- auto table = NN_TRY(hal::utils::makeGeneralFailure(
- Capabilities::OperandPerformanceTable::create(std::move(operandPerformance)),
- nn::ErrorStatus::GENERAL_FAILURE));
-
+ auto table =
+ NN_TRY(Capabilities::OperandPerformanceTable::create(std::move(operandPerformance)));
+
+ const auto relaxedFloat32toFloat16PerformanceScalar =
+ NN_TRY(unvalidatedConvert(capabilities.relaxedFloat32toFloat16PerformanceScalar));
+ const auto relaxedFloat32toFloat16PerformanceTensor =
+ NN_TRY(unvalidatedConvert(capabilities.relaxedFloat32toFloat16PerformanceTensor));
+ const auto ifPerformance = NN_TRY(unvalidatedConvert(capabilities.ifPerformance));
+ const auto whilePerformance = NN_TRY(unvalidatedConvert(capabilities.whilePerformance));
return Capabilities{
- .relaxedFloat32toFloat16PerformanceScalar = NN_TRY(
- unvalidatedConvert(capabilities.relaxedFloat32toFloat16PerformanceScalar)),
- .relaxedFloat32toFloat16PerformanceTensor = NN_TRY(
- unvalidatedConvert(capabilities.relaxedFloat32toFloat16PerformanceTensor)),
+ .relaxedFloat32toFloat16PerformanceScalar = relaxedFloat32toFloat16PerformanceScalar,
+ .relaxedFloat32toFloat16PerformanceTensor = relaxedFloat32toFloat16PerformanceTensor,
.operandPerformance = std::move(table),
- .ifPerformance = NN_TRY(unvalidatedConvert(capabilities.ifPerformance)),
- .whilePerformance = NN_TRY(unvalidatedConvert(capabilities.whilePerformance)),
+ .ifPerformance = ifPerformance,
+ .whilePerformance = whilePerformance,
};
}
GeneralResult<Capabilities::OperandPerformance> unvalidatedConvert(
const hal::V1_3::Capabilities::OperandPerformance& operandPerformance) {
+ const auto type = NN_TRY(unvalidatedConvert(operandPerformance.type));
+ const auto info = NN_TRY(unvalidatedConvert(operandPerformance.info));
return Capabilities::OperandPerformance{
- .type = NN_TRY(unvalidatedConvert(operandPerformance.type)),
- .info = NN_TRY(unvalidatedConvert(operandPerformance.info)),
+ .type = type,
+ .info = info,
};
}
GeneralResult<Operation> unvalidatedConvert(const hal::V1_3::Operation& operation) {
+ const auto type = NN_TRY(unvalidatedConvert(operation.type));
return Operation{
- .type = NN_TRY(unvalidatedConvert(operation.type)),
+ .type = type,
.inputs = operation.inputs,
.outputs = operation.outputs,
};
@@ -168,25 +173,34 @@ GeneralResult<Operand::LifeTime> unvalidatedConvert(
}
GeneralResult<Operand> unvalidatedConvert(const hal::V1_3::Operand& operand) {
+ const auto type = NN_TRY(unvalidatedConvert(operand.type));
+ const auto lifetime = NN_TRY(unvalidatedConvert(operand.lifetime));
+ const auto location = NN_TRY(unvalidatedConvert(operand.location));
+ auto extraParams = NN_TRY(unvalidatedConvert(operand.extraParams));
return Operand{
- .type = NN_TRY(unvalidatedConvert(operand.type)),
+ .type = type,
.dimensions = operand.dimensions,
.scale = operand.scale,
.zeroPoint = operand.zeroPoint,
- .lifetime = NN_TRY(unvalidatedConvert(operand.lifetime)),
- .location = NN_TRY(unvalidatedConvert(operand.location)),
- .extraParams = NN_TRY(unvalidatedConvert(operand.extraParams)),
+ .lifetime = lifetime,
+ .location = location,
+ .extraParams = std::move(extraParams),
};
}
GeneralResult<Model> unvalidatedConvert(const hal::V1_3::Model& model) {
+ auto main = NN_TRY(unvalidatedConvert(model.main));
+ auto referenced = NN_TRY(unvalidatedConvert(model.referenced));
+ auto operandValues = NN_TRY(unvalidatedConvert(model.operandValues));
+ auto pools = NN_TRY(unvalidatedConvert(model.pools));
+ auto extensionNameToPrefix = NN_TRY(unvalidatedConvert(model.extensionNameToPrefix));
return Model{
- .main = NN_TRY(unvalidatedConvert(model.main)),
- .referenced = NN_TRY(unvalidatedConvert(model.referenced)),
- .operandValues = NN_TRY(unvalidatedConvert(model.operandValues)),
- .pools = NN_TRY(unvalidatedConvert(model.pools)),
+ .main = std::move(main),
+ .referenced = std::move(referenced),
+ .operandValues = std::move(operandValues),
+ .pools = std::move(pools),
.relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16,
- .extensionNameToPrefix = NN_TRY(unvalidatedConvert(model.extensionNameToPrefix)),
+ .extensionNameToPrefix = std::move(extensionNameToPrefix),
};
}
@@ -195,7 +209,7 @@ GeneralResult<Model::Subgraph> unvalidatedConvert(const hal::V1_3::Subgraph& sub
// Verify number of consumers.
const auto numberOfConsumers =
- NN_TRY(hal::utils::countNumberOfConsumers(subgraph.operands.size(), operations));
+ NN_TRY(countNumberOfConsumers(subgraph.operands.size(), operations));
CHECK(subgraph.operands.size() == numberOfConsumers.size());
for (size_t i = 0; i < subgraph.operands.size(); ++i) {
if (subgraph.operands[i].numberOfConsumers != numberOfConsumers[i]) {
@@ -206,8 +220,9 @@ GeneralResult<Model::Subgraph> unvalidatedConvert(const hal::V1_3::Subgraph& sub
}
}
+ auto operands = NN_TRY(unvalidatedConvert(subgraph.operands));
return Model::Subgraph{
- .operands = NN_TRY(unvalidatedConvert(subgraph.operands)),
+ .operands = std::move(operands),
.operations = std::move(operations),
.inputIndexes = subgraph.inputIndexes,
.outputIndexes = subgraph.outputIndexes,
@@ -227,10 +242,13 @@ GeneralResult<BufferRole> unvalidatedConvert(const hal::V1_3::BufferRole& buffer
}
GeneralResult<Request> unvalidatedConvert(const hal::V1_3::Request& request) {
+ auto inputs = NN_TRY(unvalidatedConvert(request.inputs));
+ auto outputs = NN_TRY(unvalidatedConvert(request.outputs));
+ auto pools = NN_TRY(unvalidatedConvert(request.pools));
return Request{
- .inputs = NN_TRY(unvalidatedConvert(request.inputs)),
- .outputs = NN_TRY(unvalidatedConvert(request.outputs)),
- .pools = NN_TRY(unvalidatedConvert(request.pools)),
+ .inputs = std::move(inputs),
+ .outputs = std::move(outputs),
+ .pools = std::move(pools),
};
}
@@ -239,7 +257,7 @@ GeneralResult<Request::MemoryPool> unvalidatedConvert(
using Discriminator = hal::V1_3::Request::MemoryPool::hidl_discriminator;
switch (memoryPool.getDiscriminator()) {
case Discriminator::hidlMemory:
- return hal::utils::createSharedMemoryFromHidlMemory(memoryPool.hidlMemory());
+ return unvalidatedConvert(memoryPool.hidlMemory());
case Discriminator::token:
return static_cast<Request::MemoryDomainToken>(memoryPool.token());
}
@@ -381,7 +399,7 @@ nn::GeneralResult<hidl_vec<uint8_t>> unvalidatedConvert(
}
nn::GeneralResult<hidl_handle> unvalidatedConvert(const nn::SharedHandle& handle) {
- return V1_2::utils::unvalidatedConvert(handle);
+ return V1_0::utils::unvalidatedConvert(handle);
}
nn::GeneralResult<hidl_memory> unvalidatedConvert(const nn::SharedMemory& memory) {
@@ -398,7 +416,7 @@ nn::GeneralResult<V1_2::Operand::ExtraParams> unvalidatedConvert(
}
nn::GeneralResult<V1_2::Model::ExtensionNameAndPrefix> unvalidatedConvert(
- const nn::Model::ExtensionNameAndPrefix& extensionNameAndPrefix) {
+ const nn::ExtensionNameAndPrefix& extensionNameAndPrefix) {
return V1_2::utils::unvalidatedConvert(extensionNameAndPrefix);
}
@@ -465,37 +483,45 @@ nn::GeneralResult<Priority> unvalidatedConvert(const nn::Priority& priority) {
}
nn::GeneralResult<Capabilities> unvalidatedConvert(const nn::Capabilities& capabilities) {
- std::vector<nn::Capabilities::OperandPerformance> operandPerformance;
- operandPerformance.reserve(capabilities.operandPerformance.asVector().size());
+ std::vector<nn::Capabilities::OperandPerformance> filteredOperandPerformances;
+ filteredOperandPerformances.reserve(capabilities.operandPerformance.asVector().size());
std::copy_if(capabilities.operandPerformance.asVector().begin(),
capabilities.operandPerformance.asVector().end(),
- std::back_inserter(operandPerformance),
+ std::back_inserter(filteredOperandPerformances),
[](const nn::Capabilities::OperandPerformance& operandPerformance) {
return compliantVersion(operandPerformance.type).has_value();
});
+ const auto relaxedFloat32toFloat16PerformanceScalar =
+ NN_TRY(unvalidatedConvert(capabilities.relaxedFloat32toFloat16PerformanceScalar));
+ const auto relaxedFloat32toFloat16PerformanceTensor =
+ NN_TRY(unvalidatedConvert(capabilities.relaxedFloat32toFloat16PerformanceTensor));
+ auto operandPerformance = NN_TRY(unvalidatedConvert(filteredOperandPerformances));
+ const auto ifPerformance = NN_TRY(unvalidatedConvert(capabilities.ifPerformance));
+ const auto whilePerformance = NN_TRY(unvalidatedConvert(capabilities.whilePerformance));
return Capabilities{
- .relaxedFloat32toFloat16PerformanceScalar = NN_TRY(
- unvalidatedConvert(capabilities.relaxedFloat32toFloat16PerformanceScalar)),
- .relaxedFloat32toFloat16PerformanceTensor = NN_TRY(
- unvalidatedConvert(capabilities.relaxedFloat32toFloat16PerformanceTensor)),
- .operandPerformance = NN_TRY(unvalidatedConvert(operandPerformance)),
- .ifPerformance = NN_TRY(unvalidatedConvert(capabilities.ifPerformance)),
- .whilePerformance = NN_TRY(unvalidatedConvert(capabilities.whilePerformance)),
+ .relaxedFloat32toFloat16PerformanceScalar = relaxedFloat32toFloat16PerformanceScalar,
+ .relaxedFloat32toFloat16PerformanceTensor = relaxedFloat32toFloat16PerformanceTensor,
+ .operandPerformance = std::move(operandPerformance),
+ .ifPerformance = ifPerformance,
+ .whilePerformance = whilePerformance,
};
}
nn::GeneralResult<Capabilities::OperandPerformance> unvalidatedConvert(
const nn::Capabilities::OperandPerformance& operandPerformance) {
+ const auto type = NN_TRY(unvalidatedConvert(operandPerformance.type));
+ const auto info = NN_TRY(unvalidatedConvert(operandPerformance.info));
return Capabilities::OperandPerformance{
- .type = NN_TRY(unvalidatedConvert(operandPerformance.type)),
- .info = NN_TRY(unvalidatedConvert(operandPerformance.info)),
+ .type = type,
+ .info = info,
};
}
nn::GeneralResult<Operation> unvalidatedConvert(const nn::Operation& operation) {
+ const auto type = NN_TRY(unvalidatedConvert(operation.type));
return Operation{
- .type = NN_TRY(unvalidatedConvert(operation.type)),
+ .type = type,
.inputs = operation.inputs,
.outputs = operation.outputs,
};
@@ -511,15 +537,19 @@ nn::GeneralResult<OperandLifeTime> unvalidatedConvert(
}
nn::GeneralResult<Operand> unvalidatedConvert(const nn::Operand& operand) {
+ const auto type = NN_TRY(unvalidatedConvert(operand.type));
+ const auto lifetime = NN_TRY(unvalidatedConvert(operand.lifetime));
+ const auto location = NN_TRY(unvalidatedConvert(operand.location));
+ auto extraParams = NN_TRY(unvalidatedConvert(operand.extraParams));
return Operand{
- .type = NN_TRY(unvalidatedConvert(operand.type)),
+ .type = type,
.dimensions = operand.dimensions,
.numberOfConsumers = 0,
.scale = operand.scale,
.zeroPoint = operand.zeroPoint,
- .lifetime = NN_TRY(unvalidatedConvert(operand.lifetime)),
- .location = NN_TRY(unvalidatedConvert(operand.location)),
- .extraParams = NN_TRY(unvalidatedConvert(operand.extraParams)),
+ .lifetime = lifetime,
+ .location = location,
+ .extraParams = std::move(extraParams),
};
}
@@ -529,13 +559,18 @@ nn::GeneralResult<Model> unvalidatedConvert(const nn::Model& model) {
<< "Model cannot be unvalidatedConverted because it contains pointer-based memory";
}
+ auto main = NN_TRY(unvalidatedConvert(model.main));
+ auto referenced = NN_TRY(unvalidatedConvert(model.referenced));
+ auto operandValues = NN_TRY(unvalidatedConvert(model.operandValues));
+ auto pools = NN_TRY(unvalidatedConvert(model.pools));
+ auto extensionNameToPrefix = NN_TRY(unvalidatedConvert(model.extensionNameToPrefix));
return Model{
- .main = NN_TRY(unvalidatedConvert(model.main)),
- .referenced = NN_TRY(unvalidatedConvert(model.referenced)),
- .operandValues = NN_TRY(unvalidatedConvert(model.operandValues)),
- .pools = NN_TRY(unvalidatedConvert(model.pools)),
+ .main = std::move(main),
+ .referenced = std::move(referenced),
+ .operandValues = std::move(operandValues),
+ .pools = std::move(pools),
.relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16,
- .extensionNameToPrefix = NN_TRY(unvalidatedConvert(model.extensionNameToPrefix)),
+ .extensionNameToPrefix = std::move(extensionNameToPrefix),
};
}
@@ -544,15 +579,16 @@ nn::GeneralResult<Subgraph> unvalidatedConvert(const nn::Model::Subgraph& subgra
// Update number of consumers.
const auto numberOfConsumers =
- NN_TRY(hal::utils::countNumberOfConsumers(operands.size(), subgraph.operations));
+ NN_TRY(countNumberOfConsumers(operands.size(), subgraph.operations));
CHECK(operands.size() == numberOfConsumers.size());
for (size_t i = 0; i < operands.size(); ++i) {
operands[i].numberOfConsumers = numberOfConsumers[i];
}
+ auto operations = NN_TRY(unvalidatedConvert(subgraph.operations));
return Subgraph{
.operands = std::move(operands),
- .operations = NN_TRY(unvalidatedConvert(subgraph.operations)),
+ .operations = std::move(operations),
.inputIndexes = subgraph.inputIndexes,
.outputIndexes = subgraph.outputIndexes,
};
@@ -576,10 +612,13 @@ nn::GeneralResult<Request> unvalidatedConvert(const nn::Request& request) {
<< "Request cannot be unvalidatedConverted because it contains pointer-based memory";
}
+ auto inputs = NN_TRY(unvalidatedConvert(request.inputs));
+ auto outputs = NN_TRY(unvalidatedConvert(request.outputs));
+ auto pools = NN_TRY(unvalidatedConvert(request.pools));
return Request{
- .inputs = NN_TRY(unvalidatedConvert(request.inputs)),
- .outputs = NN_TRY(unvalidatedConvert(request.outputs)),
- .pools = NN_TRY(unvalidatedConvert(request.pools)),
+ .inputs = std::move(inputs),
+ .outputs = std::move(outputs),
+ .pools = std::move(pools),
};
}
@@ -728,4 +767,13 @@ nn::GeneralResult<V1_2::Timing> convert(const nn::Timing& timing) {
return V1_2::utils::convert(timing);
}
+nn::GeneralResult<hidl_vec<hidl_handle>> convertSyncFences(
+ const std::vector<nn::SyncFence>& syncFences) {
+ std::vector<nn::SharedHandle> handles;
+ handles.reserve(syncFences.size());
+ std::transform(syncFences.begin(), syncFences.end(), std::back_inserter(handles),
+ [](const nn::SyncFence& syncFence) { return syncFence.getSharedHandle(); });
+ return convert(handles);
+}
+
} // namespace android::hardware::neuralnetworks::V1_3::utils
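
Most of the churn in Conversions.cpp follows one mechanical pattern: NN_TRY calls that used to sit inside braced initializers are hoisted into named locals, and the aggregate is then built from those locals (moving the non-trivial ones). Below is a self-contained illustration of the before/after shape, using placeholder types and a simplified stand-in for NN_TRY rather than the real NNAPI definitions.

#include <optional>
#include <string>
#include <utility>
#include <vector>

// Placeholder types -- not the real V1_3 / canonical NNAPI operands.
struct RawOperand { int type = 0; std::vector<int> dims; };
struct Operand { std::string type; std::vector<int> dimensions; };

std::optional<std::string> convertType(int type) {
    if (type < 0) return std::nullopt;  // simulate a conversion failure
    return std::to_string(type);
}

// Simplified stand-in for NN_TRY: bind the converted value to a local, or
// return early from the enclosing function on failure.
#define TRY_ASSIGN(var, expr)              \
    auto var##Result = (expr);             \
    if (!var##Result) return std::nullopt; \
    auto var = std::move(*var##Result)

std::optional<Operand> convertOperand(const RawOperand& raw) {
    // Before this change: .type = NN_TRY(convertType(raw.type)) inside the
    // braces. After: hoist the fallible conversion out of the initializer,
    // then move the result into the aggregate.
    TRY_ASSIGN(type, convertType(raw.type));
    return Operand{
            .type = std::move(type),
            .dimensions = raw.dims,
    };
}
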
diff --git a/neuralnetworks/1.3/utils/src/Device.cpp b/neuralnetworks/1.3/utils/src/Device.cpp
index d710b85070..824cec61c8 100644
--- a/neuralnetworks/1.3/utils/src/Device.cpp
+++ b/neuralnetworks/1.3/utils/src/Device.cpp
@@ -33,13 +33,13 @@
#include <nnapi/OperandTypes.h>
#include <nnapi/Result.h>
#include <nnapi/Types.h>
+#include <nnapi/hal/1.0/HandleError.h>
+#include <nnapi/hal/1.0/ProtectCallback.h>
#include <nnapi/hal/1.1/Conversions.h>
#include <nnapi/hal/1.2/Conversions.h>
#include <nnapi/hal/1.2/Device.h>
#include <nnapi/hal/1.2/Utils.h>
#include <nnapi/hal/CommonUtils.h>
-#include <nnapi/hal/HandleError.h>
-#include <nnapi/hal/ProtectCallback.h>
#include <any>
#include <functional>
@@ -72,7 +72,7 @@ nn::GeneralResult<hidl_vec<sp<IPreparedModel>>> convert(
nn::GeneralResult<nn::Capabilities> capabilitiesCallback(ErrorStatus status,
const Capabilities& capabilities) {
- HANDLE_HAL_STATUS(status) << "getting capabilities failed with " << toString(status);
+ HANDLE_STATUS_HIDL(status) << "getting capabilities failed with " << toString(status);
return nn::convert(capabilities);
}
@@ -89,7 +89,7 @@ nn::GeneralResult<nn::Capabilities> getCapabilitiesFrom(V1_3::IDevice* device) {
nn::GeneralResult<nn::SharedBuffer> allocationCallback(ErrorStatus status,
const sp<IBuffer>& buffer, uint32_t token) {
- HANDLE_HAL_STATUS(status) << "IDevice::allocate failed with " << toString(status);
+ HANDLE_STATUS_HIDL(status) << "IDevice::allocate failed with " << toString(status);
return Buffer::create(buffer, static_cast<nn::Request::MemoryDomainToken>(token));
}
@@ -143,7 +143,7 @@ const std::string& Device::getVersionString() const {
}
nn::Version Device::getFeatureLevel() const {
- return nn::Version::ANDROID_R;
+ return kVersion;
}
nn::DeviceType Device::getType() const {
@@ -187,7 +187,9 @@ nn::GeneralResult<std::vector<bool>> Device::getSupportedOperations(const nn::Mo
nn::GeneralResult<nn::SharedPreparedModel> Device::prepareModel(
const nn::Model& model, nn::ExecutionPreference preference, nn::Priority priority,
nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
- const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token) const {
+ const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token,
+ const std::vector<nn::TokenValuePair>& /*hints*/,
+ const std::vector<nn::ExtensionNameAndPrefix>& /*extensionNameToPrefix*/) const {
// Ensure that model is ready for IPC.
std::optional<nn::Model> maybeModelInShared;
const nn::Model& modelInShared =
@@ -208,7 +210,7 @@ nn::GeneralResult<nn::SharedPreparedModel> Device::prepareModel(
kDevice->prepareModel_1_3(hidlModel, hidlPreference, hidlPriority, hidlDeadline,
hidlModelCache, hidlDataCache, hidlToken, cb);
const auto status = HANDLE_TRANSPORT_FAILURE(ret);
- HANDLE_HAL_STATUS(status) << "model preparation failed with " << toString(status);
+ HANDLE_STATUS_HIDL(status) << "model preparation failed with " << toString(status);
return cb->get();
}
@@ -227,7 +229,7 @@ nn::GeneralResult<nn::SharedPreparedModel> Device::prepareModelFromCache(
const auto ret = kDevice->prepareModelFromCache_1_3(hidlDeadline, hidlModelCache, hidlDataCache,
hidlToken, cb);
const auto status = HANDLE_TRANSPORT_FAILURE(ret);
- HANDLE_HAL_STATUS(status) << "model preparation from cache failed with " << toString(status);
+ HANDLE_STATUS_HIDL(status) << "model preparation from cache failed with " << toString(status);
return cb->get();
}
diff --git a/neuralnetworks/1.3/utils/src/Execution.cpp b/neuralnetworks/1.3/utils/src/Execution.cpp
index 3d17cc3ef4..0ec7f56d37 100644
--- a/neuralnetworks/1.3/utils/src/Execution.cpp
+++ b/neuralnetworks/1.3/utils/src/Execution.cpp
@@ -29,7 +29,6 @@
#include <nnapi/Result.h>
#include <nnapi/Types.h>
#include <nnapi/hal/CommonUtils.h>
-#include <nnapi/hal/HandleError.h>
#include <memory>
#include <utility>
@@ -65,7 +64,7 @@ Execution::Execution(PrivateConstructorTag /*tag*/,
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> Execution::compute(
const nn::OptionalTimePoint& deadline) const {
- const auto hidlDeadline = NN_TRY(hal::utils::makeExecutionFailure(convert(deadline)));
+ const auto hidlDeadline = NN_TRY(convert(deadline));
return kPreparedModel->executeInternal(kRequest, kMeasure, hidlDeadline, kLoopTimeoutDuration,
kRelocation);
}
@@ -73,7 +72,7 @@ nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> Executi
nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> Execution::computeFenced(
const std::vector<nn::SyncFence>& waitFor, const nn::OptionalTimePoint& deadline,
const nn::OptionalDuration& timeoutDurationAfterFence) const {
- const auto hidlWaitFor = NN_TRY(hal::utils::convertSyncFences(waitFor));
+ const auto hidlWaitFor = NN_TRY(convertSyncFences(waitFor));
const auto hidlDeadline = NN_TRY(convert(deadline));
const auto hidlTimeoutDurationAfterFence = NN_TRY(convert(timeoutDurationAfterFence));
return kPreparedModel->executeFencedInternal(kRequest, hidlWaitFor, kMeasure, hidlDeadline,
diff --git a/neuralnetworks/1.3/utils/src/PreparedModel.cpp b/neuralnetworks/1.3/utils/src/PreparedModel.cpp
index 1623de5e9f..b92f877a51 100644
--- a/neuralnetworks/1.3/utils/src/PreparedModel.cpp
+++ b/neuralnetworks/1.3/utils/src/PreparedModel.cpp
@@ -30,12 +30,12 @@
#include <nnapi/Result.h>
#include <nnapi/TypeUtils.h>
#include <nnapi/Types.h>
+#include <nnapi/hal/1.0/HandleError.h>
+#include <nnapi/hal/1.0/ProtectCallback.h>
+#include <nnapi/hal/1.2/Burst.h>
+#include <nnapi/hal/1.2/BurstUtils.h>
#include <nnapi/hal/1.2/Conversions.h>
-#include <nnapi/hal/1.2/ExecutionBurstController.h>
-#include <nnapi/hal/1.2/ExecutionBurstUtils.h>
#include <nnapi/hal/CommonUtils.h>
-#include <nnapi/hal/HandleError.h>
-#include <nnapi/hal/ProtectCallback.h>
#include <memory>
#include <tuple>
@@ -50,20 +50,19 @@ namespace {
nn::GeneralResult<std::pair<nn::Timing, nn::Timing>> convertFencedExecutionCallbackResults(
ErrorStatus status, const V1_2::Timing& timingLaunched, const V1_2::Timing& timingFenced) {
- HANDLE_HAL_STATUS(status) << "fenced execution callback info failed with " << toString(status);
+ HANDLE_STATUS_HIDL(status) << "fenced execution callback info failed with " << toString(status);
return std::make_pair(NN_TRY(nn::convert(timingLaunched)), NN_TRY(nn::convert(timingFenced)));
}
nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> fencedExecutionCallback(
ErrorStatus status, const hidl_handle& syncFence,
const sp<IFencedExecutionCallback>& callback) {
- HANDLE_HAL_STATUS(status) << "fenced execution failed with " << toString(status);
+ HANDLE_STATUS_HIDL(status) << "fenced execution failed with " << toString(status);
auto resultSyncFence = nn::SyncFence::createAsSignaled();
if (syncFence.getNativeHandle() != nullptr) {
auto sharedHandle = NN_TRY(nn::convert(syncFence));
- resultSyncFence = NN_TRY(hal::utils::makeGeneralFailure(
- nn::SyncFence::create(std::move(sharedHandle)), nn::ErrorStatus::GENERAL_FAILURE));
+ resultSyncFence = NN_TRY(nn::SyncFence::create(std::move(sharedHandle)));
}
if (callback == nullptr) {
@@ -128,7 +127,7 @@ PreparedModel::executeAsynchronously(const Request& request, V1_2::MeasureTiming
kPreparedModel->execute_1_3(request, measure, deadline, loopTimeoutDuration, cb);
const auto status = HANDLE_TRANSPORT_FAILURE(ret);
if (status != ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) {
- HANDLE_HAL_STATUS(status) << "execution failed with " << toString(status);
+ HANDLE_STATUS_HIDL(status) << "execution failed with " << toString(status);
}
return cb->get();
@@ -136,21 +135,20 @@ PreparedModel::executeAsynchronously(const Request& request, V1_2::MeasureTiming
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> PreparedModel::execute(
const nn::Request& request, nn::MeasureTiming measure,
- const nn::OptionalTimePoint& deadline,
- const nn::OptionalDuration& loopTimeoutDuration) const {
+ const nn::OptionalTimePoint& deadline, const nn::OptionalDuration& loopTimeoutDuration,
+ const std::vector<nn::TokenValuePair>& /*hints*/,
+ const std::vector<nn::ExtensionNameAndPrefix>& /*extensionNameToPrefix*/) const {
// Ensure that request is ready for IPC.
std::optional<nn::Request> maybeRequestInShared;
hal::utils::RequestRelocation relocation;
- const nn::Request& requestInShared =
- NN_TRY(hal::utils::makeExecutionFailure(hal::utils::convertRequestFromPointerToShared(
- &request, nn::kDefaultRequestMemoryAlignment, nn::kMinMemoryPadding,
- &maybeRequestInShared, &relocation)));
+ const nn::Request& requestInShared = NN_TRY(hal::utils::convertRequestFromPointerToShared(
+ &request, nn::kDefaultRequestMemoryAlignment, nn::kMinMemoryPadding,
+ &maybeRequestInShared, &relocation));
- const auto hidlRequest = NN_TRY(hal::utils::makeExecutionFailure(convert(requestInShared)));
- const auto hidlMeasure = NN_TRY(hal::utils::makeExecutionFailure(convert(measure)));
- const auto hidlDeadline = NN_TRY(hal::utils::makeExecutionFailure(convert(deadline)));
- const auto hidlLoopTimeoutDuration =
- NN_TRY(hal::utils::makeExecutionFailure(convert(loopTimeoutDuration)));
+ const auto hidlRequest = NN_TRY(convert(requestInShared));
+ const auto hidlMeasure = NN_TRY(convert(measure));
+ const auto hidlDeadline = NN_TRY(convert(deadline));
+ const auto hidlLoopTimeoutDuration = NN_TRY(convert(loopTimeoutDuration));
return executeInternal(hidlRequest, hidlMeasure, hidlDeadline, hidlLoopTimeoutDuration,
relocation);
@@ -177,10 +175,13 @@ PreparedModel::executeInternal(const Request& request, V1_2::MeasureTiming measu
}
nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
-PreparedModel::executeFenced(const nn::Request& request, const std::vector<nn::SyncFence>& waitFor,
- nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline,
- const nn::OptionalDuration& loopTimeoutDuration,
- const nn::OptionalDuration& timeoutDurationAfterFence) const {
+PreparedModel::executeFenced(
+ const nn::Request& request, const std::vector<nn::SyncFence>& waitFor,
+ nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline,
+ const nn::OptionalDuration& loopTimeoutDuration,
+ const nn::OptionalDuration& timeoutDurationAfterFence,
+ const std::vector<nn::TokenValuePair>& /*hints*/,
+ const std::vector<nn::ExtensionNameAndPrefix>& /*extensionNameToPrefix*/) const {
// Ensure that request is ready for IPC.
std::optional<nn::Request> maybeRequestInShared;
hal::utils::RequestRelocation relocation;
@@ -189,7 +190,7 @@ PreparedModel::executeFenced(const nn::Request& request, const std::vector<nn::S
&maybeRequestInShared, &relocation));
const auto hidlRequest = NN_TRY(convert(requestInShared));
- const auto hidlWaitFor = NN_TRY(hal::utils::convertSyncFences(waitFor));
+ const auto hidlWaitFor = NN_TRY(convertSyncFences(waitFor));
const auto hidlMeasure = NN_TRY(convert(measure));
const auto hidlDeadline = NN_TRY(convert(deadline));
const auto hidlLoopTimeoutDuration = NN_TRY(convert(loopTimeoutDuration));
@@ -233,7 +234,9 @@ PreparedModel::executeFencedInternal(const Request& request, const hidl_vec<hidl
nn::GeneralResult<nn::SharedExecution> PreparedModel::createReusableExecution(
const nn::Request& request, nn::MeasureTiming measure,
- const nn::OptionalDuration& loopTimeoutDuration) const {
+ const nn::OptionalDuration& loopTimeoutDuration,
+ const std::vector<nn::TokenValuePair>& /*hints*/,
+ const std::vector<nn::ExtensionNameAndPrefix>& /*extensionNameToPrefix*/) const {
// Ensure that request is ready for IPC.
std::optional<nn::Request> maybeRequestInShared;
hal::utils::RequestRelocation relocation;
@@ -249,17 +252,8 @@ nn::GeneralResult<nn::SharedExecution> PreparedModel::createReusableExecution(
}
nn::GeneralResult<nn::SharedBurst> PreparedModel::configureExecutionBurst() const {
- auto self = shared_from_this();
- auto fallback = [preparedModel = std::move(self)](
- const nn::Request& request, nn::MeasureTiming measure,
- const nn::OptionalTimePoint& deadline,
- const nn::OptionalDuration& loopTimeoutDuration)
- -> nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> {
- return preparedModel->execute(request, measure, deadline, loopTimeoutDuration);
- };
const auto pollingTimeWindow = V1_2::utils::getBurstControllerPollingTimeWindow();
- return V1_2::utils::ExecutionBurstController::create(shared_from_this(), kPreparedModel,
- pollingTimeWindow);
+ return V1_2::utils::Burst::create(shared_from_this(), kPreparedModel, pollingTimeWindow);
}
std::any PreparedModel::getUnderlyingResource() const {