Diffstat (limited to 'neuralnetworks/aidl/utils/src')
-rw-r--r--  neuralnetworks/aidl/utils/src/Callbacks.cpp     |   7
-rw-r--r--  neuralnetworks/aidl/utils/src/Conversions.cpp   | 173
-rw-r--r--  neuralnetworks/aidl/utils/src/Device.cpp        |   4
-rw-r--r--  neuralnetworks/aidl/utils/src/Execution.cpp     |  45
-rw-r--r--  neuralnetworks/aidl/utils/src/PreparedModel.cpp | 158
5 files changed, 271 insertions(+), 116 deletions(-)
diff --git a/neuralnetworks/aidl/utils/src/Callbacks.cpp b/neuralnetworks/aidl/utils/src/Callbacks.cpp
index 8084970690..554f3faa73 100644
--- a/neuralnetworks/aidl/utils/src/Callbacks.cpp
+++ b/neuralnetworks/aidl/utils/src/Callbacks.cpp
@@ -38,16 +38,17 @@ namespace {
// nn::kVersionFeatureLevel5. On failure, this function returns with the appropriate
// nn::GeneralError.
nn::GeneralResult<nn::SharedPreparedModel> prepareModelCallback(
- ErrorStatus status, const std::shared_ptr<IPreparedModel>& preparedModel) {
+ ErrorStatus status, const std::shared_ptr<IPreparedModel>& preparedModel,
+ nn::Version featureLevel) {
HANDLE_STATUS_AIDL(status) << "model preparation failed with " << toString(status);
- return NN_TRY(PreparedModel::create(preparedModel));
+ return NN_TRY(PreparedModel::create(preparedModel, featureLevel));
}
} // namespace
ndk::ScopedAStatus PreparedModelCallback::notify(
ErrorStatus status, const std::shared_ptr<IPreparedModel>& preparedModel) {
- mData.put(prepareModelCallback(status, preparedModel));
+ mData.put(prepareModelCallback(status, preparedModel, kFeatureLevel));
return ndk::ScopedAStatus::ok();
}
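Note: for orientation, a sketch of the callback class these hunks feed into. The constructor, the kFeatureLevel member, and the blocking get() are inferred from the call sites in this diff and the Device.cpp change below; they are assumptions about Callbacks.h, which is not part of this diff.

    // Assumed shape of the callback (abridged):
    class PreparedModelCallback : public BnPreparedModelCallback {
      public:
        explicit PreparedModelCallback(nn::Version featureLevel)
            : kFeatureLevel(featureLevel) {}

        ndk::ScopedAStatus notify(
                ErrorStatus status,
                const std::shared_ptr<IPreparedModel>& preparedModel) override;

        // Blocks until notify() has delivered a result.
        nn::GeneralResult<nn::SharedPreparedModel> get();

      private:
        const nn::Version kFeatureLevel;
        hal::utils::TransferValue<nn::GeneralResult<nn::SharedPreparedModel>> mData;
    };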
diff --git a/neuralnetworks/aidl/utils/src/Conversions.cpp b/neuralnetworks/aidl/utils/src/Conversions.cpp
index 45628c8e73..113d2da955 100644
--- a/neuralnetworks/aidl/utils/src/Conversions.cpp
+++ b/neuralnetworks/aidl/utils/src/Conversions.cpp
@@ -551,6 +551,10 @@ GeneralResult<SharedHandle> convert(const ndk::ScopedFileDescriptor& handle) {
return validatedConvert(handle);
}
+GeneralResult<BufferDesc> convert(const aidl_hal::BufferDesc& bufferDesc) {
+ return validatedConvert(bufferDesc);
+}
+
GeneralResult<std::vector<Extension>> convert(const std::vector<aidl_hal::Extension>& extension) {
return validatedConvert(extension);
}
@@ -564,6 +568,15 @@ GeneralResult<std::vector<OutputShape>> convert(
return validatedConvert(outputShapes);
}
+GeneralResult<std::vector<SharedHandle>> convert(
+ const std::vector<ndk::ScopedFileDescriptor>& handles) {
+ return validatedConvert(handles);
+}
+
+GeneralResult<std::vector<BufferRole>> convert(const std::vector<aidl_hal::BufferRole>& roles) {
+ return validatedConvert(roles);
+}
+
GeneralResult<std::vector<uint32_t>> toUnsigned(const std::vector<int32_t>& vec) {
if (!std::all_of(vec.begin(), vec.end(), [](int32_t v) { return v >= 0; })) {
return NN_ERROR() << "Negative value passed to conversion from signed to unsigned";
@@ -576,42 +589,7 @@ GeneralResult<std::vector<uint32_t>> toUnsigned(const std::vector<int32_t>& vec)
namespace aidl::android::hardware::neuralnetworks::utils {
namespace {
-template <typename Input>
-using UnvalidatedConvertOutput =
- std::decay_t<decltype(unvalidatedConvert(std::declval<Input>()).value())>;
-
-template <typename Type>
-nn::GeneralResult<std::vector<UnvalidatedConvertOutput<Type>>> unvalidatedConvertVec(
- const std::vector<Type>& arguments) {
- std::vector<UnvalidatedConvertOutput<Type>> halObject;
- halObject.reserve(arguments.size());
- for (const auto& argument : arguments) {
- halObject.push_back(NN_TRY(unvalidatedConvert(argument)));
- }
- return halObject;
-}
-
-template <typename Type>
-nn::GeneralResult<std::vector<UnvalidatedConvertOutput<Type>>> unvalidatedConvert(
- const std::vector<Type>& arguments) {
- return unvalidatedConvertVec(arguments);
-}
-
-template <typename Type>
-nn::GeneralResult<UnvalidatedConvertOutput<Type>> validatedConvert(const Type& canonical) {
- NN_TRY(compliantVersion(canonical));
- return utils::unvalidatedConvert(canonical);
-}
-
-template <typename Type>
-nn::GeneralResult<std::vector<UnvalidatedConvertOutput<Type>>> validatedConvert(
- const std::vector<Type>& arguments) {
- std::vector<UnvalidatedConvertOutput<Type>> halObject(arguments.size());
- for (size_t i = 0; i < arguments.size(); ++i) {
- halObject[i] = NN_TRY(validatedConvert(arguments[i]));
- }
- return halObject;
-}
+using utils::unvalidatedConvert;
// Helper template for std::visit
template <class... Ts>
@@ -721,6 +699,74 @@ nn::GeneralResult<Memory> unvalidatedConvert(const nn::Memory::Unknown& /*memory
operator nn::GeneralResult<Memory>();
}
+nn::GeneralResult<PerformanceInfo> unvalidatedConvert(
+ const nn::Capabilities::PerformanceInfo& info) {
+ return PerformanceInfo{.execTime = info.execTime, .powerUsage = info.powerUsage};
+}
+
+nn::GeneralResult<OperandPerformance> unvalidatedConvert(
+ const nn::Capabilities::OperandPerformance& operandPerformance) {
+ return OperandPerformance{.type = NN_TRY(unvalidatedConvert(operandPerformance.type)),
+ .info = NN_TRY(unvalidatedConvert(operandPerformance.info))};
+}
+
+nn::GeneralResult<std::vector<OperandPerformance>> unvalidatedConvert(
+ const nn::Capabilities::OperandPerformanceTable& table) {
+ std::vector<OperandPerformance> operandPerformances;
+ operandPerformances.reserve(table.asVector().size());
+ for (const auto& operandPerformance : table.asVector()) {
+ operandPerformances.push_back(NN_TRY(unvalidatedConvert(operandPerformance)));
+ }
+ return operandPerformances;
+}
+
+nn::GeneralResult<ExtensionOperandTypeInformation> unvalidatedConvert(
+ const nn::Extension::OperandTypeInformation& info) {
+ return ExtensionOperandTypeInformation{.type = info.type,
+ .isTensor = info.isTensor,
+ .byteSize = static_cast<int32_t>(info.byteSize)};
+}
+
+nn::GeneralResult<int64_t> unvalidatedConvert(const nn::Duration& duration) {
+ if (duration < nn::Duration::zero()) {
+ return NN_ERROR() << "Unable to convert invalid (negative) duration";
+ }
+ constexpr std::chrono::nanoseconds::rep kIntMax = std::numeric_limits<int64_t>::max();
+ const auto count = duration.count();
+ return static_cast<int64_t>(std::min(count, kIntMax));
+}
+
+template <typename Input>
+using UnvalidatedConvertOutput =
+ std::decay_t<decltype(unvalidatedConvert(std::declval<Input>()).value())>;
+
+template <typename Type>
+nn::GeneralResult<std::vector<UnvalidatedConvertOutput<Type>>> unvalidatedConvert(
+ const std::vector<Type>& arguments) {
+ std::vector<UnvalidatedConvertOutput<Type>> halObject;
+ halObject.reserve(arguments.size());
+ for (const auto& argument : arguments) {
+ halObject.push_back(NN_TRY(unvalidatedConvert(argument)));
+ }
+ return halObject;
+}
+
+template <typename Type>
+nn::GeneralResult<UnvalidatedConvertOutput<Type>> validatedConvert(const Type& canonical) {
+ NN_TRY(compliantVersion(canonical));
+ return utils::unvalidatedConvert(canonical);
+}
+
+template <typename Type>
+nn::GeneralResult<std::vector<UnvalidatedConvertOutput<Type>>> validatedConvert(
+ const std::vector<Type>& arguments) {
+ std::vector<UnvalidatedConvertOutput<Type>> halObject(arguments.size());
+ for (size_t i = 0; i < arguments.size(); ++i) {
+ halObject[i] = NN_TRY(validatedConvert(arguments[i]));
+ }
+ return halObject;
+}
+
} // namespace
nn::GeneralResult<std::vector<uint8_t>> unvalidatedConvert(const nn::CacheToken& cacheToken) {
@@ -743,6 +789,19 @@ nn::GeneralResult<BufferRole> unvalidatedConvert(const nn::BufferRole& bufferRol
};
}
+nn::GeneralResult<DeviceType> unvalidatedConvert(const nn::DeviceType& deviceType) {
+ switch (deviceType) {
+ case nn::DeviceType::UNKNOWN:
+ break;
+ case nn::DeviceType::OTHER:
+ case nn::DeviceType::CPU:
+ case nn::DeviceType::GPU:
+ case nn::DeviceType::ACCELERATOR:
+ return static_cast<DeviceType>(deviceType);
+ }
+ return NN_ERROR() << "Invalid DeviceType " << deviceType;
+}
+
nn::GeneralResult<bool> unvalidatedConvert(const nn::MeasureTiming& measureTiming) {
return measureTiming == nn::MeasureTiming::YES;
}
@@ -956,15 +1015,6 @@ nn::GeneralResult<Timing> unvalidatedConvert(const nn::Timing& timing) {
};
}
-nn::GeneralResult<int64_t> unvalidatedConvert(const nn::Duration& duration) {
- if (duration < nn::Duration::zero()) {
- return NN_ERROR() << "Unable to convert invalid (negative) duration";
- }
- constexpr std::chrono::nanoseconds::rep kIntMax = std::numeric_limits<int64_t>::max();
- const auto count = duration.count();
- return static_cast<int64_t>(std::min(count, kIntMax));
-}
-
nn::GeneralResult<int64_t> unvalidatedConvert(const nn::OptionalDuration& optionalDuration) {
if (!optionalDuration.has_value()) {
return kNoTiming;
@@ -989,6 +1039,23 @@ nn::GeneralResult<ndk::ScopedFileDescriptor> unvalidatedConvert(const nn::Shared
return ndk::ScopedFileDescriptor(duplicatedFd.release());
}
+nn::GeneralResult<Capabilities> unvalidatedConvert(const nn::Capabilities& capabilities) {
+ return Capabilities{
+ .relaxedFloat32toFloat16PerformanceTensor = NN_TRY(
+ unvalidatedConvert(capabilities.relaxedFloat32toFloat16PerformanceTensor)),
+ .relaxedFloat32toFloat16PerformanceScalar = NN_TRY(
+ unvalidatedConvert(capabilities.relaxedFloat32toFloat16PerformanceScalar)),
+ .operandPerformance = NN_TRY(unvalidatedConvert(capabilities.operandPerformance)),
+ .ifPerformance = NN_TRY(unvalidatedConvert(capabilities.ifPerformance)),
+ .whilePerformance = NN_TRY(unvalidatedConvert(capabilities.whilePerformance)),
+ };
+}
+
+nn::GeneralResult<Extension> unvalidatedConvert(const nn::Extension& extension) {
+ return Extension{.name = extension.name,
+ .operandTypes = NN_TRY(unvalidatedConvert(extension.operandTypes))};
+}
+
nn::GeneralResult<std::vector<uint8_t>> convert(const nn::CacheToken& cacheToken) {
return validatedConvert(cacheToken);
}
@@ -997,6 +1064,10 @@ nn::GeneralResult<BufferDesc> convert(const nn::BufferDesc& bufferDesc) {
return validatedConvert(bufferDesc);
}
+nn::GeneralResult<DeviceType> convert(const nn::DeviceType& deviceType) {
+ return validatedConvert(deviceType);
+}
+
nn::GeneralResult<bool> convert(const nn::MeasureTiming& measureTiming) {
return validatedConvert(measureTiming);
}
@@ -1037,6 +1108,14 @@ nn::GeneralResult<int64_t> convert(const nn::OptionalTimePoint& outputShapes) {
return validatedConvert(outputShapes);
}
+nn::GeneralResult<Capabilities> convert(const nn::Capabilities& capabilities) {
+ return validatedConvert(capabilities);
+}
+
+nn::GeneralResult<Extension> convert(const nn::Extension& extension) {
+ return validatedConvert(extension);
+}
+
nn::GeneralResult<std::vector<BufferRole>> convert(const std::vector<nn::BufferRole>& bufferRoles) {
return validatedConvert(bufferRoles);
}
@@ -1056,6 +1135,10 @@ nn::GeneralResult<std::vector<ndk::ScopedFileDescriptor>> convert(
return validatedConvert(syncFences);
}
+nn::GeneralResult<std::vector<Extension>> convert(const std::vector<nn::Extension>& extensions) {
+ return validatedConvert(extensions);
+}
+
nn::GeneralResult<std::vector<int32_t>> toSigned(const std::vector<uint32_t>& vec) {
if (!std::all_of(vec.begin(), vec.end(),
[](uint32_t v) { return v <= std::numeric_limits<int32_t>::max(); })) {
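Note: the relocated nn::Duration overload saturates rather than overflows. A self-contained sketch of the same clamping policy with plain std::chrono types (toNanosSaturated is a hypothetical name, not part of this diff):

    #include <algorithm>
    #include <chrono>
    #include <cstdint>
    #include <limits>
    #include <optional>

    // Mirrors unvalidatedConvert(nn::Duration): negative durations are
    // rejected; counts above int64_t's range clamp to the maximum.
    std::optional<int64_t> toNanosSaturated(std::chrono::nanoseconds d) {
        if (d < std::chrono::nanoseconds::zero()) return std::nullopt;
        constexpr std::chrono::nanoseconds::rep kIntMax =
                std::numeric_limits<int64_t>::max();
        return static_cast<int64_t>(std::min(d.count(), kIntMax));
    }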
diff --git a/neuralnetworks/aidl/utils/src/Device.cpp b/neuralnetworks/aidl/utils/src/Device.cpp
index 5b7ec4ebd7..bad10ed347 100644
--- a/neuralnetworks/aidl/utils/src/Device.cpp
+++ b/neuralnetworks/aidl/utils/src/Device.cpp
@@ -229,7 +229,7 @@ nn::GeneralResult<nn::SharedPreparedModel> Device::prepareModel(
const auto aidlDataCache = NN_TRY(convert(dataCache));
const auto aidlToken = NN_TRY(convert(token));
- const auto cb = ndk::SharedRefBase::make<PreparedModelCallback>();
+ const auto cb = ndk::SharedRefBase::make<PreparedModelCallback>(kFeatureLevel);
const auto scoped = kDeathHandler.protectCallback(cb.get());
const auto ret = kDevice->prepareModel(aidlModel, aidlPreference, aidlPriority, aidlDeadline,
@@ -247,7 +247,7 @@ nn::GeneralResult<nn::SharedPreparedModel> Device::prepareModelFromCache(
const auto aidlDataCache = NN_TRY(convert(dataCache));
const auto aidlToken = NN_TRY(convert(token));
- const auto cb = ndk::SharedRefBase::make<PreparedModelCallback>();
+ const auto cb = ndk::SharedRefBase::make<PreparedModelCallback>(kFeatureLevel);
const auto scoped = kDeathHandler.protectCallback(cb.get());
const auto ret = kDevice->prepareModelFromCache(aidlDeadline, aidlModelCache, aidlDataCache,
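Note: both hunks sit in the same prepare-and-wait pattern. A skeleton of that flow, with the parameter list abridged; cb->get() blocking until notify() fires is an assumption based on the callback's design, not shown in this diff:

    const auto cb = ndk::SharedRefBase::make<PreparedModelCallback>(kFeatureLevel);
    const auto scoped = kDeathHandler.protectCallback(cb.get());  // unhook on binder death
    const auto ret = kDevice->prepareModel(aidlModel, aidlPreference, aidlPriority,
                                           aidlDeadline, /*...cache args...*/ aidlToken, cb);
    HANDLE_ASTATUS(ret) << "prepareModel failed";
    return cb->get();  // waits for notify() to deliver the model or an error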
diff --git a/neuralnetworks/aidl/utils/src/Execution.cpp b/neuralnetworks/aidl/utils/src/Execution.cpp
index 94edd90c89..c4add636e5 100644
--- a/neuralnetworks/aidl/utils/src/Execution.cpp
+++ b/neuralnetworks/aidl/utils/src/Execution.cpp
@@ -35,36 +35,39 @@
namespace aidl::android::hardware::neuralnetworks::utils {
-nn::GeneralResult<std::shared_ptr<const Execution>> Execution::create(
- std::shared_ptr<const PreparedModel> preparedModel, Request request,
- hal::utils::RequestRelocation relocation, bool measure, int64_t loopTimeoutDuration) {
+nn::GeneralResult<std::shared_ptr<const ExecutionWithCachedRequest>>
+ExecutionWithCachedRequest::create(std::shared_ptr<const PreparedModel> preparedModel,
+ Request request, hal::utils::RequestRelocation relocation,
+ bool measure, int64_t loopTimeoutDuration) {
if (preparedModel == nullptr) {
- return NN_ERROR() << "aidl::utils::Execution::create must have non-null preparedModel";
+ return NN_ERROR() << "aidl::utils::ExecutionWithCachedRequest::create must have non-null "
+ "preparedModel";
}
- return std::make_shared<const Execution>(PrivateConstructorTag{}, std::move(preparedModel),
- std::move(request), std::move(relocation), measure,
- loopTimeoutDuration);
+ return std::make_shared<const ExecutionWithCachedRequest>(
+ PrivateConstructorTag{}, std::move(preparedModel), std::move(request),
+ std::move(relocation), measure, loopTimeoutDuration);
}
-Execution::Execution(PrivateConstructorTag /*tag*/,
- std::shared_ptr<const PreparedModel> preparedModel, Request request,
- hal::utils::RequestRelocation relocation, bool measure,
- int64_t loopTimeoutDuration)
+ExecutionWithCachedRequest::ExecutionWithCachedRequest(
+ PrivateConstructorTag /*tag*/, std::shared_ptr<const PreparedModel> preparedModel,
+ Request request, hal::utils::RequestRelocation relocation, bool measure,
+ int64_t loopTimeoutDuration)
: kPreparedModel(std::move(preparedModel)),
kRequest(std::move(request)),
kRelocation(std::move(relocation)),
kMeasure(measure),
kLoopTimeoutDuration(loopTimeoutDuration) {}
-nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> Execution::compute(
- const nn::OptionalTimePoint& deadline) const {
+nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
+ExecutionWithCachedRequest::compute(const nn::OptionalTimePoint& deadline) const {
const auto aidlDeadline = NN_TRY(convert(deadline));
return kPreparedModel->executeInternal(kRequest, kMeasure, aidlDeadline, kLoopTimeoutDuration,
kRelocation);
}
-nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> Execution::computeFenced(
+nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
+ExecutionWithCachedRequest::computeFenced(
const std::vector<nn::SyncFence>& waitFor, const nn::OptionalTimePoint& deadline,
const nn::OptionalDuration& timeoutDurationAfterFence) const {
const auto aidlWaitFor = NN_TRY(convert(waitFor));
@@ -75,4 +78,18 @@ nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> Execu
aidlTimeoutDurationAfterFence, kRelocation);
}
+nn::GeneralResult<std::shared_ptr<const Execution>> Execution::create(
+ std::shared_ptr<aidl_hal::IExecution> execution, hal::utils::RequestRelocation relocation) {
+ if (execution == nullptr) {
+ return NN_ERROR() << "aidl::utils::Execution::create must have non-null execution";
+ }
+
+ return std::make_shared<const Execution>(PrivateConstructorTag{}, std::move(execution),
+ std::move(relocation));
+}
+
+Execution::Execution(PrivateConstructorTag /*tag*/, std::shared_ptr<aidl_hal::IExecution> execution,
+ hal::utils::RequestRelocation relocation)
+ : kExecution(std::move(execution)), kRelocation(std::move(relocation)) {}
+
} // namespace aidl::android::hardware::neuralnetworks::utils
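Note: after the rename there are two reusable-execution flavors. An outline with member lists taken from this diff (the base interface is not shown here and is assumed):

    // Pre-FEATURE_LEVEL_8 drivers: cache the converted Request once and
    // replay it through PreparedModel::executeInternal on every compute().
    class ExecutionWithCachedRequest /* : public nn::IExecution (assumed) */ {
        std::shared_ptr<const PreparedModel> kPreparedModel;
        Request kRequest;
        hal::utils::RequestRelocation kRelocation;
        bool kMeasure;
        int64_t kLoopTimeoutDuration;
    };

    // FEATURE_LEVEL_8+ drivers: wrap a driver-side reusable execution object;
    // its compute()/computeFenced() are implemented in PreparedModel.cpp below.
    class Execution /* : public nn::IExecution (assumed) */ {
        std::shared_ptr<aidl_hal::IExecution> kExecution;
        hal::utils::RequestRelocation kRelocation;
    };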
diff --git a/neuralnetworks/aidl/utils/src/PreparedModel.cpp b/neuralnetworks/aidl/utils/src/PreparedModel.cpp
index f25c2c8825..6d1de569d0 100644
--- a/neuralnetworks/aidl/utils/src/PreparedModel.cpp
+++ b/neuralnetworks/aidl/utils/src/PreparedModel.cpp
@@ -54,21 +54,77 @@ nn::GeneralResult<std::pair<nn::Timing, nn::Timing>> convertFencedExecutionResul
return std::make_pair(NN_TRY(nn::convert(timingLaunched)), NN_TRY(nn::convert(timingFenced)));
}
+nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> handleExecutionResult(
+ const ExecutionResult& result, const hal::utils::RequestRelocation& relocation) {
+ if (!result.outputSufficientSize) {
+ auto canonicalOutputShapes =
+ nn::convert(result.outputShapes).value_or(std::vector<nn::OutputShape>{});
+ return NN_ERROR(nn::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE, std::move(canonicalOutputShapes))
+ << "execution failed with " << nn::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE;
+ }
+ auto [outputShapes, timing] =
+ NN_TRY(convertExecutionResults(result.outputShapes, result.timing));
+
+ if (relocation.output) {
+ relocation.output->flush();
+ }
+ return std::make_pair(std::move(outputShapes), timing);
+}
+
+nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
+handleFencedExecutionResult(const FencedExecutionResult& result,
+ const hal::utils::RequestRelocation& relocation) {
+ auto resultSyncFence = nn::SyncFence::createAsSignaled();
+ if (result.syncFence.get() != -1) {
+ resultSyncFence = nn::SyncFence::create(NN_TRY(nn::convert(result.syncFence))).value();
+ }
+
+ auto callback = result.callback;
+ if (callback == nullptr) {
+ return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "callback is null";
+ }
+
+ // If computeFenced required the request memory to be moved into shared memory, block here until
+ // the fenced execution has completed and flush the memory back.
+ if (relocation.output) {
+ const auto state = resultSyncFence.syncWait({});
+ if (state != nn::SyncFence::FenceState::SIGNALED) {
+ return NN_ERROR() << "syncWait failed with " << state;
+ }
+ relocation.output->flush();
+ }
+
+ // Create callback which can be used to retrieve the execution error status and timings.
+ nn::ExecuteFencedInfoCallback resultCallback =
+ [callback]() -> nn::GeneralResult<std::pair<nn::Timing, nn::Timing>> {
+ ErrorStatus errorStatus;
+ Timing timingLaunched;
+ Timing timingFenced;
+ const auto ret = callback->getExecutionInfo(&timingLaunched, &timingFenced, &errorStatus);
+ HANDLE_ASTATUS(ret) << "fenced execution callback getExecutionInfo failed";
+ return convertFencedExecutionResults(errorStatus, timingLaunched, timingFenced);
+ };
+
+ return std::make_pair(std::move(resultSyncFence), std::move(resultCallback));
+}
+
} // namespace
nn::GeneralResult<std::shared_ptr<const PreparedModel>> PreparedModel::create(
- std::shared_ptr<aidl_hal::IPreparedModel> preparedModel) {
+ std::shared_ptr<aidl_hal::IPreparedModel> preparedModel, nn::Version featureLevel) {
if (preparedModel == nullptr) {
return NN_ERROR()
<< "aidl_hal::utils::PreparedModel::create must have non-null preparedModel";
}
- return std::make_shared<const PreparedModel>(PrivateConstructorTag{}, std::move(preparedModel));
+ return std::make_shared<const PreparedModel>(PrivateConstructorTag{}, std::move(preparedModel),
+ featureLevel);
}
PreparedModel::PreparedModel(PrivateConstructorTag /*tag*/,
- std::shared_ptr<aidl_hal::IPreparedModel> preparedModel)
- : kPreparedModel(std::move(preparedModel)) {}
+ std::shared_ptr<aidl_hal::IPreparedModel> preparedModel,
+ nn::Version featureLevel)
+ : kPreparedModel(std::move(preparedModel)), kFeatureLevel(featureLevel) {}
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> PreparedModel::execute(
const nn::Request& request, nn::MeasureTiming measure,
@@ -101,19 +157,7 @@ PreparedModel::executeInternal(const Request& request, bool measure, int64_t dea
const auto ret = kPreparedModel->executeSynchronously(request, measure, deadline,
loopTimeoutDuration, &executionResult);
HANDLE_ASTATUS(ret) << "executeSynchronously failed";
- if (!executionResult.outputSufficientSize) {
- auto canonicalOutputShapes =
- nn::convert(executionResult.outputShapes).value_or(std::vector<nn::OutputShape>{});
- return NN_ERROR(nn::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE, std::move(canonicalOutputShapes))
- << "execution failed with " << nn::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE;
- }
- auto [outputShapes, timing] =
- NN_TRY(convertExecutionResults(executionResult.outputShapes, executionResult.timing));
-
- if (relocation.output) {
- relocation.output->flush();
- }
- return std::make_pair(std::move(outputShapes), timing);
+ return handleExecutionResult(executionResult, relocation);
}
nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
@@ -154,39 +198,7 @@ PreparedModel::executeFencedInternal(const Request& request,
kPreparedModel->executeFenced(request, waitFor, measure, deadline, loopTimeoutDuration,
timeoutDurationAfterFence, &result);
HANDLE_ASTATUS(ret) << "executeFenced failed";
-
- auto resultSyncFence = nn::SyncFence::createAsSignaled();
- if (result.syncFence.get() != -1) {
- resultSyncFence = nn::SyncFence::create(NN_TRY(nn::convert(result.syncFence))).value();
- }
-
- auto callback = result.callback;
- if (callback == nullptr) {
- return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE) << "callback is null";
- }
-
- // If executeFenced required the request memory to be moved into shared memory, block here until
- // the fenced execution has completed and flush the memory back.
- if (relocation.output) {
- const auto state = resultSyncFence.syncWait({});
- if (state != nn::SyncFence::FenceState::SIGNALED) {
- return NN_ERROR() << "syncWait failed with " << state;
- }
- relocation.output->flush();
- }
-
- // Create callback which can be used to retrieve the execution error status and timings.
- nn::ExecuteFencedInfoCallback resultCallback =
- [callback]() -> nn::GeneralResult<std::pair<nn::Timing, nn::Timing>> {
- ErrorStatus errorStatus;
- Timing timingLaunched;
- Timing timingFenced;
- const auto ret = callback->getExecutionInfo(&timingLaunched, &timingFenced, &errorStatus);
- HANDLE_ASTATUS(ret) << "fenced execution callback getExecutionInfo failed";
- return convertFencedExecutionResults(errorStatus, timingLaunched, timingFenced);
- };
-
- return std::make_pair(std::move(resultSyncFence), std::move(resultCallback));
+ return handleFencedExecutionResult(result, relocation);
}
nn::GeneralResult<nn::SharedExecution> PreparedModel::createReusableExecution(
@@ -202,8 +214,18 @@ nn::GeneralResult<nn::SharedExecution> PreparedModel::createReusableExecution(
auto aidlRequest = NN_TRY(convert(requestInShared));
auto aidlMeasure = NN_TRY(convert(measure));
auto aidlLoopTimeoutDuration = NN_TRY(convert(loopTimeoutDuration));
- return Execution::create(shared_from_this(), std::move(aidlRequest), std::move(relocation),
- aidlMeasure, aidlLoopTimeoutDuration);
+
+ if (kFeatureLevel.level >= nn::Version::Level::FEATURE_LEVEL_8) {
+ std::shared_ptr<IExecution> execution;
+ const auto ret = kPreparedModel->createReusableExecution(
+ aidlRequest, aidlMeasure, aidlLoopTimeoutDuration, &execution);
+ HANDLE_ASTATUS(ret) << "createReusableExecution failed";
+ return Execution::create(std::move(execution), std::move(relocation));
+ }
+
+ return ExecutionWithCachedRequest::create(shared_from_this(), std::move(aidlRequest),
+ std::move(relocation), aidlMeasure,
+ aidlLoopTimeoutDuration);
}
nn::GeneralResult<nn::SharedBurst> PreparedModel::configureExecutionBurst() const {
@@ -218,4 +240,36 @@ std::any PreparedModel::getUnderlyingResource() const {
return resource;
}
+nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> Execution::compute(
+ const nn::OptionalTimePoint& deadline) const {
+ const auto aidlDeadline = NN_TRY(convert(deadline));
+
+ if (kRelocation.input) {
+ kRelocation.input->flush();
+ }
+
+ ExecutionResult executionResult;
+ auto ret = kExecution->executeSynchronously(aidlDeadline, &executionResult);
+ HANDLE_ASTATUS(ret) << "executeSynchronously failed";
+ return handleExecutionResult(executionResult, kRelocation);
+}
+
+nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> Execution::computeFenced(
+ const std::vector<nn::SyncFence>& waitFor, const nn::OptionalTimePoint& deadline,
+ const nn::OptionalDuration& timeoutDurationAfterFence) const {
+ const auto aidlWaitFor = NN_TRY(convert(waitFor));
+ const auto aidlDeadline = NN_TRY(convert(deadline));
+ const auto aidlTimeoutDurationAfterFence = NN_TRY(convert(timeoutDurationAfterFence));
+
+ if (kRelocation.input) {
+ kRelocation.input->flush();
+ }
+
+ FencedExecutionResult result;
+ const auto ret = kExecution->executeFenced(aidlWaitFor, aidlDeadline,
+ aidlTimeoutDurationAfterFence, &result);
+ HANDLE_ASTATUS(ret) << "executeFenced failed";
+ return handleFencedExecutionResult(result, kRelocation);
+}
+
} // namespace aidl::android::hardware::neuralnetworks::utils
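Note: the FEATURE_LEVEL_8 dispatch in createReusableExecution is invisible to callers, who only see the canonical interfaces. A hypothetical caller (computeTwice is an illustrative name; the argument types follow the canonical signatures used throughout this diff):

    nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> computeTwice(
            const nn::SharedPreparedModel& preparedModel, const nn::Request& request) {
        // The request is converted and relocated once, at creation time...
        const nn::SharedExecution execution =
                preparedModel
                        ->createReusableExecution(request, nn::MeasureTiming::NO,
                                                  /*loopTimeoutDuration=*/{})
                        .value();
        // ...and each compute() reuses it: on FL8+ drivers via the driver-side
        // IExecution, on older drivers by replaying the cached Request.
        const auto warmup = execution->compute(/*deadline=*/{});
        (void)warmup;
        return execution->compute(/*deadline=*/{});
    }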