path: root/neuralnetworks/aidl/utils/include
author     Haamed Gheibi <haamed@google.com>    2022-02-04 13:47:26 -0800
committer  Haamed Gheibi <haamed@google.com>    2022-02-04 13:55:47 -0800
commit     f99b35c293439db0b7436b47b939eb8c7bf21b51 (patch)
tree       6cd9b0719554809447c845616317cca5409b93ae /neuralnetworks/aidl/utils/include
parent     a028272dee9220e6810cbdcfb2328c34f8afe4c2 (diff)
parent     332dead340bb196c6ba3f6978e8fb53966c74bf7 (diff)
Merge TP1A.220120.003
Change-Id: Ie5eba313ee102e452f5f96942ed2f3a7bb4e8f01
Diffstat (limited to 'neuralnetworks/aidl/utils/include')
-rw-r--r--  neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Callbacks.h      |  3
-rw-r--r--  neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Conversions.h    | 12
-rw-r--r--  neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Execution.h      | 41
-rw-r--r--  neuralnetworks/aidl/utils/include/nnapi/hal/aidl/HalInterfaces.h  |  5
-rw-r--r--  neuralnetworks/aidl/utils/include/nnapi/hal/aidl/PreparedModel.h  |  6
-rw-r--r--  neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Utils.h          |  2
6 files changed, 61 insertions, 8 deletions
diff --git a/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Callbacks.h b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Callbacks.h
index 168264babf..960be2bb54 100644
--- a/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Callbacks.h
+++ b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Callbacks.h
@@ -36,6 +36,8 @@ class PreparedModelCallback final : public BnPreparedModelCallback, public IProt
public:
using Data = nn::GeneralResult<nn::SharedPreparedModel>;
+ PreparedModelCallback(nn::Version featureLevel) : kFeatureLevel(featureLevel) {}
+
ndk::ScopedAStatus notify(ErrorStatus status,
const std::shared_ptr<IPreparedModel>& preparedModel) override;
@@ -44,6 +46,7 @@ class PreparedModelCallback final : public BnPreparedModelCallback, public IProt
Data get();
private:
+ const nn::Version kFeatureLevel;
hal::utils::TransferValue<Data> mData;
};
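The callback now has to be told which feature level the service implements. A minimal sketch of how a caller might construct it, assuming the NDK SharedRefBase factory normally used for Bn* AIDL classes (the helper name is hypothetical):

#include <nnapi/hal/aidl/Callbacks.h>

namespace aidl::android::hardware::neuralnetworks::utils {

// Hypothetical helper: create the callback with the canonical feature level of
// the service, so the PreparedModel it hands back can be tagged with the same
// version via the new kFeatureLevel member.
std::shared_ptr<PreparedModelCallback> makePreparedModelCallback(nn::Version featureLevel) {
    return ndk::SharedRefBase::make<PreparedModelCallback>(featureLevel);
}

}  // namespace aidl::android::hardware::neuralnetworks::utils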
diff --git a/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Conversions.h b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Conversions.h
index 78433a74e9..477b311598 100644
--- a/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Conversions.h
+++ b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Conversions.h
@@ -112,11 +112,15 @@ GeneralResult<Priority> convert(const aidl_hal::Priority& priority);
GeneralResult<Request> convert(const aidl_hal::Request& request);
GeneralResult<Timing> convert(const aidl_hal::Timing& timing);
GeneralResult<SharedHandle> convert(const ndk::ScopedFileDescriptor& handle);
+GeneralResult<BufferDesc> convert(const aidl_hal::BufferDesc& bufferDesc);
GeneralResult<std::vector<Extension>> convert(const std::vector<aidl_hal::Extension>& extension);
GeneralResult<std::vector<SharedMemory>> convert(const std::vector<aidl_hal::Memory>& memories);
GeneralResult<std::vector<OutputShape>> convert(
const std::vector<aidl_hal::OutputShape>& outputShapes);
+GeneralResult<std::vector<SharedHandle>> convert(
+ const std::vector<ndk::ScopedFileDescriptor>& handles);
+GeneralResult<std::vector<BufferRole>> convert(const std::vector<aidl_hal::BufferRole>& roles);
GeneralResult<std::vector<uint32_t>> toUnsigned(const std::vector<int32_t>& vec);
@@ -129,6 +133,7 @@ namespace nn = ::android::nn;
nn::GeneralResult<std::vector<uint8_t>> unvalidatedConvert(const nn::CacheToken& cacheToken);
nn::GeneralResult<BufferDesc> unvalidatedConvert(const nn::BufferDesc& bufferDesc);
nn::GeneralResult<BufferRole> unvalidatedConvert(const nn::BufferRole& bufferRole);
+nn::GeneralResult<DeviceType> unvalidatedConvert(const nn::DeviceType& deviceType);
nn::GeneralResult<bool> unvalidatedConvert(const nn::MeasureTiming& measureTiming);
nn::GeneralResult<Memory> unvalidatedConvert(const nn::SharedMemory& memory);
nn::GeneralResult<OutputShape> unvalidatedConvert(const nn::OutputShape& outputShape);
@@ -154,14 +159,16 @@ nn::GeneralResult<Request> unvalidatedConvert(const nn::Request& request);
nn::GeneralResult<RequestArgument> unvalidatedConvert(const nn::Request::Argument& requestArgument);
nn::GeneralResult<RequestMemoryPool> unvalidatedConvert(const nn::Request::MemoryPool& memoryPool);
nn::GeneralResult<Timing> unvalidatedConvert(const nn::Timing& timing);
-nn::GeneralResult<int64_t> unvalidatedConvert(const nn::Duration& duration);
nn::GeneralResult<int64_t> unvalidatedConvert(const nn::OptionalDuration& optionalDuration);
nn::GeneralResult<int64_t> unvalidatedConvert(const nn::OptionalTimePoint& optionalTimePoint);
nn::GeneralResult<ndk::ScopedFileDescriptor> unvalidatedConvert(const nn::SyncFence& syncFence);
nn::GeneralResult<ndk::ScopedFileDescriptor> unvalidatedConvert(const nn::SharedHandle& handle);
+nn::GeneralResult<Capabilities> unvalidatedConvert(const nn::Capabilities& capabilities);
+nn::GeneralResult<Extension> unvalidatedConvert(const nn::Extension& extension);
nn::GeneralResult<std::vector<uint8_t>> convert(const nn::CacheToken& cacheToken);
nn::GeneralResult<BufferDesc> convert(const nn::BufferDesc& bufferDesc);
+nn::GeneralResult<DeviceType> convert(const nn::DeviceType& deviceType);
nn::GeneralResult<bool> convert(const nn::MeasureTiming& measureTiming);
nn::GeneralResult<Memory> convert(const nn::SharedMemory& memory);
nn::GeneralResult<ErrorStatus> convert(const nn::ErrorStatus& errorStatus);
@@ -172,6 +179,8 @@ nn::GeneralResult<Request> convert(const nn::Request& request);
nn::GeneralResult<Timing> convert(const nn::Timing& timing);
nn::GeneralResult<int64_t> convert(const nn::OptionalDuration& optionalDuration);
nn::GeneralResult<int64_t> convert(const nn::OptionalTimePoint& optionalTimePoint);
+nn::GeneralResult<Capabilities> convert(const nn::Capabilities& capabilities);
+nn::GeneralResult<Extension> convert(const nn::Extension& extension);
nn::GeneralResult<std::vector<BufferRole>> convert(const std::vector<nn::BufferRole>& bufferRoles);
nn::GeneralResult<std::vector<OutputShape>> convert(
@@ -180,6 +189,7 @@ nn::GeneralResult<std::vector<ndk::ScopedFileDescriptor>> convert(
const std::vector<nn::SharedHandle>& handles);
nn::GeneralResult<std::vector<ndk::ScopedFileDescriptor>> convert(
const std::vector<nn::SyncFence>& syncFences);
+nn::GeneralResult<std::vector<Extension>> convert(const std::vector<nn::Extension>& extensions);
nn::GeneralResult<std::vector<int32_t>> toSigned(const std::vector<uint32_t>& vec);
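For the canonical-to-AIDL direction, the new overloads slot into the existing convert() family. A minimal sketch, assuming these overloads live in the aidl utils namespace alongside the declarations above (the wrapper function is hypothetical):

#include <nnapi/hal/aidl/Conversions.h>

namespace aidl::android::hardware::neuralnetworks::utils {

// Hypothetical helper: convert a canonical extension list to its AIDL
// counterpart with the newly declared overload; conversion failures are
// propagated through GeneralResult.
nn::GeneralResult<std::vector<Extension>> toAidlExtensions(
        const std::vector<nn::Extension>& extensions) {
    return convert(extensions);
}

}  // namespace aidl::android::hardware::neuralnetworks::utils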
diff --git a/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Execution.h b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Execution.h
index a77ea984b2..14802b98cc 100644
--- a/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Execution.h
+++ b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Execution.h
@@ -17,6 +17,8 @@
#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_AIDL_UTILS_EXECUTION_H
#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_AIDL_UTILS_EXECUTION_H
+#include <aidl/android/hardware/neuralnetworks/IExecution.h>
+
#include <nnapi/IExecution.h>
#include <nnapi/Result.h>
#include <nnapi/Types.h>
@@ -33,17 +35,22 @@
namespace aidl::android::hardware::neuralnetworks::utils {
-class Execution final : public nn::IExecution, public std::enable_shared_from_this<Execution> {
+// A reusable execution implementation with a cached Request; internally, it still passes the
+// request to the driver on every computation.
+class ExecutionWithCachedRequest final
+ : public nn::IExecution,
+ public std::enable_shared_from_this<ExecutionWithCachedRequest> {
struct PrivateConstructorTag {};
public:
- static nn::GeneralResult<std::shared_ptr<const Execution>> create(
+ static nn::GeneralResult<std::shared_ptr<const ExecutionWithCachedRequest>> create(
std::shared_ptr<const PreparedModel> preparedModel, Request request,
hal::utils::RequestRelocation relocation, bool measure, int64_t loopTimeoutDuration);
- Execution(PrivateConstructorTag tag, std::shared_ptr<const PreparedModel> preparedModel,
- Request request, hal::utils::RequestRelocation relocation, bool measure,
- int64_t loopTimeoutDuration);
+ ExecutionWithCachedRequest(PrivateConstructorTag tag,
+ std::shared_ptr<const PreparedModel> preparedModel, Request request,
+ hal::utils::RequestRelocation relocation, bool measure,
+ int64_t loopTimeoutDuration);
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> compute(
const nn::OptionalTimePoint& deadline) const override;
@@ -60,6 +67,30 @@ class Execution final : public nn::IExecution, public std::enable_shared_from_th
const int64_t kLoopTimeoutDuration;
};
+// A reusable execution implementation that is backed by an actual AIDL IExecution object.
+class Execution final : public nn::IExecution, public std::enable_shared_from_this<Execution> {
+ struct PrivateConstructorTag {};
+
+ public:
+ static nn::GeneralResult<std::shared_ptr<const Execution>> create(
+ std::shared_ptr<aidl_hal::IExecution> execution,
+ hal::utils::RequestRelocation relocation);
+
+ Execution(PrivateConstructorTag tag, std::shared_ptr<aidl_hal::IExecution> execution,
+ hal::utils::RequestRelocation relocation);
+
+ nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> compute(
+ const nn::OptionalTimePoint& deadline) const override;
+
+ nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> computeFenced(
+ const std::vector<nn::SyncFence>& waitFor, const nn::OptionalTimePoint& deadline,
+ const nn::OptionalDuration& timeoutDurationAfterFence) const override;
+
+ private:
+ const std::shared_ptr<aidl_hal::IExecution> kExecution;
+ const hal::utils::RequestRelocation kRelocation;
+};
+
} // namespace aidl::android::hardware::neuralnetworks::utils
#endif // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_AIDL_UTILS_EXECUTION_H
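With the split, ExecutionWithCachedRequest keeps the old resend-the-request behaviour, while Execution wraps a driver-side IExecution object. A minimal sketch of wrapping such an object, reusing the create() signature declared above (the helper and its arguments are hypothetical placeholders for what the prepared model produces):

#include <nnapi/hal/aidl/Execution.h>

#include <utility>

namespace aidl::android::hardware::neuralnetworks::utils {

// Hypothetical helper: wrap an IExecution returned by the driver in the new
// adapter so the runtime can reuse it across computations without resending
// the request each time.
nn::GeneralResult<std::shared_ptr<const Execution>> wrapDriverExecution(
        std::shared_ptr<aidl_hal::IExecution> execution,
        hal::utils::RequestRelocation relocation) {
    return Execution::create(std::move(execution), std::move(relocation));
}

}  // namespace aidl::android::hardware::neuralnetworks::utils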
diff --git a/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/HalInterfaces.h b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/HalInterfaces.h
index 3fb443c388..205d428cf4 100644
--- a/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/HalInterfaces.h
+++ b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/HalInterfaces.h
@@ -61,6 +61,11 @@
#include <aidl/android/hardware/neuralnetworks/SymmPerChannelQuantParams.h>
#include <aidl/android/hardware/neuralnetworks/Timing.h>
+#ifdef NN_AIDL_V4_OR_ABOVE
+#include <aidl/android/hardware/neuralnetworks/BnExecution.h>
+#include <aidl/android/hardware/neuralnetworks/IExecution.h>
+#endif // NN_AIDL_V4_OR_ABOVE
+
namespace android::nn {
namespace aidl_hal = ::aidl::android::hardware::neuralnetworks;
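Because BnExecution.h and IExecution.h are only pulled in when NN_AIDL_V4_OR_ABOVE is defined, downstream code that names aidl_hal::IExecution needs the same guard. A minimal sketch (the function is hypothetical):

#include <nnapi/hal/aidl/HalInterfaces.h>

#include <memory>

namespace android::nn {

#ifdef NN_AIDL_V4_OR_ABOVE
// Hypothetical helper: only compiled when the version-4 execution interface is
// available; without the guard, aidl_hal::IExecution is undeclared when
// building against an older AIDL version.
bool hasReusableExecution(const std::shared_ptr<aidl_hal::IExecution>& execution) {
    return execution != nullptr;
}
#endif  // NN_AIDL_V4_OR_ABOVE

}  // namespace android::nn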
diff --git a/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/PreparedModel.h b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/PreparedModel.h
index 4035764ea4..24cd681658 100644
--- a/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/PreparedModel.h
+++ b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/PreparedModel.h
@@ -41,10 +41,11 @@ class PreparedModel final : public nn::IPreparedModel,
public:
static nn::GeneralResult<std::shared_ptr<const PreparedModel>> create(
- std::shared_ptr<aidl_hal::IPreparedModel> preparedModel);
+ std::shared_ptr<aidl_hal::IPreparedModel> preparedModel, nn::Version featureLevel);
PreparedModel(PrivateConstructorTag tag,
- std::shared_ptr<aidl_hal::IPreparedModel> preparedModel);
+ std::shared_ptr<aidl_hal::IPreparedModel> preparedModel,
+ nn::Version featureLevel);
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> execute(
const nn::Request& request, nn::MeasureTiming measure,
@@ -78,6 +79,7 @@ class PreparedModel final : public nn::IPreparedModel,
private:
const std::shared_ptr<aidl_hal::IPreparedModel> kPreparedModel;
+ const nn::Version kFeatureLevel;
};
} // namespace aidl::android::hardware::neuralnetworks::utils
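The factory now carries the service's feature level alongside the binder interface. A minimal sketch of the new call, matching the create() signature declared above (the helper name is hypothetical):

#include <nnapi/hal/aidl/PreparedModel.h>

#include <memory>
#include <utility>

namespace aidl::android::hardware::neuralnetworks::utils {

// Hypothetical helper: build the wrapper with the canonical feature level so
// it can be stored in the new kFeatureLevel member and forwarded to the
// executions it creates.
nn::GeneralResult<std::shared_ptr<const PreparedModel>> wrapPreparedModel(
        std::shared_ptr<aidl_hal::IPreparedModel> preparedModel, nn::Version featureLevel) {
    return PreparedModel::create(std::move(preparedModel), featureLevel);
}

}  // namespace aidl::android::hardware::neuralnetworks::utils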
diff --git a/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Utils.h b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Utils.h
index a27487e17c..beca38b1ee 100644
--- a/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Utils.h
+++ b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Utils.h
@@ -38,6 +38,8 @@ constexpr std::optional<nn::Version> aidlVersionToCanonicalVersion(int aidlVersi
return nn::kVersionFeatureLevel6;
case 3:
return nn::kVersionFeatureLevel7;
+ case 4:
+ return nn::kVersionFeatureLevel8;
default:
return std::nullopt;
}
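The mapping now recognizes AIDL version 4 and reports it as nn::kVersionFeatureLevel8. A minimal sketch of how a caller might use it, surfacing unrecognized versions as an error (the helper is hypothetical):

#include <nnapi/Result.h>
#include <nnapi/hal/aidl/Utils.h>

namespace aidl::android::hardware::neuralnetworks::utils {

// Hypothetical helper: translate the AIDL interface version reported by the
// service into a canonical feature level; with this change, version 4 yields
// nn::kVersionFeatureLevel8, and unrecognized versions are rejected.
nn::GeneralResult<nn::Version> getCanonicalFeatureLevel(int aidlVersion) {
    const auto version = aidlVersionToCanonicalVersion(aidlVersion);
    if (!version.has_value()) {
        return NN_ERROR() << "Unsupported AIDL interface version: " << aidlVersion;
    }
    return *version;
}

}  // namespace aidl::android::hardware::neuralnetworks::utils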