author     Scott Lobdell <slobdell@google.com>   2022-01-21 07:03:32 +0000
committer  Scott Lobdell <slobdell@google.com>   2022-01-21 07:15:05 +0000
commit     593bdf5548ac6f13053424a655423c0677e7cb2d (patch)
tree       ebbbe99553ec26ed9088cf6cfef1aec2e5cc2ef0 /neuralnetworks
parent     78c23e4f5f9e8b61b8bace9a2b4cb691073ef906 (diff)
parent     a0f5ebdbd3992e7a78517d13f568d9cbcbb71564 (diff)
Merge TP1A.211206.001
Change-Id: I8b3a43021a3328cf6afb4e7e157339d8e214ddbe
Diffstat (limited to 'neuralnetworks')
24 files changed, 140 insertions, 41 deletions
diff --git a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Callbacks.h b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Callbacks.h
index 1ab9dcb90a..244001f45b 100644
--- a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Callbacks.h
+++ b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Callbacks.h
@@ -41,7 +41,7 @@ nn::GeneralResult<std::vector<bool>> supportedOperationsCallback(
 
 // Converts the results of IDevice::prepareModel* to the NN canonical format. On success, this
 // function returns with a non-null nn::SharedPreparedModel with a feature level of
-// nn::Version::ANDROID_OC_MR1. On failure, this function returns with the appropriate
+// nn::kVersionFeatureLevel1. On failure, this function returns with the appropriate
 // nn::GeneralError.
 nn::GeneralResult<nn::SharedPreparedModel> prepareModelCallback(
         ErrorStatus status, const sp<IPreparedModel>& preparedModel);
diff --git a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Utils.h b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Utils.h
index 5c1480e83b..7710a7eaa2 100644
--- a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Utils.h
+++ b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Utils.h
@@ -28,7 +28,7 @@ namespace android::hardware::neuralnetworks::V1_0::utils {
 
-constexpr auto kVersion = nn::Version::ANDROID_OC_MR1;
+constexpr auto kVersion = nn::kVersionFeatureLevel1;
 
 template <typename Type>
 nn::Result<void> validate(const Type& halObject) {
@@ -51,7 +51,7 @@ bool valid(const Type& halObject) {
 template <typename Type>
 nn::Result<void> compliantVersion(const Type& canonical) {
     const auto version = NN_TRY(nn::validate(canonical));
-    if (version > kVersion) {
+    if (!nn::isCompliantVersion(version, kVersion)) {
         return NN_ERROR() << "Insufficient version: " << version << " vs required " << kVersion;
     }
     return {};
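The change to compliantVersion() in Utils.h above swaps the plain ordering "version > kVersion" for nn::isCompliantVersion(version, kVersion). Below is a minimal standalone sketch of why an ordering alone is no longer a sufficient compliance check, assuming (as the AIDL DeviceTest changes near the end of this diff suggest) that the canonical nn::Version is now a struct carrying a feature level plus a runtimeOnlyFeatures flag rather than a single enum; the Version and isCompliantVersion below are illustrative stand-ins, not the canonical definitions:

    // Minimal standalone sketch -- illustrative stand-ins, not the canonical
    // nn:: definitions -- of why a plain ordering such as "version > kVersion"
    // is no longer a sufficient compliance check once a version is a struct:
    // the feature level must be compared, and versions that require
    // runtime-only features must be rejected even when their level is low.
    #include <cstdio>

    namespace sketch {

    // Hypothetical stand-in for the canonical nn::Version.
    struct Version {
        enum class Level { FEATURE_LEVEL_1 = 1, FEATURE_LEVEL_2, FEATURE_LEVEL_3 };
        Level level;
        bool runtimeOnlyFeatures;
    };

    // Hypothetical stand-in for nn::isCompliantVersion(version, canonicalVersion).
    constexpr bool isCompliantVersion(Version version, Version canonicalVersion) {
        return version.level <= canonicalVersion.level && !version.runtimeOnlyFeatures;
    }

    }  // namespace sketch

    int main() {
        // The HAL utils pin kVersion to their own feature level (here: level 2).
        constexpr sketch::Version kVersion{sketch::Version::Level::FEATURE_LEVEL_2, false};
        // A validated request that needs runtime-only features: a bare level
        // comparison would accept it, the flag-aware check rejects it.
        constexpr sketch::Version requestVersion{sketch::Version::Level::FEATURE_LEVEL_1, true};
        std::printf("compliant: %s\n",
                    sketch::isCompliantVersion(requestVersion, kVersion) ? "yes" : "no");
        return 0;
    }

The same rewritten guard is repeated in the 1.1, 1.2, 1.3, and AIDL utils headers later in this diff.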
diff --git a/neuralnetworks/1.0/utils/src/Device.cpp b/neuralnetworks/1.0/utils/src/Device.cpp
index 49913a2584..b0c236efe8 100644
--- a/neuralnetworks/1.0/utils/src/Device.cpp
+++ b/neuralnetworks/1.0/utils/src/Device.cpp
@@ -99,7 +99,7 @@ const std::string& Device::getVersionString() const {
 }
 
 nn::Version Device::getFeatureLevel() const {
-    return nn::Version::ANDROID_OC_MR1;
+    return kVersion;
 }
 
 nn::DeviceType Device::getType() const {
diff --git a/neuralnetworks/1.0/utils/test/DeviceTest.cpp b/neuralnetworks/1.0/utils/test/DeviceTest.cpp
index e881da2c85..83e555fad5 100644
--- a/neuralnetworks/1.0/utils/test/DeviceTest.cpp
+++ b/neuralnetworks/1.0/utils/test/DeviceTest.cpp
@@ -233,7 +233,7 @@ TEST(DeviceTest, getFeatureLevel) {
     const auto featureLevel = device->getFeatureLevel();
 
     // verify result
-    EXPECT_EQ(featureLevel, nn::Version::ANDROID_OC_MR1);
+    EXPECT_EQ(featureLevel, nn::kVersionFeatureLevel1);
 }
 
 TEST(DeviceTest, getCachedData) {
diff --git a/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Utils.h b/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Utils.h
index 4660ff732f..ff06739f32 100644
--- a/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Utils.h
+++ b/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Utils.h
@@ -30,7 +30,7 @@ namespace android::hardware::neuralnetworks::V1_1::utils {
 
 constexpr auto kDefaultExecutionPreference = ExecutionPreference::FAST_SINGLE_ANSWER;
-constexpr auto kVersion = nn::Version::ANDROID_P;
+constexpr auto kVersion = nn::kVersionFeatureLevel2;
 
 template <typename Type>
 nn::Result<void> validate(const Type& halObject) {
@@ -53,7 +53,7 @@ bool valid(const Type& halObject) {
 template <typename Type>
 nn::Result<void> compliantVersion(const Type& canonical) {
     const auto version = NN_TRY(nn::validate(canonical));
-    if (version > kVersion) {
+    if (!nn::isCompliantVersion(version, kVersion)) {
         return NN_ERROR() << "Insufficient version: " << version << " vs required " << kVersion;
     }
     return {};
diff --git a/neuralnetworks/1.1/utils/src/Device.cpp b/neuralnetworks/1.1/utils/src/Device.cpp
index 7d54cabeb9..3effa8428d 100644
--- a/neuralnetworks/1.1/utils/src/Device.cpp
+++ b/neuralnetworks/1.1/utils/src/Device.cpp
@@ -99,7 +99,7 @@ const std::string& Device::getVersionString() const {
 }
 
 nn::Version Device::getFeatureLevel() const {
-    return nn::Version::ANDROID_P;
+    return kVersion;
 }
 
 nn::DeviceType Device::getType() const {
diff --git a/neuralnetworks/1.1/utils/test/DeviceTest.cpp b/neuralnetworks/1.1/utils/test/DeviceTest.cpp
index 41e0e3050d..2248da6ffe 100644
--- a/neuralnetworks/1.1/utils/test/DeviceTest.cpp
+++ b/neuralnetworks/1.1/utils/test/DeviceTest.cpp
@@ -243,7 +243,7 @@ TEST(DeviceTest, getFeatureLevel) {
     const auto featureLevel = device->getFeatureLevel();
 
     // verify result
-    EXPECT_EQ(featureLevel, nn::Version::ANDROID_P);
+    EXPECT_EQ(featureLevel, nn::kVersionFeatureLevel2);
 }
 
 TEST(DeviceTest, getCachedData) {
diff --git a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Callbacks.h b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Callbacks.h
index 6dd8138f64..fc04303726 100644
--- a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Callbacks.h
+++ b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Callbacks.h
@@ -38,7 +38,8 @@ namespace android::hardware::neuralnetworks::V1_2::utils {
 
 // Converts the results of IDevice::prepareModel* to the NN canonical format. On success, this
 // function returns with a non-null nn::SharedPreparedModel with a feature level of
-// nn::Version::ANDROID_Q. On failure, this function returns with the appropriate nn::GeneralError.
+// nn::kVersionFeatureLevel3. On failure, this function returns with the appropriate
+// nn::GeneralError.
 nn::GeneralResult<nn::SharedPreparedModel> prepareModelCallback(
         V1_0::ErrorStatus status, const sp<IPreparedModel>& preparedModel);
diff --git a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Utils.h b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Utils.h
index 23e336a69a..a06f2ac241 100644
--- a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Utils.h
+++ b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Utils.h
@@ -39,7 +39,7 @@ using V1_1::utils::kDefaultExecutionPreference;
 constexpr auto kDefaultMesaureTiming = MeasureTiming::NO;
 constexpr auto kNoTiming = Timing{.timeOnDevice = std::numeric_limits<uint64_t>::max(),
                                   .timeInDriver = std::numeric_limits<uint64_t>::max()};
-constexpr auto kVersion = nn::Version::ANDROID_Q;
+constexpr auto kVersion = nn::kVersionFeatureLevel3;
 
 template <typename Type>
 nn::Result<void> validate(const Type& halObject) {
@@ -62,7 +62,7 @@ bool valid(const Type& halObject) {
 template <typename Type>
 nn::Result<void> compliantVersion(const Type& canonical) {
     const auto version = NN_TRY(nn::validate(canonical));
-    if (version > kVersion) {
+    if (!nn::isCompliantVersion(version, kVersion)) {
         return NN_ERROR() << "Insufficient version: " << version << " vs required " << kVersion;
     }
     return {};
diff --git a/neuralnetworks/1.2/utils/src/Burst.cpp b/neuralnetworks/1.2/utils/src/Burst.cpp
index e0a23f1635..911fbfa981 100644
--- a/neuralnetworks/1.2/utils/src/Burst.cpp
+++ b/neuralnetworks/1.2/utils/src/Burst.cpp
@@ -315,7 +315,7 @@ nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> Burst::
 
     // if the request is valid but of a higher version than what's supported in burst execution,
     // fall back to another execution path
-    if (const auto version = NN_TRY(nn::validate(request)); version > nn::Version::ANDROID_Q) {
+    if (!compliantVersion(request).ok()) {
         // fallback to another execution path if the packet could not be sent
         return kPreparedModel->execute(request, measure, deadline, loopTimeoutDuration);
     }
@@ -359,7 +359,7 @@ nn::GeneralResult<nn::SharedExecution> Burst::createReusableExecution(
 
     // if the request is valid but of a higher version than what's supported in burst execution,
     // fall back to another execution path
-    if (const auto version = NN_TRY(nn::validate(request)); version > nn::Version::ANDROID_Q) {
+    if (!compliantVersion(request).ok()) {
         // fallback to another execution path if the packet could not be sent
         return kPreparedModel->createReusableExecution(request, measure, loopTimeoutDuration);
     }
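Burst.cpp above drops its ad-hoc "version > nn::Version::ANDROID_Q" checks in favor of the shared compliantVersion() helper; a request that fails the check is simply routed back to the owning prepared model. A small sketch of that guard-and-fall-back shape, with hypothetical types rather than the AOSP burst classes:

    // Illustrative sketch (hypothetical types, not the AOSP burst classes) of
    // the guard used in Burst::execute and Burst::createReusableExecution: a
    // non-compliant request is not an error, it just cannot be expressed over
    // the burst channel, so it takes the general execution path instead.
    #include <string>

    struct Request {
        int featureLevel;  // stand-in for the request's validated canonical version
    };

    constexpr int kBurstFeatureLevel = 3;  // hypothetical ceiling of the burst channel

    bool compliantWithBurst(const Request& request) {
        return request.featureLevel <= kBurstFeatureLevel;
    }

    std::string execute(const Request& request) {
        if (!compliantWithBurst(request)) {
            return "fall back to IPreparedModel::execute";  // general path
        }
        return "send the packet over the burst FMQ fast path";
    }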
diff --git a/neuralnetworks/1.2/utils/src/Device.cpp b/neuralnetworks/1.2/utils/src/Device.cpp
index f12669a6dc..e7acecdf7a 100644
--- a/neuralnetworks/1.2/utils/src/Device.cpp
+++ b/neuralnetworks/1.2/utils/src/Device.cpp
@@ -192,7 +192,7 @@ const std::string& Device::getVersionString() const {
 }
 
 nn::Version Device::getFeatureLevel() const {
-    return nn::Version::ANDROID_Q;
+    return kVersion;
 }
 
 nn::DeviceType Device::getType() const {
diff --git a/neuralnetworks/1.2/utils/test/DeviceTest.cpp b/neuralnetworks/1.2/utils/test/DeviceTest.cpp
index 215d44c83f..1dc6285be5 100644
--- a/neuralnetworks/1.2/utils/test/DeviceTest.cpp
+++ b/neuralnetworks/1.2/utils/test/DeviceTest.cpp
@@ -483,7 +483,7 @@ TEST(DeviceTest, getFeatureLevel) {
     const auto featureLevel = device->getFeatureLevel();
 
     // verify result
-    EXPECT_EQ(featureLevel, nn::Version::ANDROID_Q);
+    EXPECT_EQ(featureLevel, nn::kVersionFeatureLevel3);
 }
 
 TEST(DeviceTest, getCachedData) {
diff --git a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Callbacks.h b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Callbacks.h
index 4b8ddc1885..10892bc403 100644
--- a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Callbacks.h
+++ b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Callbacks.h
@@ -47,7 +47,8 @@ nn::GeneralResult<std::vector<bool>> supportedOperationsCallback(
 
 // Converts the results of IDevice::prepareModel* to the NN canonical format. On success, this
 // function returns with a non-null nn::SharedPreparedModel with a feature level of
-// nn::Version::ANDROID_R. On failure, this function returns with the appropriate nn::GeneralError.
+// nn::kVersionFeatureLevel4. On failure, this function returns with the appropriate
+// nn::GeneralError.
 nn::GeneralResult<nn::SharedPreparedModel> prepareModelCallback(
         ErrorStatus status, const sp<IPreparedModel>& preparedModel);
diff --git a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Utils.h b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Utils.h
index 2812db2546..594d727d5d 100644
--- a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Utils.h
+++ b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Utils.h
@@ -39,7 +39,7 @@ using V1_2::utils::kDefaultMesaureTiming;
 using V1_2::utils::kNoTiming;
 
 constexpr auto kDefaultPriority = Priority::MEDIUM;
-constexpr auto kVersion = nn::Version::ANDROID_R;
+constexpr auto kVersion = nn::kVersionFeatureLevel4;
 
 template <typename Type>
 nn::Result<void> validate(const Type& halObject) {
@@ -62,7 +62,7 @@ bool valid(const Type& halObject) {
 template <typename Type>
 nn::Result<void> compliantVersion(const Type& canonical) {
     const auto version = NN_TRY(nn::validate(canonical));
-    if (version > kVersion) {
+    if (!nn::isCompliantVersion(version, kVersion)) {
         return NN_ERROR() << "Insufficient version: " << version << " vs required " << kVersion;
     }
     return {};
diff --git a/neuralnetworks/1.3/utils/src/Device.cpp b/neuralnetworks/1.3/utils/src/Device.cpp
index a73ce82ed2..9517fda877 100644
--- a/neuralnetworks/1.3/utils/src/Device.cpp
+++ b/neuralnetworks/1.3/utils/src/Device.cpp
@@ -143,7 +143,7 @@ const std::string& Device::getVersionString() const {
 }
 
 nn::Version Device::getFeatureLevel() const {
-    return nn::Version::ANDROID_R;
+    return kVersion;
 }
 
 nn::DeviceType Device::getType() const {
diff --git a/neuralnetworks/1.3/utils/test/DeviceTest.cpp b/neuralnetworks/1.3/utils/test/DeviceTest.cpp
index 2d1b2f295a..7eba4bc935 100644
--- a/neuralnetworks/1.3/utils/test/DeviceTest.cpp
+++ b/neuralnetworks/1.3/utils/test/DeviceTest.cpp
@@ -505,7 +505,7 @@ TEST(DeviceTest, getFeatureLevel) {
     const auto featureLevel = device->getFeatureLevel();
 
     // verify result
-    EXPECT_EQ(featureLevel, nn::Version::ANDROID_R);
+    EXPECT_EQ(featureLevel, nn::kVersionFeatureLevel4);
 }
 
 TEST(DeviceTest, getCachedData) {
diff --git a/neuralnetworks/aidl/aidl_api/android.hardware.neuralnetworks/current/android/hardware/neuralnetworks/OperationType.aidl b/neuralnetworks/aidl/aidl_api/android.hardware.neuralnetworks/current/android/hardware/neuralnetworks/OperationType.aidl
index 2eff11b146..34506c8860 100644
--- a/neuralnetworks/aidl/aidl_api/android.hardware.neuralnetworks/current/android/hardware/neuralnetworks/OperationType.aidl
+++ b/neuralnetworks/aidl/aidl_api/android.hardware.neuralnetworks/current/android/hardware/neuralnetworks/OperationType.aidl
@@ -138,4 +138,6 @@ enum OperationType {
   RANK = 101,
   BATCH_MATMUL = 102,
   PACK = 103,
+  MIRROR_PAD = 104,
+  REVERSE = 105,
 }
diff --git a/neuralnetworks/aidl/android/hardware/neuralnetworks/OperationType.aidl b/neuralnetworks/aidl/android/hardware/neuralnetworks/OperationType.aidl
index 2ec91acf30..aebe8d9c93 100644
--- a/neuralnetworks/aidl/android/hardware/neuralnetworks/OperationType.aidl
+++ b/neuralnetworks/aidl/android/hardware/neuralnetworks/OperationType.aidl
@@ -4318,6 +4318,8 @@ enum OperationType {
      * Supported tensor {@link OperandType}:
      * * {@link OperandType::TENSOR_FLOAT16}
      * * {@link OperandType::TENSOR_FLOAT32}
+     * * {@link OperandType::TENSOR_QUANT8_ASYMM} (since NNAPI feature level 7)
+     * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 7)
      *
      * Supported tensor rank: from 1.
      *
@@ -4326,6 +4328,9 @@ enum OperationType {
      *
      * Outputs:
      * * 0: The output tensor of same shape as input0.
+     *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
+     *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
+     *      the scale and zeroPoint can be different from inputs' scale and zeroPoint.
      */
     RSQRT = 83,
 
@@ -5322,4 +5327,68 @@ enum OperationType {
      *      * 0: The packed tensor.
      */
     PACK = 103,
+
+    /**
+     * Pads a tensor with mirrored values.
+     *
+     * Supported tensor {@link OperandType}:
+     * * {@link OperandType::TENSOR_FLOAT16}
+     * * {@link OperandType::TENSOR_FLOAT32}
+     * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+     * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED}
+     * * {@link OperandType::TENSOR_INT32}
+     *
+     * Supported tensor rank: from 1.
+     *
+     * Inputs:
+     * * 0: An n-D tensor, specifying the tensor to be padded.
+     * * 1: A 2-D tensor of {@link OperandType::TENSOR_INT32}, the paddings
+     *      for each spatial dimension of the input tensor. The shape of the
+     *      tensor must be {rank(input0), 2}.
+     *      padding[i, 0] specifies the number of elements to be padded in the
+     *      front of dimension i.
+     *      padding[i, 1] specifies the number of elements to be padded after the
+     *      end of dimension i.
+     * * 2: An {@link OperandType::INT32} scalar, specifying the mode.
+     *      Options are 0:REFLECT and 1:SYMMETRIC.
+     *
+     * Outputs:
+     * * 0: A tensor of the same {@link OperandType} as input0. The
+     *      output tensor has the same rank as input0, and each
+     *      dimension of the output tensor has the same size as the
+     *      corresponding dimension of the input tensor plus the size
+     *      of the padding:
+     *          output0.dimension[i] =
+     *              padding[i, 0] + input0.dimension[i] + padding[i, 1]
+     *      For a {@link OperandType::TENSOR_QUANT8_ASYMM} and
+     *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensor,
+     *      the scale and zeroPoint must be the same as input0.
+     */
+    MIRROR_PAD = 104,
+
+    /**
+     * Reverses a specified dimension of a tensor.
+     *
+     * Supported tensor {@link OperandType}:
+     * * {@link OperandType::TENSOR_FLOAT16}
+     * * {@link OperandType::TENSOR_FLOAT32}
+     * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+     * * {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED}
+     * * {@link OperandType::TENSOR_INT32}
+     *
+     * Supported tensor rank: up to 8.
+     *
+     * Inputs:
+     * * 0: Input tensor of rank n.
+     * * 1: Axis tensor of type {@link OperandType::TENSOR_INT32} and shape [1],
+     *      specifying which dimension of the input tensor is to be reversed. The dimension
+     *      must be in the range [0, n).
+     *
+     * Outputs:
+     * * 0: The reversed tensor.
+     *      For {@link OperandType::TENSOR_QUANT8_ASYMM} and
+     *      {@link OperandType::TENSOR_QUANT8_ASYMM_SIGNED} tensors,
+     *      the scales and zeroPoint must be the same as input0.
+     */
+    REVERSE = 105,
 }
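The two new operations are easiest to see on a 1-D example. The sketch below is host-side illustration only, not NNAPI runtime or driver code, and it assumes the conventional reading of the two padding modes (REFLECT mirrors around the edge without repeating the edge element, SYMMETRIC repeats it), which the documentation above only names as 0:REFLECT and 1:SYMMETRIC:

    // Host-side illustration of MIRROR_PAD and REVERSE on 1-D data; this is
    // not NNAPI runtime or driver code. The index math handles a single
    // reflection only (pad sizes no larger than the input), which is enough
    // for a small example.
    #include <cstdio>
    #include <vector>

    enum class MirrorPadMode { REFLECT = 0, SYMMETRIC = 1 };

    std::vector<int> mirrorPad1D(const std::vector<int>& in, int padFront, int padBack,
                                 MirrorPadMode mode) {
        const int n = static_cast<int>(in.size());
        // REFLECT mirrors around the edge element without repeating it;
        // SYMMETRIC repeats the edge element.
        const int offset = (mode == MirrorPadMode::REFLECT) ? 1 : 0;
        std::vector<int> out;
        out.reserve(padFront + n + padBack);  // output size = pad[0] + n + pad[1]
        for (int i = -padFront; i < n + padBack; ++i) {
            int src = i;
            if (src < 0) src = -src - 1 + offset;              // mirror off the front edge
            if (src >= n) src = (n - 1) - (src - n) - offset;  // mirror off the back edge
            out.push_back(in[src]);
        }
        return out;
    }

    // REVERSE along the only axis of a 1-D tensor is just a reversed copy.
    std::vector<int> reverse1D(const std::vector<int>& in) {
        return std::vector<int>(in.rbegin(), in.rend());
    }

    int main() {
        const std::vector<int> input = {1, 2, 3};
        for (int v : mirrorPad1D(input, 2, 2, MirrorPadMode::REFLECT)) std::printf("%d ", v);
        std::printf("\n");  // 3 2 1 2 3 2 1
        for (int v : mirrorPad1D(input, 2, 2, MirrorPadMode::SYMMETRIC)) std::printf("%d ", v);
        std::printf("\n");  // 2 1 1 2 3 3 2
        for (int v : reverse1D(input)) std::printf("%d ", v);
        std::printf("\n");  // 3 2 1
        return 0;
    }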
diff --git a/neuralnetworks/aidl/utils/Android.bp b/neuralnetworks/aidl/utils/Android.bp
index 1a4cd9ad3b..e3561042be 100644
--- a/neuralnetworks/aidl/utils/Android.bp
+++ b/neuralnetworks/aidl/utils/Android.bp
@@ -56,13 +56,21 @@ cc_library_static {
 }
 
 cc_library_static {
-    name: "neuralnetworks_utils_hal_aidl",
+    name: "neuralnetworks_utils_hal_aidl_v2",
     defaults: ["neuralnetworks_utils_hal_aidl_defaults"],
     shared_libs: [
         "android.hardware.neuralnetworks-V2-ndk",
     ],
 }
 
+cc_library_static {
+    name: "neuralnetworks_utils_hal_aidl",
+    defaults: ["neuralnetworks_utils_hal_aidl_defaults"],
+    shared_libs: [
+        "android.hardware.neuralnetworks-V3-ndk",
+    ],
+}
+
 // A cc_defaults that includes the latest non-experimental AIDL utilities and other AIDL libraries
 // that are commonly used together. Modules that always depend on the latest non-experimental
 // AIDL features can include this cc_defaults to avoid managing dependency versions explicitly.
@@ -71,7 +79,7 @@ cc_defaults {
     static_libs: [
         "android.hardware.common-V2-ndk",
         "android.hardware.graphics.common-V3-ndk",
-        "android.hardware.neuralnetworks-V2-ndk",
+        "android.hardware.neuralnetworks-V3-ndk",
         "neuralnetworks_utils_hal_aidl",
     ],
 }
diff --git a/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Utils.h b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Utils.h
index f2ab479e43..a27487e17c 100644
--- a/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Utils.h
+++ b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Utils.h
@@ -19,6 +19,7 @@
 
 #include "nnapi/hal/aidl/Conversions.h"
 
+#include <aidl/android/hardware/neuralnetworks/IDevice.h>
 #include <android-base/logging.h>
 #include <nnapi/Result.h>
 #include <nnapi/TypeUtils.h>
@@ -28,7 +29,21 @@ namespace aidl::android::hardware::neuralnetworks::utils {
 
 constexpr auto kDefaultPriority = Priority::MEDIUM;
-constexpr auto kVersion = nn::Version::FEATURE_LEVEL_6;
+
+constexpr std::optional<nn::Version> aidlVersionToCanonicalVersion(int aidlVersion) {
+    switch (aidlVersion) {
+        case 1:
+            return nn::kVersionFeatureLevel5;
+        case 2:
+            return nn::kVersionFeatureLevel6;
+        case 3:
+            return nn::kVersionFeatureLevel7;
+        default:
+            return std::nullopt;
+    }
+}
+
+constexpr auto kVersion = aidlVersionToCanonicalVersion(IDevice::version).value();
 
 template <typename Type>
 nn::Result<void> validate(const Type& halObject) {
@@ -51,7 +66,7 @@ bool valid(const Type& halObject) {
 template <typename Type>
 nn::Result<void> compliantVersion(const Type& canonical) {
     const auto version = NN_TRY(nn::validate(canonical));
-    if (version > kVersion) {
+    if (!nn::isCompliantVersion(version, kVersion)) {
         return NN_ERROR() << "Insufficient version: " << version << " vs required " << kVersion;
     }
     return {};
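In Utils.h above, kVersion is now derived from IDevice::version through a constexpr std::optional, so bumping the AIDL interface without extending the switch can no longer compile with a stale feature level: calling .value() on an empty optional is not a constant expression, and the build fails instead. A standalone sketch of the same pattern, with hypothetical names in place of the nn:: types:

    // Standalone sketch of the pattern above, with hypothetical names in place
    // of the nn:: types: derive a compile-time constant from the interface
    // version via a constexpr switch returning std::optional, so an unmapped
    // version breaks the build instead of compiling with a stale value.
    #include <optional>

    enum class FeatureLevel { LEVEL_5 = 5, LEVEL_6 = 6, LEVEL_7 = 7 };

    constexpr std::optional<FeatureLevel> toFeatureLevel(int interfaceVersion) {
        switch (interfaceVersion) {
            case 1: return FeatureLevel::LEVEL_5;
            case 2: return FeatureLevel::LEVEL_6;
            case 3: return FeatureLevel::LEVEL_7;
            default: return std::nullopt;
        }
    }

    // Stand-in for IDevice::version, a static constant on the generated
    // NDK-backend interface class.
    constexpr int kInterfaceVersion = 3;

    // If kInterfaceVersion were bumped to 4 without extending the switch,
    // .value() would not be a constant expression and compilation would fail.
    constexpr auto kVersion = toFeatureLevel(kInterfaceVersion).value();
    static_assert(kVersion == FeatureLevel::LEVEL_7);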
diff --git a/neuralnetworks/aidl/utils/src/Callbacks.cpp b/neuralnetworks/aidl/utils/src/Callbacks.cpp
index a32147734c..8084970690 100644
--- a/neuralnetworks/aidl/utils/src/Callbacks.cpp
+++ b/neuralnetworks/aidl/utils/src/Callbacks.cpp
@@ -35,7 +35,8 @@ namespace {
 
 // Converts the results of IDevice::prepareModel* to the NN canonical format. On success, this
 // function returns with a non-null nn::SharedPreparedModel with a feature level of
-// nn::Version::ANDROID_S. On failure, this function returns with the appropriate nn::GeneralError.
+// nn::kVersionFeatureLevel5. On failure, this function returns with the appropriate
+// nn::GeneralError.
 nn::GeneralResult<nn::SharedPreparedModel> prepareModelCallback(
         ErrorStatus status, const std::shared_ptr<IPreparedModel>& preparedModel) {
     HANDLE_STATUS_AIDL(status) << "model preparation failed with " << toString(status);
diff --git a/neuralnetworks/aidl/utils/src/Service.cpp b/neuralnetworks/aidl/utils/src/Service.cpp
index 01772eed53..e48593c38e 100644
--- a/neuralnetworks/aidl/utils/src/Service.cpp
+++ b/neuralnetworks/aidl/utils/src/Service.cpp
@@ -46,13 +46,11 @@ nn::GeneralResult<nn::Version> getAidlServiceFeatureLevel(IDevice* service) {
     aidlVersion = std::min(aidlVersion, IDevice::version);
 
     // Map stable AIDL versions to canonical versions.
-    switch (aidlVersion) {
-        case 1:
-            return nn::Version::ANDROID_S;
-        case 2:
-            return nn::Version::FEATURE_LEVEL_6;
+    auto version = aidlVersionToCanonicalVersion(aidlVersion);
+    if (!version.has_value()) {
+        return NN_ERROR() << "Unknown AIDL service version: " << aidlVersion;
     }
-    return NN_ERROR() << "Unknown AIDL service version: " << aidlVersion;
+    return version.value();
 }
 
 }  // namespace
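getAidlServiceFeatureLevel() in Service.cpp keeps clamping the version reported by the remote service to the locally compiled IDevice::version, and now funnels the clamped value through the same aidlVersionToCanonicalVersion() mapping that defines kVersion, so the two can no longer drift apart. A self-contained sketch of that negotiation, using hypothetical names that mirror the shape of the real function (the mapping is repeated here so the sketch stands alone):

    // Self-contained sketch (hypothetical names) mirroring the shape of
    // getAidlServiceFeatureLevel(): clamp the remote version to the version
    // this client was built against, then map the result to a feature level.
    #include <algorithm>
    #include <optional>

    enum class FeatureLevel { LEVEL_5 = 5, LEVEL_6 = 6, LEVEL_7 = 7 };

    constexpr std::optional<FeatureLevel> toFeatureLevel(int aidlVersion) {
        switch (aidlVersion) {  // same shape as aidlVersionToCanonicalVersion() above
            case 1: return FeatureLevel::LEVEL_5;
            case 2: return FeatureLevel::LEVEL_6;
            case 3: return FeatureLevel::LEVEL_7;
            default: return std::nullopt;
        }
    }

    constexpr std::optional<FeatureLevel> negotiateFeatureLevel(int remoteVersion,
                                                                int localVersion) {
        // A service built against a newer AIDL interface may report a higher
        // version than this client understands; only the common subset is usable.
        return toFeatureLevel(std::min(remoteVersion, localVersion));
    }

    // A V3 service seen by a client built against V2 negotiates down to LEVEL_6;
    // an unrecognized version maps to nullopt, which Service.cpp turns into NN_ERROR().
    static_assert(negotiateFeatureLevel(3, 2) == FeatureLevel::LEVEL_6);
    static_assert(!negotiateFeatureLevel(0, 3).has_value());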
diff --git a/neuralnetworks/aidl/utils/test/DeviceTest.cpp b/neuralnetworks/aidl/utils/test/DeviceTest.cpp
index 79abe1b19b..0366e7dff0 100644
--- a/neuralnetworks/aidl/utils/test/DeviceTest.cpp
+++ b/neuralnetworks/aidl/utils/test/DeviceTest.cpp
@@ -152,13 +152,17 @@ class DeviceTest : public ::testing::TestWithParam<nn::Version> {
 };
 
 std::string printDeviceTest(const testing::TestParamInfo<nn::Version>& info) {
-    switch (info.param) {
-        case nn::Version::ANDROID_S:
+    const nn::Version version = info.param;
+    CHECK(!version.runtimeOnlyFeatures);
+    switch (version.level) {
+        case nn::Version::Level::FEATURE_LEVEL_5:
             return "v1";
-        case nn::Version::FEATURE_LEVEL_6:
+        case nn::Version::Level::FEATURE_LEVEL_6:
             return "v2";
+        case nn::Version::Level::FEATURE_LEVEL_7:
+            return "v3";
         default:
-            LOG(FATAL) << "Invalid AIDL version: " << info.param;
+            LOG(FATAL) << "Invalid AIDL version: " << version;
             return "invalid";
     }
 }
@@ -891,7 +895,8 @@ TEST_P(DeviceTest, allocateDeadObject) {
 }
 
 INSTANTIATE_TEST_SUITE_P(TestDevice, DeviceTest,
-                         ::testing::Values(nn::Version::ANDROID_S, nn::Version::FEATURE_LEVEL_6),
+                         ::testing::Values(nn::kVersionFeatureLevel5, nn::kVersionFeatureLevel6,
+                                           nn::kVersionFeatureLevel7),
                          printDeviceTest);
 
 }  // namespace aidl::android::hardware::neuralnetworks::utils
diff --git a/neuralnetworks/utils/common/test/ResilientDeviceTest.cpp b/neuralnetworks/utils/common/test/ResilientDeviceTest.cpp
index 3abd724c8c..0488b6359b 100644
--- a/neuralnetworks/utils/common/test/ResilientDeviceTest.cpp
+++ b/neuralnetworks/utils/common/test/ResilientDeviceTest.cpp
@@ -28,7 +28,6 @@ namespace android::hardware::neuralnetworks::utils {
 namespace {
 
 using ::testing::_;
-using ::testing::InvokeWithoutArgs;
 using ::testing::Return;
 
 using SharedMockDevice = std::shared_ptr<const nn::MockDevice>;
@@ -54,7 +53,7 @@ SharedMockDevice createConfiguredMockDevice() {
     // Setup default actions for each relevant call.
     constexpr auto getName_ret = []() -> const std::string& { return kName; };
     constexpr auto getVersionString_ret = []() -> const std::string& { return kVersionString; };
-    constexpr auto kFeatureLevel = nn::Version::ANDROID_OC_MR1;
+    constexpr auto kFeatureLevel = nn::kVersionFeatureLevel1;
     constexpr auto kDeviceType = nn::DeviceType::ACCELERATOR;
     constexpr auto getSupportedExtensions_ret = []() -> const std::vector<nn::Extension>& {
         return kExtensions;
@@ -142,7 +141,7 @@ TEST(ResilientDeviceTest, cachedData) {
 TEST(ResilientDeviceTest, getFeatureLevel) {
     // setup call
     const auto [mockDevice, mockDeviceFactory, device] = setup();
-    constexpr auto kFeatureLevel = nn::Version::ANDROID_OC_MR1;
+    constexpr auto kFeatureLevel = nn::kVersionFeatureLevel1;
     EXPECT_CALL(*mockDevice, getFeatureLevel()).Times(1).WillOnce(Return(kFeatureLevel));
 
     // run test
@@ -592,7 +591,7 @@ TEST(ResilientDeviceTest, recoverCacheMismatchGetFeatureLevel) {
     const auto recoveredMockDevice = createConfiguredMockDevice();
     EXPECT_CALL(*recoveredMockDevice, getFeatureLevel())
             .Times(1)
-            .WillOnce(Return(nn::Version::ANDROID_P));
+            .WillOnce(Return(nn::kVersionFeatureLevel2));
     EXPECT_CALL(*mockDeviceFactory, Call(false)).Times(1).WillOnce(Return(recoveredMockDevice));
 
     // run test