| author | Przemyslaw Szczepaniak <pszczepaniak@google.com> | 2018-09-24 12:25:55 +0000 |
|---|---|---|
| committer | Gerrit Code Review <noreply-gerritcodereview@google.com> | 2018-09-24 12:25:55 +0000 |
| commit | 96e08aa749446ad049ddc43c0b7c94e92a62d0eb (patch) | |
| tree | 07922bcb6ab782a40d9cdd3d24fe8227166cee6c | |
| parent | 8f8c756522160c1a8dbec91ff713c02dbd822af9 (diff) | |
| parent | feb87a9cf89d44f483fae9113b0dd5ff6a713da4 (diff) | |
Merge changes from topic "nnapisync_1.2"
* changes:
  Add VTS tests for NeuralNetworks v1.2
  Create NeuralNetworks HAL v1.2 for new OperationTypes
| -rw-r--r-- | neuralnetworks/1.0/vts/functional/Android.bp | 4 |
| -rw-r--r-- | neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp | 52 |
| -rw-r--r-- | neuralnetworks/1.1/vts/functional/Android.bp | 1 |
| -rw-r--r-- | neuralnetworks/1.2/Android.bp | 24 |
| -rw-r--r-- | neuralnetworks/1.2/IDevice.hal | 106 |
| -rw-r--r-- | neuralnetworks/1.2/types.hal | 112 |
| -rw-r--r-- | neuralnetworks/1.2/vts/OWNERS | 14 |
| -rw-r--r-- | neuralnetworks/1.2/vts/functional/Android.bp | 52 |
| -rw-r--r-- | neuralnetworks/1.2/vts/functional/BasicTests.cpp | 45 |
| -rw-r--r-- | neuralnetworks/1.2/vts/functional/GeneratedTests.cpp | 60 |
| -rw-r--r-- | neuralnetworks/1.2/vts/functional/Models.h | 378 |
| -rw-r--r-- | neuralnetworks/1.2/vts/functional/ValidateModel.cpp | 538 |
| -rw-r--r-- | neuralnetworks/1.2/vts/functional/ValidateRequest.cpp | 261 |
| -rw-r--r-- | neuralnetworks/1.2/vts/functional/ValidationTests.cpp | 50 |
| -rw-r--r-- | neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.cpp | 86 |
| -rw-r--r-- | neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.h | 92 |
16 files changed, 1874 insertions, 1 deletion
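The heart of this change is the pair of new `IDevice` methods, `getSupportedOperations_1_2` and `prepareModel_1_2`, which the tests below exercise. As a minimal sketch of the supported-operations query plus asynchronous-prepare flow (not code from this change; it assumes the generated @1.2 headers and the VTS `Callbacks.h` helper are on the include path, mirroring `Execute()` in GeneratedTestHarness.cpp below):

```cpp
#include <algorithm>
#include <cassert>
#include <android/hardware/neuralnetworks/1.2/IDevice.h>
#include "Callbacks.h"  // VTS helper providing PreparedModelCallback (assumption)

using ::android::sp;
using ::android::hardware::hidl_vec;
using ::android::hardware::Return;
using ::android::hardware::neuralnetworks::V1_0::ErrorStatus;
using ::android::hardware::neuralnetworks::V1_0::IPreparedModel;
using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback;
using ::android::hardware::neuralnetworks::V1_1::ExecutionPreference;
namespace V1_2 = ::android::hardware::neuralnetworks::V1_2;

sp<IPreparedModel> prepare(const sp<V1_2::IDevice>& device, const V1_2::Model& model) {
    // Step 1: ask the driver which operations it claims to support.
    bool fullySupported = false;
    Return<void> ret = device->getSupportedOperations_1_2(
            model, [&](ErrorStatus status, const hidl_vec<bool>& supported) {
                fullySupported = status == ErrorStatus::NONE &&
                                 std::all_of(supported.begin(), supported.end(),
                                             [](bool ok) { return ok; });
            });
    if (!ret.isOk()) return nullptr;

    // Step 2: launch asynchronous preparation; the callback must be
    // notified exactly once, whether or not preparation succeeds.
    sp<PreparedModelCallback> callback = new PreparedModelCallback();
    Return<ErrorStatus> launch =
            device->prepareModel_1_2(model, ExecutionPreference::FAST_SINGLE_ANSWER, callback);
    if (!launch.isOk() || static_cast<ErrorStatus>(launch) != ErrorStatus::NONE) return nullptr;

    // Step 3: block until the driver reports a result.
    callback->wait();
    if (callback->getStatus() == ErrorStatus::NONE) return callback->getPreparedModel();
    // Per the HAL contract, failure here is acceptable only when the driver
    // did not claim full support in step 1.
    assert(!fullySupported);
    return nullptr;
}
```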
diff --git a/neuralnetworks/1.0/vts/functional/Android.bp b/neuralnetworks/1.0/vts/functional/Android.bp index e28113bcdc..18f35c1a16 100644 --- a/neuralnetworks/1.0/vts/functional/Android.bp +++ b/neuralnetworks/1.0/vts/functional/Android.bp @@ -25,6 +25,7 @@ cc_library_static { static_libs: [ "android.hardware.neuralnetworks@1.0", "android.hardware.neuralnetworks@1.1", + "android.hardware.neuralnetworks@1.2", "android.hidl.allocator@1.0", "android.hidl.memory@1.0", "libhidlmemory", @@ -49,8 +50,9 @@ cc_test { ], defaults: ["VtsHalTargetTestDefaults"], static_libs: [ - "android.hardware.neuralnetworks@1.1", "android.hardware.neuralnetworks@1.0", + "android.hardware.neuralnetworks@1.1", + "android.hardware.neuralnetworks@1.2", "android.hidl.allocator@1.0", "android.hidl.memory@1.0", "libhidlmemory", diff --git a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp index 64495cf763..b8046c79b2 100644 --- a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp +++ b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp @@ -275,6 +275,58 @@ void Execute(const sp<V1_1::IDevice>& device, std::function<V1_1::Model(void)> c EvaluatePreparedModel(preparedModel, is_ignored, examples, fpAtol, fpRtol); } +// TODO: Reduce code duplication. +void Execute(const sp<V1_2::IDevice>& device, std::function<V1_2::Model(void)> create_model, + std::function<bool(int)> is_ignored, + const std::vector<MixedTypedExampleType>& examples) { + V1_2::Model model = create_model(); + + // see if service can handle model + bool fullySupportsModel = false; + Return<void> supportedCall = device->getSupportedOperations_1_2( + model, [&fullySupportsModel](ErrorStatus status, const hidl_vec<bool>& supported) { + ASSERT_EQ(ErrorStatus::NONE, status); + ASSERT_NE(0ul, supported.size()); + fullySupportsModel = + std::all_of(supported.begin(), supported.end(), [](bool valid) { return valid; }); + }); + ASSERT_TRUE(supportedCall.isOk()); + + // launch prepare model + sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback(); + ASSERT_NE(nullptr, preparedModelCallback.get()); + Return<ErrorStatus> prepareLaunchStatus = device->prepareModel_1_2( + model, ExecutionPreference::FAST_SINGLE_ANSWER, preparedModelCallback); + ASSERT_TRUE(prepareLaunchStatus.isOk()); + ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus)); + + // retrieve prepared model + preparedModelCallback->wait(); + ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus(); + sp<IPreparedModel> preparedModel = preparedModelCallback->getPreparedModel(); + + // early termination if vendor service cannot fully prepare model + if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) { + ASSERT_EQ(nullptr, preparedModel.get()); + LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot " + "prepare model that it does not support."; + std::cout << "[ ] Early termination of test because vendor service cannot " + "prepare model that it does not support." + << std::endl; + return; + } + EXPECT_EQ(ErrorStatus::NONE, prepareReturnStatus); + ASSERT_NE(nullptr, preparedModel.get()); + + // TODO: Adjust the error limit based on testing. + // If in relaxed mode, set the absolute tolerance to be 5ULP of FP16. + float fpAtol = !model.relaxComputationFloat32toFloat16 ? 1e-5f : 5.0f * 0.0009765625f; + // Set the relative tolerance to be 5ULP of the corresponding FP precision. 
+ float fpRtol = !model.relaxComputationFloat32toFloat16 ? 5.0f * 1.1920928955078125e-7f + : 5.0f * 0.0009765625f; + EvaluatePreparedModel(preparedModel, is_ignored, examples, fpAtol, fpRtol); +} + } // namespace generated_tests } // namespace neuralnetworks diff --git a/neuralnetworks/1.1/vts/functional/Android.bp b/neuralnetworks/1.1/vts/functional/Android.bp index f755c20be5..52a804a8a3 100644 --- a/neuralnetworks/1.1/vts/functional/Android.bp +++ b/neuralnetworks/1.1/vts/functional/Android.bp @@ -28,6 +28,7 @@ cc_test { static_libs: [ "android.hardware.neuralnetworks@1.0", "android.hardware.neuralnetworks@1.1", + "android.hardware.neuralnetworks@1.2", "android.hidl.allocator@1.0", "android.hidl.memory@1.0", "libhidlmemory", diff --git a/neuralnetworks/1.2/Android.bp b/neuralnetworks/1.2/Android.bp new file mode 100644 index 0000000000..e183a263fa --- /dev/null +++ b/neuralnetworks/1.2/Android.bp @@ -0,0 +1,24 @@ +// This file is autogenerated by hidl-gen -Landroidbp. + +hidl_interface { + name: "android.hardware.neuralnetworks@1.2", + root: "android.hardware", + vndk: { + enabled: true, + }, + srcs: [ + "types.hal", + "IDevice.hal", + ], + interfaces: [ + "android.hardware.neuralnetworks@1.0", + "android.hardware.neuralnetworks@1.1", + "android.hidl.base@1.0", + ], + types: [ + "Model", + "Operation", + "OperationType", + ], + gen_java: false, +} diff --git a/neuralnetworks/1.2/IDevice.hal b/neuralnetworks/1.2/IDevice.hal new file mode 100644 index 0000000000..9cc23a26f5 --- /dev/null +++ b/neuralnetworks/1.2/IDevice.hal @@ -0,0 +1,106 @@ +/* + * Copyright (C) 2018 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package android.hardware.neuralnetworks@1.2; + +import @1.0::ErrorStatus; +import @1.0::IPreparedModelCallback; +import @1.1::ExecutionPreference; +import @1.1::IDevice; + +/** + * This interface represents a device driver. + */ +interface IDevice extends @1.1::IDevice { + /** + * Gets the supported operations in a model. + * + * getSupportedOperations indicates which operations of a model are fully + * supported by the vendor driver. If an operation may not be supported for + * any reason, getSupportedOperations must return false for that operation. + * + * @param model A model whose operations--and their corresponding operands-- + * are to be verified by the driver. + * @return status Error status of the call, must be: + * - NONE if successful + * - DEVICE_UNAVAILABLE if driver is offline or busy + * - GENERAL_FAILURE if there is an unspecified error + * - INVALID_ARGUMENT if provided model is invalid + * @return supportedOperations A list of supported operations, where true + * indicates the operation is supported and false indicates the + * operation is not supported. The index of "supported" corresponds with + * the index of the operation it is describing. + */ + getSupportedOperations_1_2(Model model) + generates (ErrorStatus status, vec<bool> supportedOperations); + + /** + * Creates a prepared model for execution. 
+ * + * prepareModel is used to make any necessary transformations or alternative + * representations to a model for execution, possibly including + * transformations on the constant data, optimization on the model's graph, + * or compilation into the device's native binary format. The model itself + * is not changed. + * + * The model is prepared asynchronously with respect to the caller. The + * prepareModel function must verify the inputs to the prepareModel function + * are correct. If there is an error, prepareModel must immediately invoke + * the callback with the appropriate ErrorStatus value and nullptr for the + * IPreparedModel, then return with the same ErrorStatus. If the inputs to + * the prepareModel function are valid and there is no error, prepareModel + * must launch an asynchronous task to prepare the model in the background, + * and immediately return from prepareModel with ErrorStatus::NONE. If the + * asynchronous task fails to launch, prepareModel must immediately invoke + * the callback with ErrorStatus::GENERAL_FAILURE and nullptr for the + * IPreparedModel, then return with ErrorStatus::GENERAL_FAILURE. + * + * When the asynchronous task has finished preparing the model, it must + * immediately invoke the callback function provided as an input to + * prepareModel. If the model was prepared successfully, the callback object + * must be invoked with an error status of ErrorStatus::NONE and the + * produced IPreparedModel object. If an error occurred preparing the model, + * the callback object must be invoked with the appropriate ErrorStatus + * value and nullptr for the IPreparedModel. + * + * The only information that may be unknown to the model at this stage is + * the shape of the tensors, which may only be known at execution time. As + * such, some driver services may return partially prepared models, where + * the prepared model may only be finished when it is paired with a set of + * inputs to the model. Note that the same prepared model object may be + * used with different shapes of inputs on different (possibly concurrent) + * executions. + * + * Multiple threads may call prepareModel on the same model concurrently. + * + * @param model The model to be prepared for execution. + * @param preference Indicates the intended execution behavior of a prepared + * model. + * @param callback A callback object used to return the error status of + * preparing the model for execution and the prepared model if + * successful, nullptr otherwise. The callback object's notify function + * must be called exactly once, even if the model could not be prepared. + * @return status Error status of launching a task which prepares the model + * in the background; must be: + * - NONE if preparation task is successfully launched + * - DEVICE_UNAVAILABLE if driver is offline or busy + * - GENERAL_FAILURE if there is an unspecified error + * - INVALID_ARGUMENT if one of the input arguments is invalid + */ + prepareModel_1_2(Model model, ExecutionPreference preference, + IPreparedModelCallback callback) + generates (ErrorStatus status); +}; diff --git a/neuralnetworks/1.2/types.hal b/neuralnetworks/1.2/types.hal new file mode 100644 index 0000000000..06606cc330 --- /dev/null +++ b/neuralnetworks/1.2/types.hal @@ -0,0 +1,112 @@ +/* + * Copyright (C) 2018 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package android.hardware.neuralnetworks@1.2; + +import @1.0::Operand; +import @1.0::PerformanceInfo; +import @1.1::OperationType; + +/** + * Operation types. + * + * The type of an operation in a model. + */ +enum OperationType : @1.1::OperationType { +}; + +/** + * Describes one operation of the model's graph. + */ +struct Operation { + /** + * The operation type. + */ + OperationType type; + + /** + * Describes the table that contains the indexes of the inputs of the + * operation. The offset is the index in the operandIndexes table. + */ + vec<uint32_t> inputs; + + /** + * Describes the table that contains the indexes of the outputs of the + * operation. The offset is the index in the operandIndexes table. + */ + vec<uint32_t> outputs; +}; + +/** + * A Neural Network Model. + * + * This includes not only the execution graph, but also constant data such as + * weights or scalars added at construction time. The only information that + * may not be known is the shape of the input tensors. + */ +struct Model { + /** + * All operands included in the model. + */ + vec<Operand> operands; + + /** + * All operations included in the model. + * + * The operations are sorted into execution order. Every operand + * with lifetime MODEL_OUTPUT or TEMPORARY_VARIABLE must be + * written before it is read. + */ + vec<Operation> operations; + + /** + * Input indexes of the model. There must be at least one. + * + * Each value corresponds to the index of the operand in "operands". + */ + vec<uint32_t> inputIndexes; + + /** + * Output indexes of the model. There must be at least one. + * + * Each value corresponds to the index of the operand in "operands". + */ + vec<uint32_t> outputIndexes; + + /** + * A byte buffer containing operand data that were copied into the model. + * + * An operand's value must be located here if and only if Operand::lifetime + * equals OperandLifeTime::CONSTANT_COPY. + */ + vec<uint8_t> operandValues; + + /** + * A collection of shared memory pools containing operand values. + * + * An operand's value must be located here if and only if Operand::lifetime + * equals OperandLifeTime::CONSTANT_REFERENCE. + */ + vec<memory> pools; + + /** + * 'true' indicates TENSOR_FLOAT32 may be calculated with range and/or + * precision as low as that of the IEEE 754 16-bit floating-point format. + * 'false' indicates TENSOR_FLOAT32 must be calculated using at least the + * range and precision of the IEEE 754 32-bit floating-point format. 
+ */ + bool relaxComputationFloat32toFloat16; +}; diff --git a/neuralnetworks/1.2/vts/OWNERS b/neuralnetworks/1.2/vts/OWNERS new file mode 100644 index 0000000000..8f254365f6 --- /dev/null +++ b/neuralnetworks/1.2/vts/OWNERS @@ -0,0 +1,14 @@ +# Neuralnetworks team +butlermichael@google.com +dgross@google.com +jeanluc@google.com +levp@google.com +miaowang@google.com +mikie@google.com +mks@google.com +pszczepaniak@google.com +slavash@google.com + +# VTS team +yim@google.com +yuexima@google.com diff --git a/neuralnetworks/1.2/vts/functional/Android.bp b/neuralnetworks/1.2/vts/functional/Android.bp new file mode 100644 index 0000000000..2dc19ccbe4 --- /dev/null +++ b/neuralnetworks/1.2/vts/functional/Android.bp @@ -0,0 +1,52 @@ +// +// Copyright (C) 2018 The Android Open Source Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +cc_test { + name: "VtsHalNeuralnetworksV1_2TargetTest", + srcs: [ + "BasicTests.cpp", + "GeneratedTests.cpp", + "ValidateModel.cpp", + "ValidateRequest.cpp", + "ValidationTests.cpp", + "VtsHalNeuralnetworks.cpp", + ], + defaults: ["VtsHalTargetTestDefaults"], + static_libs: [ + "android.hardware.neuralnetworks@1.0", + "android.hardware.neuralnetworks@1.1", + "android.hardware.neuralnetworks@1.2", + "android.hidl.allocator@1.0", + "android.hidl.memory@1.0", + "libhidlmemory", + "libneuralnetworks_utils", + "VtsHalNeuralnetworksTest_utils", + ], + header_libs: [ + "libneuralnetworks_headers", + "libneuralnetworks_generated_test_harness_headers", + "libneuralnetworks_generated_tests", + ], + // Bug: http://b/74200014 - Disable arm32 asan since it triggers internal + // error in ld.gold. + arch: { + arm: { + sanitize: { + never: true, + }, + }, + }, +} diff --git a/neuralnetworks/1.2/vts/functional/BasicTests.cpp b/neuralnetworks/1.2/vts/functional/BasicTests.cpp new file mode 100644 index 0000000000..d2dea1dc75 --- /dev/null +++ b/neuralnetworks/1.2/vts/functional/BasicTests.cpp @@ -0,0 +1,45 @@ +/* + * Copyright (C) 2018 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#define LOG_TAG "neuralnetworks_hidl_hal_test" + +#include "VtsHalNeuralnetworks.h" + +namespace android { +namespace hardware { +namespace neuralnetworks { +namespace V1_2 { +namespace vts { +namespace functional { + +using V1_1::Capabilities; + +// create device test +TEST_F(NeuralnetworksHidlTest, CreateDevice) {} + +// status test +TEST_F(NeuralnetworksHidlTest, StatusTest) { + Return<DeviceStatus> status = device->getStatus(); + ASSERT_TRUE(status.isOk()); + EXPECT_EQ(DeviceStatus::AVAILABLE, static_cast<DeviceStatus>(status)); +} + +} // namespace functional +} // namespace vts +} // namespace V1_2 +} // namespace neuralnetworks +} // namespace hardware +} // namespace android diff --git a/neuralnetworks/1.2/vts/functional/GeneratedTests.cpp b/neuralnetworks/1.2/vts/functional/GeneratedTests.cpp new file mode 100644 index 0000000000..662c531340 --- /dev/null +++ b/neuralnetworks/1.2/vts/functional/GeneratedTests.cpp @@ -0,0 +1,60 @@ +/* + * Copyright (C) 2018 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#define LOG_TAG "neuralnetworks_hidl_hal_test" + +#include "VtsHalNeuralnetworks.h" + +#include "Callbacks.h" +#include "TestHarness.h" +#include "Utils.h" + +#include <android-base/logging.h> +#include <android/hidl/memory/1.0/IMemory.h> +#include <hidlmemory/mapping.h> + +namespace android { +namespace hardware { +namespace neuralnetworks { + +namespace generated_tests { +using ::test_helper::MixedTypedExampleType; +extern void Execute(const sp<V1_2::IDevice>&, std::function<V1_2::Model(void)>, + std::function<bool(int)>, const std::vector<MixedTypedExampleType>&); +} // namespace generated_tests + +namespace V1_2 { +namespace vts { +namespace functional { + +using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback; +using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback; +using ::android::nn::allocateSharedMemory; + +// Mixed-typed examples +typedef generated_tests::MixedTypedExampleType MixedTypedExample; + +// in frameworks/ml/nn/runtime/tests/generated/ +#include "all_generated_V1_0_vts_tests.cpp" +#include "all_generated_V1_1_vts_tests.cpp" +#include "all_generated_V1_2_vts_tests.cpp" + +} // namespace functional +} // namespace vts +} // namespace V1_2 +} // namespace neuralnetworks +} // namespace hardware +} // namespace android diff --git a/neuralnetworks/1.2/vts/functional/Models.h b/neuralnetworks/1.2/vts/functional/Models.h new file mode 100644 index 0000000000..f3769bc677 --- /dev/null +++ b/neuralnetworks/1.2/vts/functional/Models.h @@ -0,0 +1,378 @@ +/* + * Copyright (C) 2018 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef VTS_HAL_NEURALNETWORKS_V1_2_VTS_FUNCTIONAL_MODELS_H +#define VTS_HAL_NEURALNETWORKS_V1_2_VTS_FUNCTIONAL_MODELS_H + +#define LOG_TAG "neuralnetworks_hidl_hal_test" + +#include "TestHarness.h" + +#include <android/hardware/neuralnetworks/1.0/types.h> +#include <android/hardware/neuralnetworks/1.1/types.h> +#include <android/hardware/neuralnetworks/1.2/types.h> + +namespace android { +namespace hardware { +namespace neuralnetworks { +namespace V1_2 { +namespace vts { +namespace functional { + +using MixedTypedExample = test_helper::MixedTypedExampleType; + +#define FOR_EACH_TEST_MODEL(FN) \ + FN(add) \ + FN(add_broadcast_quant8) \ + FN(add_quant8) \ + FN(add_relaxed) \ + FN(avg_pool_float_1) \ + FN(avg_pool_float_1_relaxed) \ + FN(avg_pool_float_2) \ + FN(avg_pool_float_2_relaxed) \ + FN(avg_pool_float_3) \ + FN(avg_pool_float_3_relaxed) \ + FN(avg_pool_float_4) \ + FN(avg_pool_float_4_relaxed) \ + FN(avg_pool_float_5) \ + FN(avg_pool_float_5_relaxed) \ + FN(avg_pool_quant8_1) \ + FN(avg_pool_quant8_2) \ + FN(avg_pool_quant8_3) \ + FN(avg_pool_quant8_4) \ + FN(avg_pool_quant8_5) \ + FN(batch_to_space) \ + FN(batch_to_space_float_1) \ + FN(batch_to_space_float_1_relaxed) \ + FN(batch_to_space_quant8_1) \ + FN(batch_to_space_relaxed) \ + FN(concat_float_1) \ + FN(concat_float_1_relaxed) \ + FN(concat_float_2) \ + FN(concat_float_2_relaxed) \ + FN(concat_float_3) \ + FN(concat_float_3_relaxed) \ + FN(concat_quant8_1) \ + FN(concat_quant8_2) \ + FN(concat_quant8_3) \ + FN(conv_1_h3_w2_SAME) \ + FN(conv_1_h3_w2_SAME_relaxed) \ + FN(conv_1_h3_w2_VALID) \ + FN(conv_1_h3_w2_VALID_relaxed) \ + FN(conv_3_h3_w2_SAME) \ + FN(conv_3_h3_w2_SAME_relaxed) \ + FN(conv_3_h3_w2_VALID) \ + FN(conv_3_h3_w2_VALID_relaxed) \ + FN(conv_float) \ + FN(conv_float_2) \ + FN(conv_float_2_relaxed) \ + FN(conv_float_channels) \ + FN(conv_float_channels_relaxed) \ + FN(conv_float_channels_weights_as_inputs) \ + FN(conv_float_channels_weights_as_inputs_relaxed) \ + FN(conv_float_large) \ + FN(conv_float_large_relaxed) \ + FN(conv_float_large_weights_as_inputs) \ + FN(conv_float_large_weights_as_inputs_relaxed) \ + FN(conv_float_relaxed) \ + FN(conv_float_weights_as_inputs) \ + FN(conv_float_weights_as_inputs_relaxed) \ + FN(conv_quant8) \ + FN(conv_quant8_2) \ + FN(conv_quant8_channels) \ + FN(conv_quant8_channels_weights_as_inputs) \ + FN(conv_quant8_large) \ + FN(conv_quant8_large_weights_as_inputs) \ + FN(conv_quant8_overflow) \ + FN(conv_quant8_overflow_weights_as_inputs) \ + FN(conv_quant8_weights_as_inputs) \ + FN(depth_to_space_float_1) \ + FN(depth_to_space_float_1_relaxed) \ + FN(depth_to_space_float_2) \ + FN(depth_to_space_float_2_relaxed) \ + FN(depth_to_space_float_3) \ + FN(depth_to_space_float_3_relaxed) \ + FN(depth_to_space_quant8_1) \ + FN(depth_to_space_quant8_2) \ + FN(depthwise_conv) \ + FN(depthwise_conv2d_float) \ + FN(depthwise_conv2d_float_2) \ + FN(depthwise_conv2d_float_2_relaxed) \ + FN(depthwise_conv2d_float_large) \ + FN(depthwise_conv2d_float_large_2) \ + FN(depthwise_conv2d_float_large_2_relaxed) \ + FN(depthwise_conv2d_float_large_2_weights_as_inputs) \ + 
FN(depthwise_conv2d_float_large_2_weights_as_inputs_relaxed) \ + FN(depthwise_conv2d_float_large_relaxed) \ + FN(depthwise_conv2d_float_large_weights_as_inputs) \ + FN(depthwise_conv2d_float_large_weights_as_inputs_relaxed) \ + FN(depthwise_conv2d_float_relaxed) \ + FN(depthwise_conv2d_float_weights_as_inputs) \ + FN(depthwise_conv2d_float_weights_as_inputs_relaxed) \ + FN(depthwise_conv2d_quant8) \ + FN(depthwise_conv2d_quant8_2) \ + FN(depthwise_conv2d_quant8_large) \ + FN(depthwise_conv2d_quant8_large_weights_as_inputs) \ + FN(depthwise_conv2d_quant8_weights_as_inputs) \ + FN(depthwise_conv_relaxed) \ + FN(dequantize) \ + FN(dequantize_relaxed) \ + FN(div) \ + FN(div_broadcast_float) \ + FN(div_broadcast_float_relaxed) \ + FN(div_relaxed) \ + FN(embedding_lookup) \ + FN(embedding_lookup_relaxed) \ + FN(floor) \ + FN(floor_relaxed) \ + FN(fully_connected_float) \ + FN(fully_connected_float_2) \ + FN(fully_connected_float_2_relaxed) \ + FN(fully_connected_float_4d_simple) \ + FN(fully_connected_float_4d_simple_relaxed) \ + FN(fully_connected_float_large) \ + FN(fully_connected_float_large_relaxed) \ + FN(fully_connected_float_large_weights_as_inputs) \ + FN(fully_connected_float_large_weights_as_inputs_relaxed) \ + FN(fully_connected_float_relaxed) \ + FN(fully_connected_float_weights_as_inputs) \ + FN(fully_connected_float_weights_as_inputs_relaxed) \ + FN(fully_connected_quant8) \ + FN(fully_connected_quant8_2) \ + FN(fully_connected_quant8_large) \ + FN(fully_connected_quant8_large_weights_as_inputs) \ + FN(fully_connected_quant8_weights_as_inputs) \ + FN(hashtable_lookup_float) \ + FN(hashtable_lookup_float_relaxed) \ + FN(hashtable_lookup_quant8) \ + FN(l2_normalization) \ + FN(l2_normalization_2) \ + FN(l2_normalization_2_relaxed) \ + FN(l2_normalization_large) \ + FN(l2_normalization_large_relaxed) \ + FN(l2_normalization_relaxed) \ + FN(l2_pool_float) \ + FN(l2_pool_float_2) \ + FN(l2_pool_float_2_relaxed) \ + FN(l2_pool_float_large) \ + FN(l2_pool_float_large_relaxed) \ + FN(l2_pool_float_relaxed) \ + FN(local_response_norm_float_1) \ + FN(local_response_norm_float_1_relaxed) \ + FN(local_response_norm_float_2) \ + FN(local_response_norm_float_2_relaxed) \ + FN(local_response_norm_float_3) \ + FN(local_response_norm_float_3_relaxed) \ + FN(local_response_norm_float_4) \ + FN(local_response_norm_float_4_relaxed) \ + FN(logistic_float_1) \ + FN(logistic_float_1_relaxed) \ + FN(logistic_float_2) \ + FN(logistic_float_2_relaxed) \ + FN(logistic_quant8_1) \ + FN(logistic_quant8_2) \ + FN(lsh_projection) \ + FN(lsh_projection_2) \ + FN(lsh_projection_2_relaxed) \ + FN(lsh_projection_relaxed) \ + FN(lsh_projection_weights_as_inputs) \ + FN(lsh_projection_weights_as_inputs_relaxed) \ + FN(lstm) \ + FN(lstm2) \ + FN(lstm2_relaxed) \ + FN(lstm2_state) \ + FN(lstm2_state2) \ + FN(lstm2_state2_relaxed) \ + FN(lstm2_state_relaxed) \ + FN(lstm3) \ + FN(lstm3_relaxed) \ + FN(lstm3_state) \ + FN(lstm3_state2) \ + FN(lstm3_state2_relaxed) \ + FN(lstm3_state3) \ + FN(lstm3_state3_relaxed) \ + FN(lstm3_state_relaxed) \ + FN(lstm_relaxed) \ + FN(lstm_state) \ + FN(lstm_state2) \ + FN(lstm_state2_relaxed) \ + FN(lstm_state_relaxed) \ + FN(max_pool_float_1) \ + FN(max_pool_float_1_relaxed) \ + FN(max_pool_float_2) \ + FN(max_pool_float_2_relaxed) \ + FN(max_pool_float_3) \ + FN(max_pool_float_3_relaxed) \ + FN(max_pool_float_4) \ + FN(max_pool_float_4_relaxed) \ + FN(max_pool_quant8_1) \ + FN(max_pool_quant8_2) \ + FN(max_pool_quant8_3) \ + FN(max_pool_quant8_4) \ + FN(mean) \ + FN(mean_float_1) \ + 
FN(mean_float_1_relaxed) \ + FN(mean_float_2) \ + FN(mean_float_2_relaxed) \ + FN(mean_quant8_1) \ + FN(mean_quant8_2) \ + FN(mean_relaxed) \ + FN(mobilenet_224_gender_basic_fixed) \ + FN(mobilenet_224_gender_basic_fixed_relaxed) \ + FN(mobilenet_quantized) \ + FN(mul) \ + FN(mul_broadcast_quant8) \ + FN(mul_quant8) \ + FN(mul_relaxed) \ + FN(mul_relu) \ + FN(mul_relu_relaxed) \ + FN(pad) \ + FN(pad_float_1) \ + FN(pad_float_1_relaxed) \ + FN(pad_relaxed) \ + FN(relu1_float_1) \ + FN(relu1_float_1_relaxed) \ + FN(relu1_float_2) \ + FN(relu1_float_2_relaxed) \ + FN(relu1_quant8_1) \ + FN(relu1_quant8_2) \ + FN(relu6_float_1) \ + FN(relu6_float_1_relaxed) \ + FN(relu6_float_2) \ + FN(relu6_float_2_relaxed) \ + FN(relu6_quant8_1) \ + FN(relu6_quant8_2) \ + FN(relu_float_1) \ + FN(relu_float_1_relaxed) \ + FN(relu_float_2) \ + FN(relu_float_2_relaxed) \ + FN(relu_quant8_1) \ + FN(relu_quant8_2) \ + FN(reshape) \ + FN(reshape_quant8) \ + FN(reshape_quant8_weights_as_inputs) \ + FN(reshape_relaxed) \ + FN(reshape_weights_as_inputs) \ + FN(reshape_weights_as_inputs_relaxed) \ + FN(resize_bilinear) \ + FN(resize_bilinear_2) \ + FN(resize_bilinear_2_relaxed) \ + FN(resize_bilinear_relaxed) \ + FN(rnn) \ + FN(rnn_relaxed) \ + FN(rnn_state) \ + FN(rnn_state_relaxed) \ + FN(softmax_float_1) \ + FN(softmax_float_1_relaxed) \ + FN(softmax_float_2) \ + FN(softmax_float_2_relaxed) \ + FN(softmax_quant8_1) \ + FN(softmax_quant8_2) \ + FN(space_to_batch) \ + FN(space_to_batch_float_1) \ + FN(space_to_batch_float_1_relaxed) \ + FN(space_to_batch_float_2) \ + FN(space_to_batch_float_2_relaxed) \ + FN(space_to_batch_float_3) \ + FN(space_to_batch_float_3_relaxed) \ + FN(space_to_batch_quant8_1) \ + FN(space_to_batch_quant8_2) \ + FN(space_to_batch_quant8_3) \ + FN(space_to_batch_relaxed) \ + FN(space_to_depth_float_1) \ + FN(space_to_depth_float_1_relaxed) \ + FN(space_to_depth_float_2) \ + FN(space_to_depth_float_2_relaxed) \ + FN(space_to_depth_float_3) \ + FN(space_to_depth_float_3_relaxed) \ + FN(space_to_depth_quant8_1) \ + FN(space_to_depth_quant8_2) \ + FN(squeeze) \ + FN(squeeze_float_1) \ + FN(squeeze_float_1_relaxed) \ + FN(squeeze_quant8_1) \ + FN(squeeze_relaxed) \ + FN(strided_slice) \ + FN(strided_slice_float_1) \ + FN(strided_slice_float_10) \ + FN(strided_slice_float_10_relaxed) \ + FN(strided_slice_float_11) \ + FN(strided_slice_float_11_relaxed) \ + FN(strided_slice_float_1_relaxed) \ + FN(strided_slice_float_2) \ + FN(strided_slice_float_2_relaxed) \ + FN(strided_slice_float_3) \ + FN(strided_slice_float_3_relaxed) \ + FN(strided_slice_float_4) \ + FN(strided_slice_float_4_relaxed) \ + FN(strided_slice_float_5) \ + FN(strided_slice_float_5_relaxed) \ + FN(strided_slice_float_6) \ + FN(strided_slice_float_6_relaxed) \ + FN(strided_slice_float_7) \ + FN(strided_slice_float_7_relaxed) \ + FN(strided_slice_float_8) \ + FN(strided_slice_float_8_relaxed) \ + FN(strided_slice_float_9) \ + FN(strided_slice_float_9_relaxed) \ + FN(strided_slice_qaunt8_10) \ + FN(strided_slice_qaunt8_11) \ + FN(strided_slice_quant8_1) \ + FN(strided_slice_quant8_2) \ + FN(strided_slice_quant8_3) \ + FN(strided_slice_quant8_4) \ + FN(strided_slice_quant8_5) \ + FN(strided_slice_quant8_6) \ + FN(strided_slice_quant8_7) \ + FN(strided_slice_quant8_8) \ + FN(strided_slice_quant8_9) \ + FN(strided_slice_relaxed) \ + FN(sub) \ + FN(sub_broadcast_float) \ + FN(sub_broadcast_float_relaxed) \ + FN(sub_relaxed) \ + FN(svdf) \ + FN(svdf2) \ + FN(svdf2_relaxed) \ + FN(svdf_relaxed) \ + FN(svdf_state) \ + FN(svdf_state_relaxed) \ 
+ FN(tanh) \ + FN(tanh_relaxed) \ + FN(transpose) \ + FN(transpose_float_1) \ + FN(transpose_float_1_relaxed) \ + FN(transpose_quant8_1) \ + FN(transpose_relaxed) + +#define FORWARD_DECLARE_GENERATED_OBJECTS(function) \ + namespace function { \ + extern std::vector<MixedTypedExample> examples; \ + Model createTestModel(); \ + } + +FOR_EACH_TEST_MODEL(FORWARD_DECLARE_GENERATED_OBJECTS) + +#undef FORWARD_DECLARE_GENERATED_OBJECTS + +} // namespace functional +} // namespace vts +} // namespace V1_2 +} // namespace neuralnetworks +} // namespace hardware +} // namespace android + +#endif // VTS_HAL_NEURALNETWORKS_V1_2_VTS_FUNCTIONAL_MODELS_H diff --git a/neuralnetworks/1.2/vts/functional/ValidateModel.cpp b/neuralnetworks/1.2/vts/functional/ValidateModel.cpp new file mode 100644 index 0000000000..7ec6ff183e --- /dev/null +++ b/neuralnetworks/1.2/vts/functional/ValidateModel.cpp @@ -0,0 +1,538 @@ +/* + * Copyright (C) 2018 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#define LOG_TAG "neuralnetworks_hidl_hal_test" + +#include "VtsHalNeuralnetworks.h" + +#include "Callbacks.h" + +namespace android { +namespace hardware { +namespace neuralnetworks { +namespace V1_2 { + +using V1_0::IPreparedModel; +using V1_0::Operand; +using V1_0::OperandLifeTime; +using V1_0::OperandType; +using V1_1::ExecutionPreference; + +namespace vts { +namespace functional { + +using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback; +using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback; + +///////////////////////// UTILITY FUNCTIONS ///////////////////////// + +static void validateGetSupportedOperations(const sp<IDevice>& device, const std::string& message, + const Model& model) { + SCOPED_TRACE(message + " [getSupportedOperations_1_2]"); + + Return<void> ret = + device->getSupportedOperations_1_2(model, [&](ErrorStatus status, const hidl_vec<bool>&) { + EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, status); + }); + EXPECT_TRUE(ret.isOk()); +} + +static void validatePrepareModel(const sp<IDevice>& device, const std::string& message, + const Model& model, ExecutionPreference preference) { + SCOPED_TRACE(message + " [prepareModel_1_2]"); + + sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback(); + ASSERT_NE(nullptr, preparedModelCallback.get()); + Return<ErrorStatus> prepareLaunchStatus = + device->prepareModel_1_2(model, preference, preparedModelCallback); + ASSERT_TRUE(prepareLaunchStatus.isOk()); + ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(prepareLaunchStatus)); + + preparedModelCallback->wait(); + ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus(); + ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, prepareReturnStatus); + sp<IPreparedModel> preparedModel = preparedModelCallback->getPreparedModel(); + ASSERT_EQ(nullptr, preparedModel.get()); +} + +static bool validExecutionPreference(ExecutionPreference preference) { + return preference == 
ExecutionPreference::LOW_POWER || + preference == ExecutionPreference::FAST_SINGLE_ANSWER || + preference == ExecutionPreference::SUSTAINED_SPEED; +} + +// Primary validation function. This function will take a valid model, apply a +// mutation to it to invalidate the model, then pass it to interface calls that +// use the model. Note that the model here is passed by value, and any mutation +// to the model does not leave this function. +static void validate(const sp<IDevice>& device, const std::string& message, Model model, + const std::function<void(Model*)>& mutation, + ExecutionPreference preference = ExecutionPreference::FAST_SINGLE_ANSWER) { + mutation(&model); + if (validExecutionPreference(preference)) { + validateGetSupportedOperations(device, message, model); + } + validatePrepareModel(device, message, model, preference); +} + +// Delete element from hidl_vec. hidl_vec doesn't support a "remove" operation, +// so this is efficiently accomplished by moving the element to the end and +// resizing the hidl_vec to one less. +template <typename Type> +static void hidl_vec_removeAt(hidl_vec<Type>* vec, uint32_t index) { + if (vec) { + std::rotate(vec->begin() + index, vec->begin() + index + 1, vec->end()); + vec->resize(vec->size() - 1); + } +} + +template <typename Type> +static uint32_t hidl_vec_push_back(hidl_vec<Type>* vec, const Type& value) { + // assume vec is valid + const uint32_t index = vec->size(); + vec->resize(index + 1); + (*vec)[index] = value; + return index; +} + +static uint32_t addOperand(Model* model) { + return hidl_vec_push_back(&model->operands, + { + .type = OperandType::INT32, + .dimensions = {}, + .numberOfConsumers = 0, + .scale = 0.0f, + .zeroPoint = 0, + .lifetime = OperandLifeTime::MODEL_INPUT, + .location = {.poolIndex = 0, .offset = 0, .length = 0}, + }); +} + +static uint32_t addOperand(Model* model, OperandLifeTime lifetime) { + uint32_t index = addOperand(model); + model->operands[index].numberOfConsumers = 1; + model->operands[index].lifetime = lifetime; + return index; +} + +///////////////////////// VALIDATE MODEL OPERAND TYPE ///////////////////////// + +static const int32_t invalidOperandTypes[] = { + static_cast<int32_t>(OperandType::FLOAT32) - 1, // lower bound fundamental + static_cast<int32_t>(OperandType::TENSOR_QUANT8_ASYMM) + 1, // upper bound fundamental + static_cast<int32_t>(OperandType::OEM) - 1, // lower bound OEM + static_cast<int32_t>(OperandType::TENSOR_OEM_BYTE) + 1, // upper bound OEM +}; + +static void mutateOperandTypeTest(const sp<IDevice>& device, const Model& model) { + for (size_t operand = 0; operand < model.operands.size(); ++operand) { + for (int32_t invalidOperandType : invalidOperandTypes) { + const std::string message = "mutateOperandTypeTest: operand " + + std::to_string(operand) + " set to value " + + std::to_string(invalidOperandType); + validate(device, message, model, [operand, invalidOperandType](Model* model) { + model->operands[operand].type = static_cast<OperandType>(invalidOperandType); + }); + } + } +} + +///////////////////////// VALIDATE OPERAND RANK ///////////////////////// + +static uint32_t getInvalidRank(OperandType type) { + switch (type) { + case OperandType::FLOAT32: + case OperandType::INT32: + case OperandType::UINT32: + return 1; + case OperandType::TENSOR_FLOAT32: + case OperandType::TENSOR_INT32: + case OperandType::TENSOR_QUANT8_ASYMM: + return 0; + default: + return 0; + } +} + +static void mutateOperandRankTest(const sp<IDevice>& device, const Model& model) { + for (size_t operand = 0; 
operand < model.operands.size(); ++operand) { + const uint32_t invalidRank = getInvalidRank(model.operands[operand].type); + const std::string message = "mutateOperandRankTest: operand " + std::to_string(operand) + + " has rank of " + std::to_string(invalidRank); + validate(device, message, model, [operand, invalidRank](Model* model) { + model->operands[operand].dimensions = std::vector<uint32_t>(invalidRank, 0); + }); + } +} + +///////////////////////// VALIDATE OPERAND SCALE ///////////////////////// + +static float getInvalidScale(OperandType type) { + switch (type) { + case OperandType::FLOAT32: + case OperandType::INT32: + case OperandType::UINT32: + case OperandType::TENSOR_FLOAT32: + return 1.0f; + case OperandType::TENSOR_INT32: + return -1.0f; + case OperandType::TENSOR_QUANT8_ASYMM: + return 0.0f; + default: + return 0.0f; + } +} + +static void mutateOperandScaleTest(const sp<IDevice>& device, const Model& model) { + for (size_t operand = 0; operand < model.operands.size(); ++operand) { + const float invalidScale = getInvalidScale(model.operands[operand].type); + const std::string message = "mutateOperandScaleTest: operand " + std::to_string(operand) + + " has scale of " + std::to_string(invalidScale); + validate(device, message, model, [operand, invalidScale](Model* model) { + model->operands[operand].scale = invalidScale; + }); + } +} + +///////////////////////// VALIDATE OPERAND ZERO POINT ///////////////////////// + +static std::vector<int32_t> getInvalidZeroPoints(OperandType type) { + switch (type) { + case OperandType::FLOAT32: + case OperandType::INT32: + case OperandType::UINT32: + case OperandType::TENSOR_FLOAT32: + case OperandType::TENSOR_INT32: + return {1}; + case OperandType::TENSOR_QUANT8_ASYMM: + return {-1, 256}; + default: + return {}; + } +} + +static void mutateOperandZeroPointTest(const sp<IDevice>& device, const Model& model) { + for (size_t operand = 0; operand < model.operands.size(); ++operand) { + const std::vector<int32_t> invalidZeroPoints = + getInvalidZeroPoints(model.operands[operand].type); + for (int32_t invalidZeroPoint : invalidZeroPoints) { + const std::string message = "mutateOperandZeroPointTest: operand " + + std::to_string(operand) + " has zero point of " + + std::to_string(invalidZeroPoint); + validate(device, message, model, [operand, invalidZeroPoint](Model* model) { + model->operands[operand].zeroPoint = invalidZeroPoint; + }); + } + } +} + +///////////////////////// VALIDATE EXTRA ??? ///////////////////////// + +// TODO: Operand::lifetime +// TODO: Operand::location + +///////////////////////// VALIDATE OPERATION OPERAND TYPE ///////////////////////// + +static void mutateOperand(Operand* operand, OperandType type) { + Operand newOperand = *operand; + newOperand.type = type; + switch (type) { + case OperandType::FLOAT32: + case OperandType::INT32: + case OperandType::UINT32: + newOperand.dimensions = hidl_vec<uint32_t>(); + newOperand.scale = 0.0f; + newOperand.zeroPoint = 0; + break; + case OperandType::TENSOR_FLOAT32: + newOperand.dimensions = + operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1}); + newOperand.scale = 0.0f; + newOperand.zeroPoint = 0; + break; + case OperandType::TENSOR_INT32: + newOperand.dimensions = + operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1}); + newOperand.zeroPoint = 0; + break; + case OperandType::TENSOR_QUANT8_ASYMM: + newOperand.dimensions = + operand->dimensions.size() > 0 ? 
operand->dimensions : hidl_vec<uint32_t>({1}); + newOperand.scale = operand->scale != 0.0f ? operand->scale : 1.0f; + break; + case OperandType::OEM: + case OperandType::TENSOR_OEM_BYTE: + default: + break; + } + *operand = newOperand; +} + +static bool mutateOperationOperandTypeSkip(size_t operand, const Model& model) { + // LSH_PROJECTION's second argument is allowed to have any type. This is the + // only operation that currently has a type that can be anything independent + // from any other type. Changing the operand type to any other type will + // result in a valid model for LSH_PROJECTION. If this is the case, skip the + // test. + for (const Operation& operation : model.operations) { + if (operation.type == OperationType::LSH_PROJECTION && operand == operation.inputs[1]) { + return true; + } + } + return false; +} + +static void mutateOperationOperandTypeTest(const sp<IDevice>& device, const Model& model) { + for (size_t operand = 0; operand < model.operands.size(); ++operand) { + if (mutateOperationOperandTypeSkip(operand, model)) { + continue; + } + for (OperandType invalidOperandType : hidl_enum_range<OperandType>{}) { + // Do not test OEM types + if (invalidOperandType == model.operands[operand].type || + invalidOperandType == OperandType::OEM || + invalidOperandType == OperandType::TENSOR_OEM_BYTE) { + continue; + } + const std::string message = "mutateOperationOperandTypeTest: operand " + + std::to_string(operand) + " set to type " + + toString(invalidOperandType); + validate(device, message, model, [operand, invalidOperandType](Model* model) { + mutateOperand(&model->operands[operand], invalidOperandType); + }); + } + } +} + +///////////////////////// VALIDATE MODEL OPERATION TYPE ///////////////////////// + +static const int32_t invalidOperationTypes[] = { + static_cast<int32_t>(OperationType::ADD) - 1, // lower bound fundamental + static_cast<int32_t>(OperationType::TRANSPOSE) + 1, // upper bound fundamental + static_cast<int32_t>(OperationType::OEM_OPERATION) - 1, // lower bound OEM + static_cast<int32_t>(OperationType::OEM_OPERATION) + 1, // upper bound OEM +}; + +static void mutateOperationTypeTest(const sp<IDevice>& device, const Model& model) { + for (size_t operation = 0; operation < model.operations.size(); ++operation) { + for (int32_t invalidOperationType : invalidOperationTypes) { + const std::string message = "mutateOperationTypeTest: operation " + + std::to_string(operation) + " set to value " + + std::to_string(invalidOperationType); + validate(device, message, model, [operation, invalidOperationType](Model* model) { + model->operations[operation].type = + static_cast<OperationType>(invalidOperationType); + }); + } + } +} + +///////////////////////// VALIDATE MODEL OPERATION INPUT OPERAND INDEX ///////////////////////// + +static void mutateOperationInputOperandIndexTest(const sp<IDevice>& device, const Model& model) { + for (size_t operation = 0; operation < model.operations.size(); ++operation) { + const uint32_t invalidOperand = model.operands.size(); + for (size_t input = 0; input < model.operations[operation].inputs.size(); ++input) { + const std::string message = "mutateOperationInputOperandIndexTest: operation " + + std::to_string(operation) + " input " + + std::to_string(input); + validate(device, message, model, [operation, input, invalidOperand](Model* model) { + model->operations[operation].inputs[input] = invalidOperand; + }); + } + } +} + +///////////////////////// VALIDATE MODEL OPERATION OUTPUT OPERAND INDEX ///////////////////////// + +static 
void mutateOperationOutputOperandIndexTest(const sp<IDevice>& device, const Model& model) { + for (size_t operation = 0; operation < model.operations.size(); ++operation) { + const uint32_t invalidOperand = model.operands.size(); + for (size_t output = 0; output < model.operations[operation].outputs.size(); ++output) { + const std::string message = "mutateOperationOutputOperandIndexTest: operation " + + std::to_string(operation) + " output " + + std::to_string(output); + validate(device, message, model, [operation, output, invalidOperand](Model* model) { + model->operations[operation].outputs[output] = invalidOperand; + }); + } + } +} + +///////////////////////// REMOVE OPERAND FROM EVERYTHING ///////////////////////// + +static void removeValueAndDecrementGreaterValues(hidl_vec<uint32_t>* vec, uint32_t value) { + if (vec) { + // remove elements matching "value" + auto last = std::remove(vec->begin(), vec->end(), value); + vec->resize(std::distance(vec->begin(), last)); + + // decrement elements exceeding "value" + std::transform(vec->begin(), vec->end(), vec->begin(), + [value](uint32_t v) { return v > value ? v - 1 : v; }); + } +} + +static void removeOperand(Model* model, uint32_t index) { + hidl_vec_removeAt(&model->operands, index); + for (Operation& operation : model->operations) { + removeValueAndDecrementGreaterValues(&operation.inputs, index); + removeValueAndDecrementGreaterValues(&operation.outputs, index); + } + removeValueAndDecrementGreaterValues(&model->inputIndexes, index); + removeValueAndDecrementGreaterValues(&model->outputIndexes, index); +} + +static void removeOperandTest(const sp<IDevice>& device, const Model& model) { + for (size_t operand = 0; operand < model.operands.size(); ++operand) { + const std::string message = "removeOperandTest: operand " + std::to_string(operand); + validate(device, message, model, + [operand](Model* model) { removeOperand(model, operand); }); + } +} + +///////////////////////// REMOVE OPERATION ///////////////////////// + +static void removeOperation(Model* model, uint32_t index) { + for (uint32_t operand : model->operations[index].inputs) { + model->operands[operand].numberOfConsumers--; + } + hidl_vec_removeAt(&model->operations, index); +} + +static void removeOperationTest(const sp<IDevice>& device, const Model& model) { + for (size_t operation = 0; operation < model.operations.size(); ++operation) { + const std::string message = "removeOperationTest: operation " + std::to_string(operation); + validate(device, message, model, + [operation](Model* model) { removeOperation(model, operation); }); + } +} + +///////////////////////// REMOVE OPERATION INPUT ///////////////////////// + +static void removeOperationInputTest(const sp<IDevice>& device, const Model& model) { + for (size_t operation = 0; operation < model.operations.size(); ++operation) { + for (size_t input = 0; input < model.operations[operation].inputs.size(); ++input) { + const Operation& op = model.operations[operation]; + // CONCATENATION has at least 2 inputs, with the last element being + // INT32. Skip this test if removing one of CONCATENATION's + // inputs still produces a valid model. 
+ if (op.type == OperationType::CONCATENATION && op.inputs.size() > 2 && + input != op.inputs.size() - 1) { + continue; + } + const std::string message = "removeOperationInputTest: operation " + + std::to_string(operation) + ", input " + + std::to_string(input); + validate(device, message, model, [operation, input](Model* model) { + uint32_t operand = model->operations[operation].inputs[input]; + model->operands[operand].numberOfConsumers--; + hidl_vec_removeAt(&model->operations[operation].inputs, input); + }); + } + } +} + +///////////////////////// REMOVE OPERATION OUTPUT ///////////////////////// + +static void removeOperationOutputTest(const sp<IDevice>& device, const Model& model) { + for (size_t operation = 0; operation < model.operations.size(); ++operation) { + for (size_t output = 0; output < model.operations[operation].outputs.size(); ++output) { + const std::string message = "removeOperationOutputTest: operation " + + std::to_string(operation) + ", output " + + std::to_string(output); + validate(device, message, model, [operation, output](Model* model) { + hidl_vec_removeAt(&model->operations[operation].outputs, output); + }); + } + } +} + +///////////////////////// MODEL VALIDATION ///////////////////////// + +// TODO: remove model input +// TODO: remove model output +// TODO: add unused operation + +///////////////////////// ADD OPERATION INPUT ///////////////////////// + +static void addOperationInputTest(const sp<IDevice>& device, const Model& model) { + for (size_t operation = 0; operation < model.operations.size(); ++operation) { + const std::string message = "addOperationInputTest: operation " + std::to_string(operation); + validate(device, message, model, [operation](Model* model) { + uint32_t index = addOperand(model, OperandLifeTime::MODEL_INPUT); + hidl_vec_push_back(&model->operations[operation].inputs, index); + hidl_vec_push_back(&model->inputIndexes, index); + }); + } +} + +///////////////////////// ADD OPERATION OUTPUT ///////////////////////// + +static void addOperationOutputTest(const sp<IDevice>& device, const Model& model) { + for (size_t operation = 0; operation < model.operations.size(); ++operation) { + const std::string message = + "addOperationOutputTest: operation " + std::to_string(operation); + validate(device, message, model, [operation](Model* model) { + uint32_t index = addOperand(model, OperandLifeTime::MODEL_OUTPUT); + hidl_vec_push_back(&model->operations[operation].outputs, index); + hidl_vec_push_back(&model->outputIndexes, index); + }); + } +} + +///////////////////////// VALIDATE EXECUTION PREFERENCE ///////////////////////// + +static const int32_t invalidExecutionPreferences[] = { + static_cast<int32_t>(ExecutionPreference::LOW_POWER) - 1, // lower bound + static_cast<int32_t>(ExecutionPreference::SUSTAINED_SPEED) + 1, // upper bound +}; + +static void mutateExecutionPreferenceTest(const sp<IDevice>& device, const Model& model) { + for (int32_t preference : invalidExecutionPreferences) { + const std::string message = + "mutateExecutionPreferenceTest: preference " + std::to_string(preference); + validate(device, message, model, [](Model*) {}, + static_cast<ExecutionPreference>(preference)); + } +} + +////////////////////////// ENTRY POINT ////////////////////////////// + +void ValidationTest::validateModel(const Model& model) { + mutateOperandTypeTest(device, model); + mutateOperandRankTest(device, model); + mutateOperandScaleTest(device, model); + mutateOperandZeroPointTest(device, model); + mutateOperationOperandTypeTest(device, model); 
+ mutateOperationTypeTest(device, model); + mutateOperationInputOperandIndexTest(device, model); + mutateOperationOutputOperandIndexTest(device, model); + removeOperandTest(device, model); + removeOperationTest(device, model); + removeOperationInputTest(device, model); + removeOperationOutputTest(device, model); + addOperationInputTest(device, model); + addOperationOutputTest(device, model); + mutateExecutionPreferenceTest(device, model); +} + +} // namespace functional +} // namespace vts +} // namespace V1_2 +} // namespace neuralnetworks +} // namespace hardware +} // namespace android diff --git a/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp b/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp new file mode 100644 index 0000000000..f4476fa07a --- /dev/null +++ b/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp @@ -0,0 +1,261 @@ +/* + * Copyright (C) 2018 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#define LOG_TAG "neuralnetworks_hidl_hal_test" + +#include "VtsHalNeuralnetworks.h" + +#include "Callbacks.h" +#include "TestHarness.h" +#include "Utils.h" + +#include <android-base/logging.h> +#include <android/hidl/memory/1.0/IMemory.h> +#include <hidlmemory/mapping.h> + +namespace android { +namespace hardware { +namespace neuralnetworks { +namespace V1_2 { +namespace vts { +namespace functional { + +using ::android::hardware::neuralnetworks::V1_0::implementation::ExecutionCallback; +using ::android::hardware::neuralnetworks::V1_0::implementation::PreparedModelCallback; +using ::android::hidl::memory::V1_0::IMemory; +using test_helper::for_all; +using test_helper::MixedTyped; +using test_helper::MixedTypedExampleType; + +///////////////////////// UTILITY FUNCTIONS ///////////////////////// + +static void createPreparedModel(const sp<IDevice>& device, const Model& model, + sp<IPreparedModel>* preparedModel) { + ASSERT_NE(nullptr, preparedModel); + + // see if service can handle model + bool fullySupportsModel = false; + Return<void> supportedOpsLaunchStatus = device->getSupportedOperations_1_2( + model, [&fullySupportsModel](ErrorStatus status, const hidl_vec<bool>& supported) { + ASSERT_EQ(ErrorStatus::NONE, status); + ASSERT_NE(0ul, supported.size()); + fullySupportsModel = + std::all_of(supported.begin(), supported.end(), [](bool valid) { return valid; }); + }); + ASSERT_TRUE(supportedOpsLaunchStatus.isOk()); + + // launch prepare model + sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback(); + ASSERT_NE(nullptr, preparedModelCallback.get()); + Return<ErrorStatus> prepareLaunchStatus = device->prepareModel_1_2( + model, ExecutionPreference::FAST_SINGLE_ANSWER, preparedModelCallback); + ASSERT_TRUE(prepareLaunchStatus.isOk()); + ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus)); + + // retrieve prepared model + preparedModelCallback->wait(); + ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus(); + *preparedModel = 
preparedModelCallback->getPreparedModel(); + + // The getSupportedOperations_1_2 call returns a list of operations that are + // guaranteed not to fail if prepareModel_1_2 is called, and + // 'fullySupportsModel' is true i.f.f. the entire model is guaranteed. + // If a driver has any doubt that it can prepare an operation, it must + // return false. So here, if a driver isn't sure if it can support an + // operation, but reports that it successfully prepared the model, the test + // can continue. + if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) { + ASSERT_EQ(nullptr, preparedModel->get()); + LOG(INFO) << "NN VTS: Unable to test Request validation because vendor service cannot " + "prepare model that it does not support."; + std::cout << "[ ] Unable to test Request validation because vendor service " + "cannot prepare model that it does not support." + << std::endl; + return; + } + ASSERT_EQ(ErrorStatus::NONE, prepareReturnStatus); + ASSERT_NE(nullptr, preparedModel->get()); +} + +// Primary validation function. This function will take a valid request, apply a +// mutation to it to invalidate the request, then pass it to interface calls +// that use the request. Note that the request here is passed by value, and any +// mutation to the request does not leave this function. +static void validate(const sp<IPreparedModel>& preparedModel, const std::string& message, + Request request, const std::function<void(Request*)>& mutation) { + mutation(&request); + SCOPED_TRACE(message + " [execute]"); + + sp<ExecutionCallback> executionCallback = new ExecutionCallback(); + ASSERT_NE(nullptr, executionCallback.get()); + Return<ErrorStatus> executeLaunchStatus = preparedModel->execute(request, executionCallback); + ASSERT_TRUE(executeLaunchStatus.isOk()); + ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(executeLaunchStatus)); + + executionCallback->wait(); + ErrorStatus executionReturnStatus = executionCallback->getStatus(); + ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, executionReturnStatus); +} + +// Delete element from hidl_vec. hidl_vec doesn't support a "remove" operation, +// so this is efficiently accomplished by moving the element to the end and +// resizing the hidl_vec to one less. 
+template <typename Type>
+static void hidl_vec_removeAt(hidl_vec<Type>* vec, uint32_t index) {
+    if (vec) {
+        std::rotate(vec->begin() + index, vec->begin() + index + 1, vec->end());
+        vec->resize(vec->size() - 1);
+    }
+}
+
+template <typename Type>
+static uint32_t hidl_vec_push_back(hidl_vec<Type>* vec, const Type& value) {
+    // assume vec is valid
+    const uint32_t index = vec->size();
+    vec->resize(index + 1);
+    (*vec)[index] = value;
+    return index;
+}
+
+///////////////////////// REMOVE INPUT ////////////////////////////////////
+
+static void removeInputTest(const sp<IPreparedModel>& preparedModel, const Request& request) {
+    for (size_t input = 0; input < request.inputs.size(); ++input) {
+        const std::string message = "removeInput: removed input " + std::to_string(input);
+        validate(preparedModel, message, request,
+                 [input](Request* request) { hidl_vec_removeAt(&request->inputs, input); });
+    }
+}
+
+///////////////////////// REMOVE OUTPUT ////////////////////////////////////
+
+static void removeOutputTest(const sp<IPreparedModel>& preparedModel, const Request& request) {
+    for (size_t output = 0; output < request.outputs.size(); ++output) {
+        const std::string message = "removeOutput: removed output " + std::to_string(output);
+        validate(preparedModel, message, request,
+                 [output](Request* request) { hidl_vec_removeAt(&request->outputs, output); });
+    }
+}
+
+///////////////////////////// ENTRY POINT //////////////////////////////////
+
+std::vector<Request> createRequests(const std::vector<MixedTypedExampleType>& examples) {
+    const uint32_t INPUT = 0;
+    const uint32_t OUTPUT = 1;
+
+    std::vector<Request> requests;
+
+    for (auto& example : examples) {
+        const MixedTyped& inputs = example.first;
+        const MixedTyped& outputs = example.second;
+
+        std::vector<RequestArgument> inputs_info, outputs_info;
+        uint32_t inputSize = 0, outputSize = 0;
+
+        // This lambda only partially specifies the input metadata (the vector of
+        // RequestArguments); the actual input contents are copied into the input
+        // pool below.
+        for_all(inputs, [&inputs_info, &inputSize](int index, auto, auto s) {
+            if (inputs_info.size() <= static_cast<size_t>(index)) inputs_info.resize(index + 1);
+            RequestArgument arg = {
+                .location = {.poolIndex = INPUT, .offset = 0, .length = static_cast<uint32_t>(s)},
+                .dimensions = {},
+            };
+            RequestArgument arg_empty = {
+                .hasNoValue = true,
+            };
+            inputs_info[index] = s ? 
arg : arg_empty;
+            inputSize += s;
+        });
+        // Compute each input's byte offset within the input pool: input 0
+        // starts at offset 0, and each later input starts where the previous
+        // one ends.
+        {
+            size_t offset = 0;
+            for (auto& i : inputs_info) {
+                if (!i.hasNoValue) i.location.offset = offset;
+                offset += i.location.length;
+            }
+        }
+
+        // Go through all outputs, initialize RequestArgument descriptors
+        for_all(outputs, [&outputs_info, &outputSize](int index, auto, auto s) {
+            if (outputs_info.size() <= static_cast<size_t>(index)) outputs_info.resize(index + 1);
+            RequestArgument arg = {
+                .location = {.poolIndex = OUTPUT, .offset = 0, .length = static_cast<uint32_t>(s)},
+                .dimensions = {},
+            };
+            outputs_info[index] = arg;
+            outputSize += s;
+        });
+        // Compute each output's byte offset within the output pool, in the same
+        // way as for the inputs above.
+        {
+            size_t offset = 0;
+            for (auto& i : outputs_info) {
+                i.location.offset = offset;
+                offset += i.location.length;
+            }
+        }
+        std::vector<hidl_memory> pools = {nn::allocateSharedMemory(inputSize),
+                                          nn::allocateSharedMemory(outputSize)};
+        if (pools[INPUT].size() == 0 || pools[OUTPUT].size() == 0) {
+            return {};
+        }
+
+        // map the input pool into this process
+        sp<IMemory> inputMemory = mapMemory(pools[INPUT]);
+        if (inputMemory == nullptr) {
+            return {};
+        }
+        char* inputPtr = reinterpret_cast<char*>(static_cast<void*>(inputMemory->getPointer()));
+        if (inputPtr == nullptr) {
+            return {};
+        }
+
+        // copy the input contents into the input pool
+        inputMemory->update();
+        for_all(inputs, [&inputs_info, inputPtr](int index, auto p, auto s) {
+            char* begin = (char*)p;
+            char* end = begin + s;
+            // TODO: handle more than one input
+            std::copy(begin, end, inputPtr + inputs_info[index].location.offset);
+        });
+        inputMemory->commit();
+
+        requests.push_back({.inputs = inputs_info, .outputs = outputs_info, .pools = pools});
+    }
+
+    return requests;
+}
+
+void ValidationTest::validateRequests(const Model& model, const std::vector<Request>& requests) {
+    // create IPreparedModel
+    sp<IPreparedModel> preparedModel;
+    ASSERT_NO_FATAL_FAILURE(createPreparedModel(device, model, &preparedModel));
+    if (preparedModel == nullptr) {
+        return;
+    }
+
+    // validate each request
+    for (const Request& request : requests) {
+        removeInputTest(preparedModel, request);
+        removeOutputTest(preparedModel, request);
+    }
+}
+
+} // namespace functional
+} // namespace vts
+} // namespace V1_2
+} // namespace neuralnetworks
+} // namespace hardware
+} // namespace android
diff --git a/neuralnetworks/1.2/vts/functional/ValidationTests.cpp b/neuralnetworks/1.2/vts/functional/ValidationTests.cpp
new file mode 100644
index 0000000000..3bdc5cdea7
--- /dev/null
+++ b/neuralnetworks/1.2/vts/functional/ValidationTests.cpp
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "neuralnetworks_hidl_hal_test"
+
+#include "Models.h"
+#include "VtsHalNeuralnetworks.h"
+
+namespace android {
+namespace hardware {
+namespace neuralnetworks {
+namespace V1_2 {
+namespace vts {
+namespace functional {
+
+// forward declaration
+std::vector<Request> createRequests(const std::vector<MixedTypedExampleType>& examples);
+
+// generate validation tests
+#define VTS_CURRENT_TEST_CASE(TestName)                                           \
+    TEST_F(ValidationTest, TestName) {                                            \
+        const Model model = TestName::createTestModel();                          \
+        const std::vector<Request> requests = createRequests(TestName::examples); \
+        validateModel(model);                                                     \
+        validateRequests(model, requests);                                        \
+    }
+
+FOR_EACH_TEST_MODEL(VTS_CURRENT_TEST_CASE)
+
+#undef VTS_CURRENT_TEST_CASE
+
+} // namespace functional
+} // namespace vts
+} // namespace V1_2
+} // namespace neuralnetworks
+} // namespace hardware
+} // namespace android
diff --git a/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.cpp b/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.cpp
new file mode 100644
index 0000000000..90a910c6f0
--- /dev/null
+++ b/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.cpp
@@ -0,0 +1,86 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "neuralnetworks_hidl_hal_test"
+
+#include "VtsHalNeuralnetworks.h"
+
+namespace android {
+namespace hardware {
+namespace neuralnetworks {
+namespace V1_2 {
+namespace vts {
+namespace functional {
+
+// A class for test environment setup
+NeuralnetworksHidlEnvironment::NeuralnetworksHidlEnvironment() {}
+
+NeuralnetworksHidlEnvironment::~NeuralnetworksHidlEnvironment() {}
+
+NeuralnetworksHidlEnvironment* NeuralnetworksHidlEnvironment::getInstance() {
+    // This has to return a "new" object because it is freed inside
+    // ::testing::AddGlobalTestEnvironment when gtest is torn down.
+    static NeuralnetworksHidlEnvironment* instance = new NeuralnetworksHidlEnvironment();
+    return instance;
+}
+
+void NeuralnetworksHidlEnvironment::registerTestServices() {
+    registerTestService<IDevice>();
+}
+
+// The main test class for the NEURALNETWORKS HIDL HAL.
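+// SetUp() obtains the vendor's IDevice service through the test environment
+// and asserts that it is available; TearDown() releases the reference, so each
+// test case runs against a freshly fetched service handle.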
+NeuralnetworksHidlTest::NeuralnetworksHidlTest() {} + +NeuralnetworksHidlTest::~NeuralnetworksHidlTest() {} + +void NeuralnetworksHidlTest::SetUp() { + ::testing::VtsHalHidlTargetTestBase::SetUp(); + device = ::testing::VtsHalHidlTargetTestBase::getService<IDevice>( + NeuralnetworksHidlEnvironment::getInstance()); + ASSERT_NE(nullptr, device.get()); +} + +void NeuralnetworksHidlTest::TearDown() { + device = nullptr; + ::testing::VtsHalHidlTargetTestBase::TearDown(); +} + +} // namespace functional +} // namespace vts + +::std::ostream& operator<<(::std::ostream& os, ErrorStatus errorStatus) { + return os << toString(errorStatus); +} + +::std::ostream& operator<<(::std::ostream& os, DeviceStatus deviceStatus) { + return os << toString(deviceStatus); +} + +} // namespace V1_2 +} // namespace neuralnetworks +} // namespace hardware +} // namespace android + +using android::hardware::neuralnetworks::V1_2::vts::functional::NeuralnetworksHidlEnvironment; + +int main(int argc, char** argv) { + ::testing::AddGlobalTestEnvironment(NeuralnetworksHidlEnvironment::getInstance()); + ::testing::InitGoogleTest(&argc, argv); + NeuralnetworksHidlEnvironment::getInstance()->init(&argc, argv); + + int status = RUN_ALL_TESTS(); + return status; +} diff --git a/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.h b/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.h new file mode 100644 index 0000000000..a87d788c47 --- /dev/null +++ b/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.h @@ -0,0 +1,92 @@ +/* + * Copyright (C) 2018 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef VTS_HAL_NEURALNETWORKS_V1_2_H +#define VTS_HAL_NEURALNETWORKS_V1_2_H + +#include <android/hardware/neuralnetworks/1.0/types.h> +#include <android/hardware/neuralnetworks/1.1/types.h> +#include <android/hardware/neuralnetworks/1.2/IDevice.h> +#include <android/hardware/neuralnetworks/1.2/types.h> + +#include <VtsHalHidlTargetTestBase.h> +#include <VtsHalHidlTargetTestEnvBase.h> + +#include <android-base/macros.h> +#include <gtest/gtest.h> +#include <iostream> +#include <vector> + +namespace android { +namespace hardware { +namespace neuralnetworks { +namespace V1_2 { + +using V1_0::DeviceStatus; +using V1_0::ErrorStatus; +using V1_0::Request; + +namespace vts { +namespace functional { + +// A class for test environment setup +class NeuralnetworksHidlEnvironment : public ::testing::VtsHalHidlTargetTestEnvBase { + DISALLOW_COPY_AND_ASSIGN(NeuralnetworksHidlEnvironment); + NeuralnetworksHidlEnvironment(); + ~NeuralnetworksHidlEnvironment() override; + + public: + static NeuralnetworksHidlEnvironment* getInstance(); + void registerTestServices() override; +}; + +// The main test class for NEURALNETWORKS HIDL HAL. 
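+// The fixture owns service discovery; subclasses such as ValidationTest and
+// GeneratedTest below reuse the protected 'device' handle populated in SetUp()
+// rather than fetching the service themselves.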
+class NeuralnetworksHidlTest : public ::testing::VtsHalHidlTargetTestBase {
+    DISALLOW_COPY_AND_ASSIGN(NeuralnetworksHidlTest);
+
+   public:
+    NeuralnetworksHidlTest();
+    ~NeuralnetworksHidlTest() override;
+    void SetUp() override;
+    void TearDown() override;
+
+   protected:
+    sp<IDevice> device;
+};
+
+// Tag for the validation tests
+class ValidationTest : public NeuralnetworksHidlTest {
+   protected:
+    void validateModel(const Model& model);
+    void validateRequests(const Model& model, const std::vector<Request>& requests);
+};
+
+// Tag for the generated tests
+class GeneratedTest : public NeuralnetworksHidlTest {};
+
+} // namespace functional
+} // namespace vts
+
+// pretty-print values for error messages
+::std::ostream& operator<<(::std::ostream& os, ErrorStatus errorStatus);
+::std::ostream& operator<<(::std::ostream& os, DeviceStatus deviceStatus);
+
+} // namespace V1_2
+} // namespace neuralnetworks
+} // namespace hardware
+} // namespace android
+
+#endif // VTS_HAL_NEURALNETWORKS_V1_2_H
