50 files changed, 3056 insertions, 166 deletions
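Context for the init .rc hunks that follow: across the audio, bluetooth, and camera HAL services, this change replaces raw writepid writes into /dev/cpuset and /dev/stune with named task_profiles. A minimal sketch of the pattern is shown here using a hypothetical service name (vendor.example-hal); only the last line differs from the old style, and the profile names ProcessCapacityHigh and HighPerformance are the ones the audio HAL adopts in the first hunk below.

# Hypothetical service entry (vendor.example-hal) illustrating the migration.
service vendor.example-hal /vendor/bin/hw/vendor.example-hal-service
    class hal
    ioprio rt 4
    # old style, removed in this change:
    #   writepid /dev/cpuset/foreground/tasks /dev/stune/foreground/tasks
    # new style, as in the audio HAL hunk below:
    task_profiles ProcessCapacityHigh HighPerformance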
diff --git a/audio/common/all-versions/default/service/android.hardware.audio.service.rc b/audio/common/all-versions/default/service/android.hardware.audio.service.rc index 63d2542498..f7e1e244d5 100644 --- a/audio/common/all-versions/default/service/android.hardware.audio.service.rc +++ b/audio/common/all-versions/default/service/android.hardware.audio.service.rc @@ -5,5 +5,5 @@ service vendor.audio-hal /vendor/bin/hw/android.hardware.audio.service group audio camera drmrpc inet media mediadrm net_bt net_bt_admin net_bw_acct wakelock capabilities BLOCK_SUSPEND ioprio rt 4 - writepid /dev/cpuset/foreground/tasks /dev/stune/foreground/tasks + task_profiles ProcessCapacityHigh HighPerformance onrestart restart audioserver diff --git a/automotive/vehicle/2.0/default/impl/vhal_v2_0/EmulatedVehicleHal.cpp b/automotive/vehicle/2.0/default/impl/vhal_v2_0/EmulatedVehicleHal.cpp index bdc52448e7..02c00c1e04 100644 --- a/automotive/vehicle/2.0/default/impl/vhal_v2_0/EmulatedVehicleHal.cpp +++ b/automotive/vehicle/2.0/default/impl/vhal_v2_0/EmulatedVehicleHal.cpp @@ -175,9 +175,7 @@ StatusCode EmulatedVehicleHal::set(const VehiclePropValue& propValue) { // here, since we never send the control signal back, the value of 'updateStatus' flag // does not matter here. auto status = mVehicleClient->setProperty(propValue, updateStatus); - if (status != StatusCode::OK) { - return status; - } + return status; } else if (mHvacPowerProps.count(propValue.prop)) { auto hvacPowerOn = mPropStore->readValueOrNull( toInt(VehicleProperty::HVAC_POWER_ON), diff --git a/automotive/vehicle/2.0/types.hal b/automotive/vehicle/2.0/types.hal index 82f938c611..ee34e420c9 100644 --- a/automotive/vehicle/2.0/types.hal +++ b/automotive/vehicle/2.0/types.hal @@ -606,8 +606,23 @@ enum VehicleProperty : int32_t { /** * Tire pressure * - * min/max value indicates tire pressure sensor range. Each tire will have a separate min/max - * value denoted by its areaConfig.areaId. + * Each tire is identified by its areaConfig.areaId config, and its + * minFloatValue/maxFloatValue are used to store the OEM-recommended pressure + * range. + * The Min value in the areaConfig data represents the lower bound of + * the recommended tire pressure. + * The Max value in the areaConfig data represents the upper bound of + * the recommended tire pressure. + * For example: + * The following areaConfig indicates the recommended tire pressure + * of the left_front tire is from 200.0 KILOPASCAL to 240.0 KILOPASCAL. + * .areaConfigs = { + * VehicleAreaConfig { + * .areaId = VehicleAreaWheel::LEFT_FRONT, + * .minFloatValue = 200.0, + * .maxFloatValue = 240.0, + * } + * }, * * @change_mode VehiclePropertyChangeMode:CONTINUOUS * @access VehiclePropertyAccess:READ @@ -786,7 +801,8 @@ enum VehicleProperty : int32_t { /* * HVAC Properties * - * Additional rules for mapping a zoned HVAC property to AreaIDs: + * Additional rules for mapping a zoned HVAC property (except + * HVAC_MAX_DEFROST_ON) to AreaIDs: * - Every seat in VehicleAreaSeat that is available in the car, must be * part of an AreaID in the AreaID array. * * @@ -919,6 +935,11 @@ enum VehicleProperty : int32_t { * possible. Any parameters modified as a side effect of turning on/off * the MAX DEFROST parameter shall generate onPropertyEvent() callbacks to * the VHAL. + * The AreaIDs for HVAC_MAX_DEFROST_ON indicate MAX DEFROST can be controlled + * in the area. + * For example: + * areaConfig.areaId = {ROW_1_LEFT | ROW_1_RIGHT} indicates HVAC_MAX_DEFROST_ON + * can only be controlled for the front rows.
* * @change_mode VehiclePropertyChangeMode:ON_CHANGE * @access VehiclePropertyAccess:READ_WRITE diff --git a/bluetooth/1.0/default/android.hardware.bluetooth@1.0-service.rc b/bluetooth/1.0/default/android.hardware.bluetooth@1.0-service.rc index 9fa128dfc6..def59de566 100644 --- a/bluetooth/1.0/default/android.hardware.bluetooth@1.0-service.rc +++ b/bluetooth/1.0/default/android.hardware.bluetooth@1.0-service.rc @@ -4,5 +4,5 @@ service vendor.bluetooth-1-0 /vendor/bin/hw/android.hardware.bluetooth@1.0-servi capabilities BLOCK_SUSPEND NET_ADMIN SYS_NICE user bluetooth group bluetooth - writepid /dev/stune/foreground/tasks + task_profiles HighPerformance diff --git a/bluetooth/1.1/default/android.hardware.bluetooth@1.1-service.rc b/bluetooth/1.1/default/android.hardware.bluetooth@1.1-service.rc index 49f0be3c10..5c7cbf4b3a 100644 --- a/bluetooth/1.1/default/android.hardware.bluetooth@1.1-service.rc +++ b/bluetooth/1.1/default/android.hardware.bluetooth@1.1-service.rc @@ -5,5 +5,5 @@ service vendor.bluetooth-1-1 /vendor/bin/hw/android.hardware.bluetooth@1.1-servi capabilities BLOCK_SUSPEND NET_ADMIN SYS_NICE user bluetooth group bluetooth - writepid /dev/stune/foreground/tasks + task_profiles HighPerformance diff --git a/boot/1.1/vts/functional/VtsHalBootV1_1TargetTest.cpp b/boot/1.1/vts/functional/VtsHalBootV1_1TargetTest.cpp index 7c58ef3717..30b965ddbb 100644 --- a/boot/1.1/vts/functional/VtsHalBootV1_1TargetTest.cpp +++ b/boot/1.1/vts/functional/VtsHalBootV1_1TargetTest.cpp @@ -76,7 +76,11 @@ TEST_P(BootHidlTest, SetSnapshotMergeStatus) { for (const auto value : ValidMergeStatusValues()) { EXPECT_TRUE(boot->setSnapshotMergeStatus(value).withDefault(false)); auto status = boot->getSnapshotMergeStatus(); - EXPECT_EQ(status, value); + if (value == MergeStatus::SNAPSHOTTED) { + EXPECT_TRUE(status == MergeStatus::SNAPSHOTTED || status == MergeStatus::NONE); + } else { + EXPECT_EQ(status, value); + } } } diff --git a/camera/provider/2.4/default/android.hardware.camera.provider@2.4-external-service.rc b/camera/provider/2.4/default/android.hardware.camera.provider@2.4-external-service.rc index 64cf321d83..52ade977b3 100644 --- a/camera/provider/2.4/default/android.hardware.camera.provider@2.4-external-service.rc +++ b/camera/provider/2.4/default/android.hardware.camera.provider@2.4-external-service.rc @@ -5,4 +5,4 @@ service vendor.camera-provider-2-4-ext /vendor/bin/hw/android.hardware.camera.pr group audio camera input drmrpc usb ioprio rt 4 capabilities SYS_NICE - writepid /dev/cpuset/camera-daemon/tasks /dev/stune/top-app/tasks + task_profiles CameraServiceCapacity MaxPerformance diff --git a/camera/provider/2.4/default/android.hardware.camera.provider@2.4-service-lazy.rc b/camera/provider/2.4/default/android.hardware.camera.provider@2.4-service-lazy.rc index e8549ed82d..63ded900c0 100644 --- a/camera/provider/2.4/default/android.hardware.camera.provider@2.4-service-lazy.rc +++ b/camera/provider/2.4/default/android.hardware.camera.provider@2.4-service-lazy.rc @@ -7,4 +7,4 @@ service vendor.camera-provider-2-4 /vendor/bin/hw/android.hardware.camera.provid group audio camera input drmrpc ioprio rt 4 capabilities SYS_NICE - writepid /dev/cpuset/camera-daemon/tasks /dev/stune/top-app/tasks + task_profiles CameraServiceCapacity MaxPerformance diff --git a/camera/provider/2.4/default/android.hardware.camera.provider@2.4-service-lazy_64.rc b/camera/provider/2.4/default/android.hardware.camera.provider@2.4-service-lazy_64.rc index 2dfac764eb..953d1af7e4 100644 --- 
a/camera/provider/2.4/default/android.hardware.camera.provider@2.4-service-lazy_64.rc +++ b/camera/provider/2.4/default/android.hardware.camera.provider@2.4-service-lazy_64.rc @@ -7,4 +7,4 @@ service vendor.camera-provider-2-4 /vendor/bin/hw/android.hardware.camera.provid group audio camera input drmrpc ioprio rt 4 capabilities SYS_NICE - writepid /dev/cpuset/camera-daemon/tasks /dev/stune/top-app/tasks + task_profiles CameraServiceCapacity MaxPerformance diff --git a/camera/provider/2.4/default/android.hardware.camera.provider@2.4-service.rc b/camera/provider/2.4/default/android.hardware.camera.provider@2.4-service.rc index 5c8762bf87..ff924ed360 100644 --- a/camera/provider/2.4/default/android.hardware.camera.provider@2.4-service.rc +++ b/camera/provider/2.4/default/android.hardware.camera.provider@2.4-service.rc @@ -5,4 +5,4 @@ service vendor.camera-provider-2-4 /vendor/bin/hw/android.hardware.camera.provid group audio camera input drmrpc ioprio rt 4 capabilities SYS_NICE - writepid /dev/cpuset/camera-daemon/tasks /dev/stune/foreground/tasks + task_profiles CameraServiceCapacity HighPerformance diff --git a/camera/provider/2.4/default/android.hardware.camera.provider@2.4-service_64.rc b/camera/provider/2.4/default/android.hardware.camera.provider@2.4-service_64.rc index 3042db1beb..642e84e5b0 100644 --- a/camera/provider/2.4/default/android.hardware.camera.provider@2.4-service_64.rc +++ b/camera/provider/2.4/default/android.hardware.camera.provider@2.4-service_64.rc @@ -5,4 +5,4 @@ service vendor.camera-provider-2-4 /vendor/bin/hw/android.hardware.camera.provid group audio camera input drmrpc ioprio rt 4 capabilities SYS_NICE - writepid /dev/cpuset/camera-daemon/tasks /dev/stune/foreground/tasks + task_profiles CameraServiceCapacity HighPerformance diff --git a/camera/provider/2.5/default/android.hardware.camera.provider@2.5-external-service.rc b/camera/provider/2.5/default/android.hardware.camera.provider@2.5-external-service.rc index 107097e661..b3b06b2391 100644 --- a/camera/provider/2.5/default/android.hardware.camera.provider@2.5-external-service.rc +++ b/camera/provider/2.5/default/android.hardware.camera.provider@2.5-external-service.rc @@ -6,4 +6,4 @@ service vendor.camera-provider-2-5-ext /vendor/bin/hw/android.hardware.camera.pr group audio camera input drmrpc usb ioprio rt 4 capabilities SYS_NICE - writepid /dev/cpuset/camera-daemon/tasks /dev/stune/top-app/tasks + task_profiles CameraServiceCapacity MaxPerformance diff --git a/camera/provider/2.5/default/android.hardware.camera.provider@2.5-service-lazy.rc b/camera/provider/2.5/default/android.hardware.camera.provider@2.5-service-lazy.rc index b45158a082..7c5e69b070 100644 --- a/camera/provider/2.5/default/android.hardware.camera.provider@2.5-service-lazy.rc +++ b/camera/provider/2.5/default/android.hardware.camera.provider@2.5-service-lazy.rc @@ -8,4 +8,4 @@ service vendor.camera-provider-2-5 /vendor/bin/hw/android.hardware.camera.provid group audio camera input drmrpc ioprio rt 4 capabilities SYS_NICE - writepid /dev/cpuset/camera-daemon/tasks /dev/stune/top-app/tasks + task_profiles CameraServiceCapacity MaxPerformance diff --git a/camera/provider/2.5/default/android.hardware.camera.provider@2.5-service-lazy_64.rc b/camera/provider/2.5/default/android.hardware.camera.provider@2.5-service-lazy_64.rc index 955b28e6b4..49bca8f93c 100644 --- a/camera/provider/2.5/default/android.hardware.camera.provider@2.5-service-lazy_64.rc +++ b/camera/provider/2.5/default/android.hardware.camera.provider@2.5-service-lazy_64.rc @@ -8,4 
+8,4 @@ service vendor.camera-provider-2-5 /vendor/bin/hw/android.hardware.camera.provid group audio camera input drmrpc ioprio rt 4 capabilities SYS_NICE - writepid /dev/cpuset/camera-daemon/tasks /dev/stune/top-app/tasks + task_profiles CameraServiceCapacity MaxPerformance diff --git a/camera/provider/2.5/default/android.hardware.camera.provider@2.5-service.rc b/camera/provider/2.5/default/android.hardware.camera.provider@2.5-service.rc index c065815fab..4bd1fb4078 100644 --- a/camera/provider/2.5/default/android.hardware.camera.provider@2.5-service.rc +++ b/camera/provider/2.5/default/android.hardware.camera.provider@2.5-service.rc @@ -6,4 +6,4 @@ service vendor.camera-provider-2-5 /vendor/bin/hw/android.hardware.camera.provid group audio camera input drmrpc ioprio rt 4 capabilities SYS_NICE - writepid /dev/cpuset/camera-daemon/tasks /dev/stune/top-app/tasks + task_profiles CameraServiceCapacity MaxPerformance diff --git a/camera/provider/2.5/default/android.hardware.camera.provider@2.5-service_64.rc b/camera/provider/2.5/default/android.hardware.camera.provider@2.5-service_64.rc index 63dd11d349..b4443256cf 100644 --- a/camera/provider/2.5/default/android.hardware.camera.provider@2.5-service_64.rc +++ b/camera/provider/2.5/default/android.hardware.camera.provider@2.5-service_64.rc @@ -6,4 +6,4 @@ service vendor.camera-provider-2-5 /vendor/bin/hw/android.hardware.camera.provid group audio camera input drmrpc ioprio rt 4 capabilities SYS_NICE - writepid /dev/cpuset/camera-daemon/tasks /dev/stune/top-app/tasks + task_profiles CameraServiceCapacity MaxPerformance diff --git a/neuralnetworks/1.0/vts/functional/BasicTests.cpp b/neuralnetworks/1.0/vts/functional/BasicTests.cpp index cc44c9efe1..bda43b1986 100644 --- a/neuralnetworks/1.0/vts/functional/BasicTests.cpp +++ b/neuralnetworks/1.0/vts/functional/BasicTests.cpp @@ -18,8 +18,12 @@ #include "VtsHalNeuralnetworks.h" +#include "1.0/Callbacks.h" + namespace android::hardware::neuralnetworks::V1_0::vts::functional { +using implementation::PreparedModelCallback; + // create device test TEST_P(NeuralnetworksHidlTest, CreateDevice) {} @@ -43,4 +47,136 @@ TEST_P(NeuralnetworksHidlTest, GetCapabilitiesTest) { EXPECT_TRUE(ret.isOk()); } +// detect cycle +TEST_P(NeuralnetworksHidlTest, CycleTest) { + // opnd0 = TENSOR_FLOAT32 // model input + // opnd1 = TENSOR_FLOAT32 // model input + // opnd2 = INT32 // model input + // opnd3 = ADD(opnd0, opnd4, opnd2) + // opnd4 = ADD(opnd1, opnd3, opnd2) + // opnd5 = ADD(opnd4, opnd0, opnd2) // model output + // + // +-----+ + // | | + // v | + // 3 = ADD(0, 4, 2) | + // | | + // +----------+ | + // | | + // v | + // 4 = ADD(1, 3, 2) | + // | | + // +----------------+ + // | + // | + // +-------+ + // | + // v + // 5 = ADD(4, 0, 2) + + const std::vector<Operand> operands = { + { + // operands[0] + .type = OperandType::TENSOR_FLOAT32, + .dimensions = {1}, + .numberOfConsumers = 2, + .scale = 0.0f, + .zeroPoint = 0, + .lifetime = OperandLifeTime::MODEL_INPUT, + .location = {.poolIndex = 0, .offset = 0, .length = 0}, + }, + { + // operands[1] + .type = OperandType::TENSOR_FLOAT32, + .dimensions = {1}, + .numberOfConsumers = 1, + .scale = 0.0f, + .zeroPoint = 0, + .lifetime = OperandLifeTime::MODEL_INPUT, + .location = {.poolIndex = 0, .offset = 0, .length = 0}, + }, + { + // operands[2] + .type = OperandType::INT32, + .dimensions = {}, + .numberOfConsumers = 3, + .scale = 0.0f, + .zeroPoint = 0, + .lifetime = OperandLifeTime::MODEL_INPUT, + .location = {.poolIndex = 0, .offset = 0, .length = 0}, + }, + { + // 
operands[3] + .type = OperandType::TENSOR_FLOAT32, + .dimensions = {1}, + .numberOfConsumers = 1, + .scale = 0.0f, + .zeroPoint = 0, + .lifetime = OperandLifeTime::TEMPORARY_VARIABLE, + .location = {.poolIndex = 0, .offset = 0, .length = 0}, + }, + { + // operands[4] + .type = OperandType::TENSOR_FLOAT32, + .dimensions = {1}, + .numberOfConsumers = 2, + .scale = 0.0f, + .zeroPoint = 0, + .lifetime = OperandLifeTime::TEMPORARY_VARIABLE, + .location = {.poolIndex = 0, .offset = 0, .length = 0}, + }, + { + // operands[5] + .type = OperandType::TENSOR_FLOAT32, + .dimensions = {1}, + .numberOfConsumers = 0, + .scale = 0.0f, + .zeroPoint = 0, + .lifetime = OperandLifeTime::MODEL_OUTPUT, + .location = {.poolIndex = 0, .offset = 0, .length = 0}, + }, + }; + + const std::vector<Operation> operations = { + {.type = OperationType::ADD, .inputs = {0, 4, 2}, .outputs = {3}}, + {.type = OperationType::ADD, .inputs = {1, 3, 2}, .outputs = {4}}, + {.type = OperationType::ADD, .inputs = {4, 0, 2}, .outputs = {5}}, + }; + + const Model model = { + .operands = operands, + .operations = operations, + .inputIndexes = {0, 1, 2}, + .outputIndexes = {5}, + .operandValues = {}, + .pools = {}, + }; + + // ensure that getSupportedOperations() checks model validity + ErrorStatus supportedOpsErrorStatus = ErrorStatus::GENERAL_FAILURE; + Return<void> supportedOpsReturn = kDevice->getSupportedOperations( + model, [&model, &supportedOpsErrorStatus](ErrorStatus status, + const hidl_vec<bool>& supported) { + supportedOpsErrorStatus = status; + if (status == ErrorStatus::NONE) { + ASSERT_EQ(supported.size(), model.operations.size()); + } + }); + ASSERT_TRUE(supportedOpsReturn.isOk()); + ASSERT_EQ(supportedOpsErrorStatus, ErrorStatus::INVALID_ARGUMENT); + + // ensure that prepareModel() checks model validity + sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback; + Return<ErrorStatus> prepareLaunchReturn = kDevice->prepareModel(model, preparedModelCallback); + ASSERT_TRUE(prepareLaunchReturn.isOk()); + // Note that preparation can fail for reasons other than an + // invalid model (invalid model should result in + // INVALID_ARGUMENT) -- for example, perhaps not all + // operations are supported, or perhaps the device hit some + // kind of capacity limit. 
+ EXPECT_NE(prepareLaunchReturn, ErrorStatus::NONE); + EXPECT_NE(preparedModelCallback->getStatus(), ErrorStatus::NONE); + EXPECT_EQ(preparedModelCallback->getPreparedModel(), nullptr); +} + } // namespace android::hardware::neuralnetworks::V1_0::vts::functional diff --git a/neuralnetworks/1.0/vts/functional/Utils.cpp b/neuralnetworks/1.0/vts/functional/Utils.cpp index 3613e69088..32850b060c 100644 --- a/neuralnetworks/1.0/vts/functional/Utils.cpp +++ b/neuralnetworks/1.0/vts/functional/Utils.cpp @@ -29,7 +29,11 @@ #include <gtest/gtest.h> #include <algorithm> +#include <cstring> +#include <functional> #include <iostream> +#include <map> +#include <numeric> #include <vector> namespace android::hardware::neuralnetworks { @@ -172,6 +176,45 @@ std::vector<TestBuffer> ExecutionContext::getOutputBuffers(const Request& reques return outputBuffers; } +uint32_t sizeOfData(V1_0::OperandType type) { + switch (type) { + case V1_0::OperandType::FLOAT32: + case V1_0::OperandType::INT32: + case V1_0::OperandType::UINT32: + case V1_0::OperandType::TENSOR_FLOAT32: + case V1_0::OperandType::TENSOR_INT32: + return 4; + case V1_0::OperandType::TENSOR_QUANT8_ASYMM: + return 1; + default: + CHECK(false) << "Invalid OperandType " << static_cast<uint32_t>(type); + return 0; + } +} + +static bool isTensor(V1_0::OperandType type) { + switch (type) { + case V1_0::OperandType::FLOAT32: + case V1_0::OperandType::INT32: + case V1_0::OperandType::UINT32: + return false; + case V1_0::OperandType::TENSOR_FLOAT32: + case V1_0::OperandType::TENSOR_INT32: + case V1_0::OperandType::TENSOR_QUANT8_ASYMM: + return true; + default: + CHECK(false) << "Invalid OperandType " << static_cast<uint32_t>(type); + return false; + } +} + +uint32_t sizeOfData(const V1_0::Operand& operand) { + const uint32_t dataSize = sizeOfData(operand.type); + if (isTensor(operand.type) && operand.dimensions.size() == 0) return 0; + return std::accumulate(operand.dimensions.begin(), operand.dimensions.end(), dataSize, + std::multiplies<>{}); +} + std::string gtestCompliantName(std::string name) { // gtest test names must only contain alphanumeric characters std::replace_if( diff --git a/neuralnetworks/1.0/vts/functional/ValidateModel.cpp b/neuralnetworks/1.0/vts/functional/ValidateModel.cpp index 79d85943b1..5ffbd4328c 100644 --- a/neuralnetworks/1.0/vts/functional/ValidateModel.cpp +++ b/neuralnetworks/1.0/vts/functional/ValidateModel.cpp @@ -17,9 +17,14 @@ #define LOG_TAG "neuralnetworks_hidl_hal_test" #include "1.0/Callbacks.h" +#include "1.0/Utils.h" #include "GeneratedTestHarness.h" #include "VtsHalNeuralnetworks.h" +#include <optional> +#include <type_traits> +#include <utility> + namespace android::hardware::neuralnetworks::V1_0::vts::functional { using implementation::PreparedModelCallback; @@ -67,26 +72,6 @@ static void validate(const sp<IDevice>& device, const std::string& message, validatePrepareModel(device, message, model); } -// Delete element from hidl_vec. hidl_vec doesn't support a "remove" operation, -// so this is efficiently accomplished by moving the element to the end and -// resizing the hidl_vec to one less. 
-template <typename Type> -static void hidl_vec_removeAt(hidl_vec<Type>* vec, uint32_t index) { - if (vec) { - std::rotate(vec->begin() + index, vec->begin() + index + 1, vec->end()); - vec->resize(vec->size() - 1); - } -} - -template <typename Type> -static uint32_t hidl_vec_push_back(hidl_vec<Type>* vec, const Type& value) { - // assume vec is valid - const uint32_t index = vec->size(); - vec->resize(index + 1); - (*vec)[index] = value; - return index; -} - static uint32_t addOperand(Model* model) { return hidl_vec_push_back(&model->operands, { @@ -107,6 +92,211 @@ static uint32_t addOperand(Model* model, OperandLifeTime lifetime) { return index; } +// If we introduce a CONSTANT_COPY for an operand of size operandSize, +// how much will this increase the size of the model? This assumes +// that we can (re)use all of model.operandValues for the operand +// value. +static size_t constantCopyExtraSize(const Model& model, size_t operandSize) { + const size_t operandValuesSize = model.operandValues.size(); + return (operandValuesSize < operandSize) ? (operandSize - operandValuesSize) : 0; +} + +// Highly specialized utility routine for converting an operand to +// CONSTANT_COPY lifetime. +// +// Expects that: +// - operand has a known size +// - operand->lifetime has already been set to CONSTANT_COPY +// - operand->location has been zeroed out +// +// Does the following: +// - initializes operand->location to point to the beginning of model->operandValues +// - resizes model->operandValues (if necessary) to be large enough for the operand +// value, padding it with zeroes on the end +// +// Potential problem: +// By changing the operand to CONSTANT_COPY lifetime, this function is effectively initializing the +// operand with unspecified (but deterministic) data. This means that the model may be invalidated +// in two ways: not only is the lifetime of CONSTANT_COPY invalid, but the operand's value in the +// graph may also be invalid (e.g., if the operand is used as an activation code and has an invalid +// value). For now, this should be fine because it just means we're not testing what we think we're +// testing in certain cases; but we can handwave this and assume we're probabilistically likely to +// exercise the validation code over the span of the entire test set and operand space. +// +// Aborts if the specified operand type is an extension type or OEM type. +static void becomeConstantCopy(Model* model, Operand* operand) { + // sizeOfData will abort if the specified type is an extension type or OEM type. + const size_t sizeOfOperand = sizeOfData(*operand); + EXPECT_NE(sizeOfOperand, size_t(0)); + operand->location.poolIndex = 0; + operand->location.offset = 0; + operand->location.length = sizeOfOperand; + if (model->operandValues.size() < sizeOfOperand) { + model->operandValues.resize(sizeOfOperand); + } +} + +// The sizeForBinder() functions estimate the size of the +// representation of a value when sent to binder. It's probably a bit +// of an under-estimate, because we don't know the size of the +// metadata in the binder format (e.g., representation of the size of +// a vector); but at least it adds up "big" things like vector +// contents. 
However, it doesn't treat inter-field or end-of-struct +// padding in a methodical way -- there's no attempt to be consistent +// in whether or not padding in the native (C++) representation +// contributes to the estimated size for the binder representation; +// and there's no attempt to understand what padding (if any) is +// needed in the binder representation. +// +// This assumes that non-metadata uses a fixed length encoding (e.g., +// a uint32_t is always encoded in sizeof(uint32_t) bytes, rather than +// using an encoding whose length is related to the magnitude of the +// encoded value). + +template <typename Type> +static size_t sizeForBinder(const Type& val) { + static_assert(std::is_trivially_copyable_v<std::remove_reference_t<Type>>, + "expected a trivially copyable type"); + return sizeof(val); +} + +template <typename Type> +static size_t sizeForBinder(const hidl_vec<Type>& vec) { + return std::accumulate(vec.begin(), vec.end(), 0, + [](size_t acc, const Type& x) { return acc + sizeForBinder(x); }); +} + +template <> +size_t sizeForBinder(const Operand& operand) { + size_t size = 0; + + size += sizeForBinder(operand.type); + size += sizeForBinder(operand.dimensions); + size += sizeForBinder(operand.numberOfConsumers); + size += sizeForBinder(operand.scale); + size += sizeForBinder(operand.zeroPoint); + size += sizeForBinder(operand.lifetime); + size += sizeForBinder(operand.location); + + return size; +} + +template <> +size_t sizeForBinder(const Operation& operation) { + size_t size = 0; + + size += sizeForBinder(operation.type); + size += sizeForBinder(operation.inputs); + size += sizeForBinder(operation.outputs); + + return size; +} + +template <> +size_t sizeForBinder(const hidl_string& name) { + return name.size(); +} + +template <> +size_t sizeForBinder(const hidl_memory& memory) { + // This is just a guess. + + size_t size = 0; + + if (const native_handle_t* handle = memory.handle()) { + size += sizeof(*handle); + size += sizeof(handle->data[0] * (handle->numFds + handle->numInts)); + } + size += sizeForBinder(memory.name()); + + return size; +} + +template <> +size_t sizeForBinder(const Model& model) { + size_t size = 0; + + size += sizeForBinder(model.operands); + size += sizeForBinder(model.operations); + size += sizeForBinder(model.inputIndexes); + size += sizeForBinder(model.outputIndexes); + size += sizeForBinder(model.operandValues); + size += sizeForBinder(model.pools); + + return size; +} + +// https://developer.android.com/reference/android/os/TransactionTooLargeException.html +// +// "The Binder transaction buffer has a limited fixed size, +// currently 1Mb, which is shared by all transactions in progress +// for the process." +// +// Will our representation fit under this limit? There are two complications: +// - Our representation size is just approximate (see sizeForBinder()). +// - This object may not be the only occupant of the Binder transaction buffer. +// So we'll be very conservative: We want the representation size to be no +// larger than half the transaction buffer size. +// +// If our representation grows large enough that it still fits within +// the transaction buffer but combined with other transactions may +// exceed the buffer size, then we may see intermittent HAL transport +// errors. +static bool exceedsBinderSizeLimit(size_t representationSize) { + // Instead of using this fixed buffer size, we might instead be able to use + // ProcessState::self()->getMmapSize(). 
However, this has a potential + // problem: The binder/mmap size of the current process does not necessarily + // indicate the binder/mmap size of the service (i.e., the other process). + // The only way it would be a good indication is if both the current process + // and the service use the default size. + static const size_t kHalfBufferSize = 1024 * 1024 / 2; + + return representationSize > kHalfBufferSize; +} + +///////////////////////// VALIDATE EXECUTION ORDER //////////////////////////// + +static void mutateExecutionOrderTest(const sp<IDevice>& device, const V1_0::Model& model) { + for (size_t operation = 0; operation < model.operations.size(); ++operation) { + const Operation& operationObj = model.operations[operation]; + for (uint32_t input : operationObj.inputs) { + if (model.operands[input].lifetime == OperandLifeTime::TEMPORARY_VARIABLE || + model.operands[input].lifetime == OperandLifeTime::MODEL_OUTPUT) { + // This operation reads an operand written by some + // other operation. Move this operation to the + // beginning of the sequence, ensuring that it reads + // the operand before that operand is written, thereby + // violating execution order rules. + const std::string message = "mutateExecutionOrderTest: operation " + + std::to_string(operation) + " is a reader"; + validate(device, message, model, [operation](Model* model) { + auto& operations = model->operations; + std::rotate(operations.begin(), operations.begin() + operation, + operations.begin() + operation + 1); + }); + break; // only need to do this once per operation + } + } + for (uint32_t output : operationObj.outputs) { + if (model.operands[output].numberOfConsumers > 0) { + // This operation writes an operand read by some other + // operation. Move this operation to the end of the + // sequence, ensuring that it writes the operand after + // that operand is read, thereby violating execution + // order rules. 
+ const std::string message = "mutateExecutionOrderTest: operation " + + std::to_string(operation) + " is a writer"; + validate(device, message, model, [operation](Model* model) { + auto& operations = model->operations; + std::rotate(operations.begin() + operation, operations.begin() + operation + 1, + operations.end()); + }); + break; // only need to do this once per operation + } + } + } +} + ///////////////////////// VALIDATE MODEL OPERAND TYPE ///////////////////////// static const int32_t invalidOperandTypes[] = { @@ -218,9 +408,233 @@ static void mutateOperandZeroPointTest(const sp<IDevice>& device, const Model& m } } +///////////////////////// VALIDATE OPERAND LIFETIME ///////////////////////////////////////////// + +static std::vector<OperandLifeTime> getInvalidLifeTimes(const Model& model, size_t modelSize, + const Operand& operand) { + // TODO: Support OperandLifeTime::CONSTANT_REFERENCE as an invalid lifetime + // TODO: Support OperandLifeTime::NO_VALUE as an invalid lifetime + + // Ways to get an invalid lifetime: + // - change whether a lifetime means an operand should have a writer + std::vector<OperandLifeTime> ret; + switch (operand.lifetime) { + case OperandLifeTime::MODEL_OUTPUT: + case OperandLifeTime::TEMPORARY_VARIABLE: + ret = { + OperandLifeTime::MODEL_INPUT, + OperandLifeTime::CONSTANT_COPY, + }; + break; + case OperandLifeTime::CONSTANT_COPY: + case OperandLifeTime::CONSTANT_REFERENCE: + case OperandLifeTime::MODEL_INPUT: + ret = { + OperandLifeTime::TEMPORARY_VARIABLE, + OperandLifeTime::MODEL_OUTPUT, + }; + break; + case OperandLifeTime::NO_VALUE: + // Not enough information to know whether + // TEMPORARY_VARIABLE or CONSTANT_COPY would be invalid -- + // is this operand written (then CONSTANT_COPY would be + // invalid) or not (then TEMPORARY_VARIABLE would be + // invalid)? 
+ break; + default: + ADD_FAILURE(); + break; + } + + const size_t operandSize = sizeOfData(operand); // will be zero if shape is unknown + if (!operandSize || + exceedsBinderSizeLimit(modelSize + constantCopyExtraSize(model, operandSize))) { + // Unknown size or too-large size + ret.erase(std::remove(ret.begin(), ret.end(), OperandLifeTime::CONSTANT_COPY), ret.end()); + } + + return ret; +} + +static void mutateOperandLifeTimeTest(const sp<IDevice>& device, const V1_0::Model& model) { + const size_t modelSize = sizeForBinder(model); + for (size_t operand = 0; operand < model.operands.size(); ++operand) { + const std::vector<OperandLifeTime> invalidLifeTimes = + getInvalidLifeTimes(model, modelSize, model.operands[operand]); + for (OperandLifeTime invalidLifeTime : invalidLifeTimes) { + const std::string message = "mutateOperandLifetimeTest: operand " + + std::to_string(operand) + " has lifetime " + + toString(invalidLifeTime) + " instead of lifetime " + + toString(model.operands[operand].lifetime); + validate(device, message, model, [operand, invalidLifeTime](Model* model) { + static const DataLocation kZeroDataLocation = {}; + Operand& operandObj = model->operands[operand]; + switch (operandObj.lifetime) { + case OperandLifeTime::MODEL_INPUT: { + hidl_vec_remove(&model->inputIndexes, uint32_t(operand)); + break; + } + case OperandLifeTime::MODEL_OUTPUT: { + hidl_vec_remove(&model->outputIndexes, uint32_t(operand)); + break; + } + default: + break; + } + operandObj.lifetime = invalidLifeTime; + operandObj.location = kZeroDataLocation; + switch (invalidLifeTime) { + case OperandLifeTime::CONSTANT_COPY: { + becomeConstantCopy(model, &operandObj); + break; + } + case OperandLifeTime::MODEL_INPUT: + hidl_vec_push_back(&model->inputIndexes, uint32_t(operand)); + break; + case OperandLifeTime::MODEL_OUTPUT: + hidl_vec_push_back(&model->outputIndexes, uint32_t(operand)); + break; + default: + break; + } + }); + } + } +} + +///////////////////////// VALIDATE OPERAND INPUT-or-OUTPUT ////////////////////////////////////// + +static std::optional<OperandLifeTime> getInputOutputLifeTime(const Model& model, size_t modelSize, + const Operand& operand) { + // Ways to get an invalid lifetime (with respect to model inputIndexes and outputIndexes): + // - change whether a lifetime means an operand is a model input, a model output, or neither + // - preserve whether or not a lifetime means an operand should have a writer + switch (operand.lifetime) { + case OperandLifeTime::CONSTANT_COPY: + case OperandLifeTime::CONSTANT_REFERENCE: + return OperandLifeTime::MODEL_INPUT; + case OperandLifeTime::MODEL_INPUT: { + const size_t operandSize = sizeOfData(operand); // will be zero if shape is unknown + if (!operandSize || + exceedsBinderSizeLimit(modelSize + constantCopyExtraSize(model, operandSize))) { + // Unknown size or too-large size + break; + } + return OperandLifeTime::CONSTANT_COPY; + } + case OperandLifeTime::MODEL_OUTPUT: + return OperandLifeTime::TEMPORARY_VARIABLE; + case OperandLifeTime::TEMPORARY_VARIABLE: + return OperandLifeTime::MODEL_OUTPUT; + case OperandLifeTime::NO_VALUE: + // Not enough information to know whether + // TEMPORARY_VARIABLE or CONSTANT_COPY would be an + // appropriate choice -- is this operand written (then + // TEMPORARY_VARIABLE would be appropriate) or not (then + // CONSTANT_COPY would be appropriate)? 
+ break; + default: + ADD_FAILURE(); + break; + } + + return std::nullopt; +} + +static void mutateOperandInputOutputTest(const sp<IDevice>& device, const V1_0::Model& model) { + const size_t modelSize = sizeForBinder(model); + for (size_t operand = 0; operand < model.operands.size(); ++operand) { + const std::optional<OperandLifeTime> changedLifeTime = + getInputOutputLifeTime(model, modelSize, model.operands[operand]); + if (changedLifeTime) { + const std::string message = "mutateOperandInputOutputTest: operand " + + std::to_string(operand) + " has lifetime " + + toString(*changedLifeTime) + " instead of lifetime " + + toString(model.operands[operand].lifetime); + validate(device, message, model, [operand, changedLifeTime](Model* model) { + static const DataLocation kZeroDataLocation = {}; + Operand& operandObj = model->operands[operand]; + operandObj.lifetime = *changedLifeTime; + operandObj.location = kZeroDataLocation; + if (*changedLifeTime == OperandLifeTime::CONSTANT_COPY) { + becomeConstantCopy(model, &operandObj); + } + }); + } + } +} + +///////////////////////// VALIDATE OPERAND NUMBER OF CONSUMERS ////////////////////////////////// + +static std::vector<uint32_t> getInvalidNumberOfConsumers(uint32_t numberOfConsumers) { + if (numberOfConsumers == 0) { + return {1}; + } else { + return {numberOfConsumers - 1, numberOfConsumers + 1}; + } +} + +static void mutateOperandNumberOfConsumersTest(const sp<IDevice>& device, + const V1_0::Model& model) { + for (size_t operand = 0; operand < model.operands.size(); ++operand) { + const std::vector<uint32_t> invalidNumberOfConsumersVec = + getInvalidNumberOfConsumers(model.operands[operand].numberOfConsumers); + for (uint32_t invalidNumberOfConsumers : invalidNumberOfConsumersVec) { + const std::string message = + "mutateOperandNumberOfConsumersTest: operand " + std::to_string(operand) + + " numberOfConsumers = " + std::to_string(invalidNumberOfConsumers); + validate(device, message, model, [operand, invalidNumberOfConsumers](Model* model) { + model->operands[operand].numberOfConsumers = invalidNumberOfConsumers; + }); + } + } +} + +///////////////////////// VALIDATE OPERAND NUMBER OF WRITERS //////////////////////////////////// + +static void mutateOperandAddWriterTest(const sp<IDevice>& device, const V1_0::Model& model) { + for (size_t operation = 0; operation < model.operations.size(); ++operation) { + for (size_t badOutputNum = 0; badOutputNum < model.operations[operation].outputs.size(); + ++badOutputNum) { + const uint32_t outputOperandIndex = model.operations[operation].outputs[badOutputNum]; + const std::string message = "mutateOperandAddWriterTest: operation " + + std::to_string(operation) + " writes to " + + std::to_string(outputOperandIndex); + // We'll insert a copy of the operation, all of whose + // OTHER output operands are newly-created -- i.e., + // there'll only be a duplicate write of ONE of that + // operation's output operands. 
+ validate(device, message, model, [operation, badOutputNum](Model* model) { + Operation newOperation = model->operations[operation]; + for (uint32_t input : newOperation.inputs) { + ++model->operands[input].numberOfConsumers; + } + for (size_t outputNum = 0; outputNum < newOperation.outputs.size(); ++outputNum) { + if (outputNum == badOutputNum) continue; + + Operand operandValue = model->operands[newOperation.outputs[outputNum]]; + operandValue.numberOfConsumers = 0; + if (operandValue.lifetime == OperandLifeTime::MODEL_OUTPUT) { + operandValue.lifetime = OperandLifeTime::TEMPORARY_VARIABLE; + } else { + ASSERT_EQ(operandValue.lifetime, OperandLifeTime::TEMPORARY_VARIABLE); + } + newOperation.outputs[outputNum] = + hidl_vec_push_back(&model->operands, operandValue); + } + // Where do we insert the extra writer (a new + // operation)? It has to be later than all the + // writers of its inputs. The easiest thing to do + // is to insert it at the end of the operation + // sequence. + hidl_vec_push_back(&model->operations, newOperation); + }); + } + } +} + ///////////////////////// VALIDATE EXTRA ??? ///////////////////////// -// TODO: Operand::lifetime // TODO: Operand::location ///////////////////////// VALIDATE OPERATION OPERAND TYPE ///////////////////////// @@ -351,6 +765,33 @@ static void mutateOperationOutputOperandIndexTest(const sp<IDevice>& device, con } } +///////////////////////// VALIDATE MODEL OPERANDS WRITTEN /////////////////////////////////////// + +static void mutateOperationRemoveWriteTest(const sp<IDevice>& device, const V1_0::Model& model) { + for (size_t operation = 0; operation < model.operations.size(); ++operation) { + for (size_t outputNum = 0; outputNum < model.operations[operation].outputs.size(); + ++outputNum) { + const uint32_t outputOperandIndex = model.operations[operation].outputs[outputNum]; + if (model.operands[outputOperandIndex].numberOfConsumers > 0) { + const std::string message = "mutateOperationRemoveWriteTest: operation " + + std::to_string(operation) + " writes to " + + std::to_string(outputOperandIndex); + validate(device, message, model, [operation, outputNum](Model* model) { + uint32_t& outputOperandIndex = model->operations[operation].outputs[outputNum]; + Operand operandValue = model->operands[outputOperandIndex]; + operandValue.numberOfConsumers = 0; + if (operandValue.lifetime == OperandLifeTime::MODEL_OUTPUT) { + operandValue.lifetime = OperandLifeTime::TEMPORARY_VARIABLE; + } else { + ASSERT_EQ(operandValue.lifetime, OperandLifeTime::TEMPORARY_VARIABLE); + } + outputOperandIndex = hidl_vec_push_back(&model->operands, operandValue); + }); + } + } + } +} + ///////////////////////// REMOVE OPERAND FROM EVERYTHING ///////////////////////// static void removeValueAndDecrementGreaterValues(hidl_vec<uint32_t>* vec, uint32_t value) { @@ -476,14 +917,20 @@ static void addOperationOutputTest(const sp<IDevice>& device, const Model& model ////////////////////////// ENTRY POINT ////////////////////////////// void validateModel(const sp<IDevice>& device, const Model& model) { + mutateExecutionOrderTest(device, model); mutateOperandTypeTest(device, model); mutateOperandRankTest(device, model); mutateOperandScaleTest(device, model); mutateOperandZeroPointTest(device, model); + mutateOperandLifeTimeTest(device, model); + mutateOperandInputOutputTest(device, model); + mutateOperandNumberOfConsumersTest(device, model); + mutateOperandAddWriterTest(device, model); mutateOperationOperandTypeTest(device, model); mutateOperationTypeTest(device, model); 
mutateOperationInputOperandIndexTest(device, model); mutateOperationOutputOperandIndexTest(device, model); + mutateOperationRemoveWriteTest(device, model); removeOperandTest(device, model); removeOperationTest(device, model); removeOperationInputTest(device, model); diff --git a/neuralnetworks/1.0/vts/functional/include/1.0/Utils.h b/neuralnetworks/1.0/vts/functional/include/1.0/Utils.h index 3292f79b1a..7bd0460b82 100644 --- a/neuralnetworks/1.0/vts/functional/include/1.0/Utils.h +++ b/neuralnetworks/1.0/vts/functional/include/1.0/Utils.h @@ -21,6 +21,7 @@ #include <android/hardware/neuralnetworks/1.0/types.h> #include <android/hardware_buffer.h> #include <android/hidl/memory/1.0/IMemory.h> +#include <gtest/gtest.h> #include <algorithm> #include <iosfwd> #include <string> @@ -108,6 +109,15 @@ inline void hidl_vec_removeAt(hidl_vec<Type>* vec, uint32_t index) { vec->resize(vec->size() - 1); } +// Assumes there is exactly one instance of the value in the vector. +template <typename Type> +inline void hidl_vec_remove(hidl_vec<Type>* vec, const Type& val) { + CHECK(vec != nullptr); + auto where = std::find(vec->begin(), vec->end(), val); + ASSERT_NE(where, vec->end()); + hidl_vec_removeAt(vec, where - vec->begin()); +} + template <typename Type> inline uint32_t hidl_vec_push_back(hidl_vec<Type>* vec, const Type& value) { CHECK(vec != nullptr); @@ -117,6 +127,18 @@ inline uint32_t hidl_vec_push_back(hidl_vec<Type>* vec, const Type& value) { return index; } +// Returns the amount of space needed to store a value of the specified type. +// +// Aborts if the specified type is an extension type or OEM type. +uint32_t sizeOfData(V1_0::OperandType type); + +// Returns the amount of space needed to store a value of the dimensions and +// type of this operand. For a non-extension, non-OEM tensor with unspecified +// rank or at least one unspecified dimension, returns zero. +// +// Aborts if the specified type is an extension type or OEM type. 
+uint32_t sizeOfData(const V1_0::Operand& operand); + template <typename Type> using Named = std::pair<std::string, Type>; diff --git a/neuralnetworks/1.1/vts/functional/BasicTests.cpp b/neuralnetworks/1.1/vts/functional/BasicTests.cpp index 44836f0c95..baadd1b23b 100644 --- a/neuralnetworks/1.1/vts/functional/BasicTests.cpp +++ b/neuralnetworks/1.1/vts/functional/BasicTests.cpp @@ -18,10 +18,16 @@ #include "VtsHalNeuralnetworks.h" +#include "1.0/Callbacks.h" + namespace android::hardware::neuralnetworks::V1_1::vts::functional { using V1_0::DeviceStatus; using V1_0::ErrorStatus; +using V1_0::Operand; +using V1_0::OperandLifeTime; +using V1_0::OperandType; +using V1_0::implementation::PreparedModelCallback; // create device test TEST_P(NeuralnetworksHidlTest, CreateDevice) {} @@ -48,4 +54,137 @@ TEST_P(NeuralnetworksHidlTest, GetCapabilitiesTest) { EXPECT_TRUE(ret.isOk()); } +// detect cycle +TEST_P(NeuralnetworksHidlTest, CycleTest) { + // opnd0 = TENSOR_FLOAT32 // model input + // opnd1 = TENSOR_FLOAT32 // model input + // opnd2 = INT32 // model input + // opnd3 = ADD(opnd0, opnd4, opnd2) + // opnd4 = ADD(opnd1, opnd3, opnd2) + // opnd5 = ADD(opnd4, opnd0, opnd2) // model output + // + // +-----+ + // | | + // v | + // 3 = ADD(0, 4, 2) | + // | | + // +----------+ | + // | | + // v | + // 4 = ADD(1, 3, 2) | + // | | + // +----------------+ + // | + // | + // +-------+ + // | + // v + // 5 = ADD(4, 0, 2) + + const std::vector<Operand> operands = { + { + // operands[0] + .type = OperandType::TENSOR_FLOAT32, + .dimensions = {1}, + .numberOfConsumers = 2, + .scale = 0.0f, + .zeroPoint = 0, + .lifetime = OperandLifeTime::MODEL_INPUT, + .location = {.poolIndex = 0, .offset = 0, .length = 0}, + }, + { + // operands[1] + .type = OperandType::TENSOR_FLOAT32, + .dimensions = {1}, + .numberOfConsumers = 1, + .scale = 0.0f, + .zeroPoint = 0, + .lifetime = OperandLifeTime::MODEL_INPUT, + .location = {.poolIndex = 0, .offset = 0, .length = 0}, + }, + { + // operands[2] + .type = OperandType::INT32, + .dimensions = {}, + .numberOfConsumers = 3, + .scale = 0.0f, + .zeroPoint = 0, + .lifetime = OperandLifeTime::MODEL_INPUT, + .location = {.poolIndex = 0, .offset = 0, .length = 0}, + }, + { + // operands[3] + .type = OperandType::TENSOR_FLOAT32, + .dimensions = {1}, + .numberOfConsumers = 1, + .scale = 0.0f, + .zeroPoint = 0, + .lifetime = OperandLifeTime::TEMPORARY_VARIABLE, + .location = {.poolIndex = 0, .offset = 0, .length = 0}, + }, + { + // operands[4] + .type = OperandType::TENSOR_FLOAT32, + .dimensions = {1}, + .numberOfConsumers = 2, + .scale = 0.0f, + .zeroPoint = 0, + .lifetime = OperandLifeTime::TEMPORARY_VARIABLE, + .location = {.poolIndex = 0, .offset = 0, .length = 0}, + }, + { + // operands[5] + .type = OperandType::TENSOR_FLOAT32, + .dimensions = {1}, + .numberOfConsumers = 0, + .scale = 0.0f, + .zeroPoint = 0, + .lifetime = OperandLifeTime::MODEL_OUTPUT, + .location = {.poolIndex = 0, .offset = 0, .length = 0}, + }, + }; + + const std::vector<Operation> operations = { + {.type = OperationType::ADD, .inputs = {0, 4, 2}, .outputs = {3}}, + {.type = OperationType::ADD, .inputs = {1, 3, 2}, .outputs = {4}}, + {.type = OperationType::ADD, .inputs = {4, 0, 2}, .outputs = {5}}, + }; + + const Model model = { + .operands = operands, + .operations = operations, + .inputIndexes = {0, 1, 2}, + .outputIndexes = {5}, + .operandValues = {}, + .pools = {}, + }; + + // ensure that getSupportedOperations_1_1() checks model validity + ErrorStatus supportedOpsErrorStatus = ErrorStatus::GENERAL_FAILURE; + 
Return<void> supportedOpsReturn = kDevice->getSupportedOperations_1_1( + model, [&model, &supportedOpsErrorStatus](ErrorStatus status, + const hidl_vec<bool>& supported) { + supportedOpsErrorStatus = status; + if (status == ErrorStatus::NONE) { + ASSERT_EQ(supported.size(), model.operations.size()); + } + }); + ASSERT_TRUE(supportedOpsReturn.isOk()); + ASSERT_EQ(supportedOpsErrorStatus, ErrorStatus::INVALID_ARGUMENT); + + // ensure that prepareModel_1_1() checks model validity + sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback; + Return<ErrorStatus> prepareLaunchReturn = kDevice->prepareModel_1_1( + model, ExecutionPreference::FAST_SINGLE_ANSWER, preparedModelCallback); + ASSERT_TRUE(prepareLaunchReturn.isOk()); + // Note that preparation can fail for reasons other than an + // invalid model (invalid model should result in + // INVALID_ARGUMENT) -- for example, perhaps not all + // operations are supported, or perhaps the device hit some + // kind of capacity limit. + EXPECT_NE(prepareLaunchReturn, ErrorStatus::NONE); + EXPECT_NE(preparedModelCallback->getStatus(), ErrorStatus::NONE); + EXPECT_EQ(preparedModelCallback->getPreparedModel(), nullptr); +} + } // namespace android::hardware::neuralnetworks::V1_1::vts::functional diff --git a/neuralnetworks/1.1/vts/functional/ValidateModel.cpp b/neuralnetworks/1.1/vts/functional/ValidateModel.cpp index 3b6f0f8300..1f4e4eda49 100644 --- a/neuralnetworks/1.1/vts/functional/ValidateModel.cpp +++ b/neuralnetworks/1.1/vts/functional/ValidateModel.cpp @@ -16,13 +16,19 @@ #define LOG_TAG "neuralnetworks_hidl_hal_test" +#include <android/hardware/neuralnetworks/1.1/types.h> #include "1.0/Callbacks.h" #include "1.0/Utils.h" #include "GeneratedTestHarness.h" #include "VtsHalNeuralnetworks.h" +#include <optional> +#include <type_traits> +#include <utility> + namespace android::hardware::neuralnetworks::V1_1::vts::functional { +using V1_0::DataLocation; using V1_0::ErrorStatus; using V1_0::IPreparedModel; using V1_0::Operand; @@ -105,6 +111,212 @@ static uint32_t addOperand(Model* model, OperandLifeTime lifetime) { return index; } +// If we introduce a CONSTANT_COPY for an operand of size operandSize, +// how much will this increase the size of the model? This assumes +// that we can (re)use all of model.operandValues for the operand +// value. +static size_t constantCopyExtraSize(const Model& model, size_t operandSize) { + const size_t operandValuesSize = model.operandValues.size(); + return (operandValuesSize < operandSize) ? (operandSize - operandValuesSize) : 0; +} + +// Highly specialized utility routine for converting an operand to +// CONSTANT_COPY lifetime. +// +// Expects that: +// - operand has a known size +// - operand->lifetime has already been set to CONSTANT_COPY +// - operand->location has been zeroed out +// +// Does the following: +// - initializes operand->location to point to the beginning of model->operandValues +// - resizes model->operandValues (if necessary) to be large enough for the operand +// value, padding it with zeroes on the end +// +// Potential problem: +// By changing the operand to CONSTANT_COPY lifetime, this function is effectively initializing the +// operand with unspecified (but deterministic) data. This means that the model may be invalidated +// in two ways: not only is the lifetime of CONSTANT_COPY invalid, but the operand's value in the +// graph may also be invalid (e.g., if the operand is used as an activation code and has an invalid +// value). 
For now, this should be fine because it just means we're not testing what we think we're +// testing in certain cases; but we can handwave this and assume we're probabilistically likely to +// exercise the validation code over the span of the entire test set and operand space. +// +// Aborts if the specified operand type is an extension type or OEM type. +static void becomeConstantCopy(Model* model, Operand* operand) { + // sizeOfData will abort if the specified type is an extension type or OEM type. + const size_t sizeOfOperand = sizeOfData(*operand); + EXPECT_NE(sizeOfOperand, size_t(0)); + operand->location.poolIndex = 0; + operand->location.offset = 0; + operand->location.length = sizeOfOperand; + if (model->operandValues.size() < sizeOfOperand) { + model->operandValues.resize(sizeOfOperand); + } +} + +// The sizeForBinder() functions estimate the size of the +// representation of a value when sent to binder. It's probably a bit +// of an under-estimate, because we don't know the size of the +// metadata in the binder format (e.g., representation of the size of +// a vector); but at least it adds up "big" things like vector +// contents. However, it doesn't treat inter-field or end-of-struct +// padding in a methodical way -- there's no attempt to be consistent +// in whether or not padding in the native (C++) representation +// contributes to the estimated size for the binder representation; +// and there's no attempt to understand what padding (if any) is +// needed in the binder representation. +// +// This assumes that non-metadata uses a fixed length encoding (e.g., +// a uint32_t is always encoded in sizeof(uint32_t) bytes, rather than +// using an encoding whose length is related to the magnitude of the +// encoded value). + +template <typename Type> +static size_t sizeForBinder(const Type& val) { + static_assert(std::is_trivially_copyable_v<std::remove_reference_t<Type>>, + "expected a trivially copyable type"); + return sizeof(val); +} + +template <typename Type> +static size_t sizeForBinder(const hidl_vec<Type>& vec) { + return std::accumulate(vec.begin(), vec.end(), 0, + [](size_t acc, const Type& x) { return acc + sizeForBinder(x); }); +} + +template <> +size_t sizeForBinder(const Operand& operand) { + size_t size = 0; + + size += sizeForBinder(operand.type); + size += sizeForBinder(operand.dimensions); + size += sizeForBinder(operand.numberOfConsumers); + size += sizeForBinder(operand.scale); + size += sizeForBinder(operand.zeroPoint); + size += sizeForBinder(operand.lifetime); + size += sizeForBinder(operand.location); + + return size; +} + +template <> +size_t sizeForBinder(const Operation& operation) { + size_t size = 0; + + size += sizeForBinder(operation.type); + size += sizeForBinder(operation.inputs); + size += sizeForBinder(operation.outputs); + + return size; +} + +template <> +size_t sizeForBinder(const hidl_string& name) { + return name.size(); +} + +template <> +size_t sizeForBinder(const hidl_memory& memory) { + // This is just a guess. 
+ + size_t size = 0; + + if (const native_handle_t* handle = memory.handle()) { + size += sizeof(*handle); + size += sizeof(handle->data[0] * (handle->numFds + handle->numInts)); + } + size += sizeForBinder(memory.name()); + + return size; +} + +template <> +size_t sizeForBinder(const Model& model) { + size_t size = 0; + + size += sizeForBinder(model.operands); + size += sizeForBinder(model.operations); + size += sizeForBinder(model.inputIndexes); + size += sizeForBinder(model.outputIndexes); + size += sizeForBinder(model.operandValues); + size += sizeForBinder(model.pools); + size += sizeForBinder(model.relaxComputationFloat32toFloat16); + + return size; +} + +// https://developer.android.com/reference/android/os/TransactionTooLargeException.html +// +// "The Binder transaction buffer has a limited fixed size, +// currently 1Mb, which is shared by all transactions in progress +// for the process." +// +// Will our representation fit under this limit? There are two complications: +// - Our representation size is just approximate (see sizeForBinder()). +// - This object may not be the only occupant of the Binder transaction buffer. +// So we'll be very conservative: We want the representation size to be no +// larger than half the transaction buffer size. +// +// If our representation grows large enough that it still fits within +// the transaction buffer but combined with other transactions may +// exceed the buffer size, then we may see intermittent HAL transport +// errors. +static bool exceedsBinderSizeLimit(size_t representationSize) { + // Instead of using this fixed buffer size, we might instead be able to use + // ProcessState::self()->getMmapSize(). However, this has a potential + // problem: The binder/mmap size of the current process does not necessarily + // indicate the binder/mmap size of the service (i.e., the other process). + // The only way it would be a good indication is if both the current process + // and the service use the default size. + static const size_t kHalfBufferSize = 1024 * 1024 / 2; + + return representationSize > kHalfBufferSize; +} + +///////////////////////// VALIDATE EXECUTION ORDER //////////////////////////// + +static void mutateExecutionOrderTest(const sp<IDevice>& device, const V1_1::Model& model) { + for (size_t operation = 0; operation < model.operations.size(); ++operation) { + const Operation& operationObj = model.operations[operation]; + for (uint32_t input : operationObj.inputs) { + if (model.operands[input].lifetime == OperandLifeTime::TEMPORARY_VARIABLE || + model.operands[input].lifetime == OperandLifeTime::MODEL_OUTPUT) { + // This operation reads an operand written by some + // other operation. Move this operation to the + // beginning of the sequence, ensuring that it reads + // the operand before that operand is written, thereby + // violating execution order rules. + const std::string message = "mutateExecutionOrderTest: operation " + + std::to_string(operation) + " is a reader"; + validate(device, message, model, [operation](Model* model, ExecutionPreference*) { + auto& operations = model->operations; + std::rotate(operations.begin(), operations.begin() + operation, + operations.begin() + operation + 1); + }); + break; // only need to do this once per operation + } + } + for (uint32_t output : operationObj.outputs) { + if (model.operands[output].numberOfConsumers > 0) { + // This operation writes an operand read by some other + // operation. 
Move this operation to the end of the + // sequence, ensuring that it writes the operand after + // that operand is read, thereby violating execution + // order rules. + const std::string message = "mutateExecutionOrderTest: operation " + + std::to_string(operation) + " is a writer"; + validate(device, message, model, [operation](Model* model, ExecutionPreference*) { + auto& operations = model->operations; + std::rotate(operations.begin() + operation, operations.begin() + operation + 1, + operations.end()); + }); + break; // only need to do this once per operation + } + } + } +} + ///////////////////////// VALIDATE MODEL OPERAND TYPE ///////////////////////// static const int32_t invalidOperandTypes[] = { @@ -221,9 +433,240 @@ static void mutateOperandZeroPointTest(const sp<IDevice>& device, const Model& m } } +///////////////////////// VALIDATE OPERAND LIFETIME ///////////////////////////////////////////// + +static std::vector<OperandLifeTime> getInvalidLifeTimes(const Model& model, size_t modelSize, + const Operand& operand) { + // TODO: Support OperandLifeTime::CONSTANT_REFERENCE as an invalid lifetime + // TODO: Support OperandLifeTime::NO_VALUE as an invalid lifetime + + // Ways to get an invalid lifetime: + // - change whether a lifetime means an operand should have a writer + std::vector<OperandLifeTime> ret; + switch (operand.lifetime) { + case OperandLifeTime::MODEL_OUTPUT: + case OperandLifeTime::TEMPORARY_VARIABLE: + ret = { + OperandLifeTime::MODEL_INPUT, + OperandLifeTime::CONSTANT_COPY, + }; + break; + case OperandLifeTime::CONSTANT_COPY: + case OperandLifeTime::CONSTANT_REFERENCE: + case OperandLifeTime::MODEL_INPUT: + ret = { + OperandLifeTime::TEMPORARY_VARIABLE, + OperandLifeTime::MODEL_OUTPUT, + }; + break; + case OperandLifeTime::NO_VALUE: + // Not enough information to know whether + // TEMPORARY_VARIABLE or CONSTANT_COPY would be invalid -- + // is this operand written (then CONSTANT_COPY would be + // invalid) or not (then TEMPORARY_VARIABLE would be + // invalid)? 
+ break; + default: + ADD_FAILURE(); + break; + } + + const size_t operandSize = sizeOfData(operand); // will be zero if shape is unknown + if (!operandSize || + exceedsBinderSizeLimit(modelSize + constantCopyExtraSize(model, operandSize))) { + // Unknown size or too-large size + ret.erase(std::remove(ret.begin(), ret.end(), OperandLifeTime::CONSTANT_COPY), ret.end()); + } + + return ret; +} + +static void mutateOperandLifeTimeTest(const sp<IDevice>& device, const V1_1::Model& model) { + const size_t modelSize = sizeForBinder(model); + for (size_t operand = 0; operand < model.operands.size(); ++operand) { + const std::vector<OperandLifeTime> invalidLifeTimes = + getInvalidLifeTimes(model, modelSize, model.operands[operand]); + for (OperandLifeTime invalidLifeTime : invalidLifeTimes) { + const std::string message = "mutateOperandLifetimeTest: operand " + + std::to_string(operand) + " has lifetime " + + toString(invalidLifeTime) + " instead of lifetime " + + toString(model.operands[operand].lifetime); + validate(device, message, model, + [operand, invalidLifeTime](Model* model, ExecutionPreference*) { + static const DataLocation kZeroDataLocation = {}; + Operand& operandObj = model->operands[operand]; + switch (operandObj.lifetime) { + case OperandLifeTime::MODEL_INPUT: { + hidl_vec_remove(&model->inputIndexes, uint32_t(operand)); + break; + } + case OperandLifeTime::MODEL_OUTPUT: { + hidl_vec_remove(&model->outputIndexes, uint32_t(operand)); + break; + } + default: + break; + } + operandObj.lifetime = invalidLifeTime; + operandObj.location = kZeroDataLocation; + switch (invalidLifeTime) { + case OperandLifeTime::CONSTANT_COPY: { + becomeConstantCopy(model, &operandObj); + break; + } + case OperandLifeTime::MODEL_INPUT: + hidl_vec_push_back(&model->inputIndexes, uint32_t(operand)); + break; + case OperandLifeTime::MODEL_OUTPUT: + hidl_vec_push_back(&model->outputIndexes, uint32_t(operand)); + break; + default: + break; + } + }); + } + } +} + +///////////////////////// VALIDATE OPERAND INPUT-or-OUTPUT ////////////////////////////////////// + +static std::optional<OperandLifeTime> getInputOutputLifeTime(const Model& model, size_t modelSize, + const Operand& operand) { + // Ways to get an invalid lifetime (with respect to model inputIndexes and outputIndexes): + // - change whether a lifetime means an operand is a model input, a model output, or neither + // - preserve whether or not a lifetime means an operand should have a writer + switch (operand.lifetime) { + case OperandLifeTime::CONSTANT_COPY: + case OperandLifeTime::CONSTANT_REFERENCE: + return OperandLifeTime::MODEL_INPUT; + case OperandLifeTime::MODEL_INPUT: { + const size_t operandSize = sizeOfData(operand); // will be zero if shape is unknown + if (!operandSize || + exceedsBinderSizeLimit(modelSize + constantCopyExtraSize(model, operandSize))) { + // Unknown size or too-large size + break; + } + return OperandLifeTime::CONSTANT_COPY; + } + case OperandLifeTime::MODEL_OUTPUT: + return OperandLifeTime::TEMPORARY_VARIABLE; + case OperandLifeTime::TEMPORARY_VARIABLE: + return OperandLifeTime::MODEL_OUTPUT; + case OperandLifeTime::NO_VALUE: + // Not enough information to know whether + // TEMPORARY_VARIABLE or CONSTANT_COPY would be an + // appropriate choice -- is this operand written (then + // TEMPORARY_VARIABLE would be appropriate) or not (then + // CONSTANT_COPY would be appropriate)? 
+ break; + default: + ADD_FAILURE(); + break; + } + + return std::nullopt; +} + +static void mutateOperandInputOutputTest(const sp<IDevice>& device, const V1_1::Model& model) { + const size_t modelSize = sizeForBinder(model); + for (size_t operand = 0; operand < model.operands.size(); ++operand) { + const std::optional<OperandLifeTime> changedLifeTime = + getInputOutputLifeTime(model, modelSize, model.operands[operand]); + if (changedLifeTime) { + const std::string message = "mutateOperandInputOutputTest: operand " + + std::to_string(operand) + " has lifetime " + + toString(*changedLifeTime) + " instead of lifetime " + + toString(model.operands[operand].lifetime); + validate(device, message, model, + [operand, changedLifeTime](Model* model, ExecutionPreference*) { + static const DataLocation kZeroDataLocation = {}; + Operand& operandObj = model->operands[operand]; + operandObj.lifetime = *changedLifeTime; + operandObj.location = kZeroDataLocation; + if (*changedLifeTime == OperandLifeTime::CONSTANT_COPY) { + becomeConstantCopy(model, &operandObj); + } + }); + } + } +} + +///////////////////////// VALIDATE OPERAND NUMBER OF CONSUMERS ////////////////////////////////// + +static std::vector<uint32_t> getInvalidNumberOfConsumers(uint32_t numberOfConsumers) { + if (numberOfConsumers == 0) { + return {1}; + } else { + return {numberOfConsumers - 1, numberOfConsumers + 1}; + } +} + +static void mutateOperandNumberOfConsumersTest(const sp<IDevice>& device, + const V1_1::Model& model) { + for (size_t operand = 0; operand < model.operands.size(); ++operand) { + const std::vector<uint32_t> invalidNumberOfConsumersVec = + getInvalidNumberOfConsumers(model.operands[operand].numberOfConsumers); + for (uint32_t invalidNumberOfConsumers : invalidNumberOfConsumersVec) { + const std::string message = + "mutateOperandNumberOfConsumersTest: operand " + std::to_string(operand) + + " numberOfConsumers = " + std::to_string(invalidNumberOfConsumers); + validate(device, message, model, + [operand, invalidNumberOfConsumers](Model* model, ExecutionPreference*) { + model->operands[operand].numberOfConsumers = invalidNumberOfConsumers; + }); + } + } +} + +///////////////////////// VALIDATE OPERAND NUMBER OF WRITERS //////////////////////////////////// + +static void mutateOperandAddWriterTest(const sp<IDevice>& device, const V1_1::Model& model) { + for (size_t operation = 0; operation < model.operations.size(); ++operation) { + for (size_t badOutputNum = 0; badOutputNum < model.operations[operation].outputs.size(); + ++badOutputNum) { + const uint32_t outputOperandIndex = model.operations[operation].outputs[badOutputNum]; + const std::string message = "mutateOperandAddWriterTest: operation " + + std::to_string(operation) + " writes to " + + std::to_string(outputOperandIndex); + // We'll insert a copy of the operation, all of whose + // OTHER output operands are newly-created -- i.e., + // there'll only be a duplicate write of ONE of that + // operation's output operands. 
+ validate(device, message, model, + [operation, badOutputNum](Model* model, ExecutionPreference*) { + Operation newOperation = model->operations[operation]; + for (uint32_t input : newOperation.inputs) { + ++model->operands[input].numberOfConsumers; + } + for (size_t outputNum = 0; outputNum < newOperation.outputs.size(); + ++outputNum) { + if (outputNum == badOutputNum) continue; + + Operand operandValue = + model->operands[newOperation.outputs[outputNum]]; + operandValue.numberOfConsumers = 0; + if (operandValue.lifetime == OperandLifeTime::MODEL_OUTPUT) { + operandValue.lifetime = OperandLifeTime::TEMPORARY_VARIABLE; + } else { + ASSERT_EQ(operandValue.lifetime, + OperandLifeTime::TEMPORARY_VARIABLE); + } + newOperation.outputs[outputNum] = + hidl_vec_push_back(&model->operands, operandValue); + } + // Where do we insert the extra writer (a new + // operation)? It has to be later than all the + // writers of its inputs. The easiest thing to do + // is to insert it at the end of the operation + // sequence. + hidl_vec_push_back(&model->operations, newOperation); + }); + } + } +} + ///////////////////////// VALIDATE EXTRA ??? ///////////////////////// -// TODO: Operand::lifetime // TODO: Operand::location ///////////////////////// VALIDATE OPERATION OPERAND TYPE ///////////////////////// @@ -358,6 +801,37 @@ static void mutateOperationOutputOperandIndexTest(const sp<IDevice>& device, con } } +///////////////////////// VALIDATE MODEL OPERANDS WRITTEN /////////////////////////////////////// + +static void mutateOperationRemoveWriteTest(const sp<IDevice>& device, const V1_1::Model& model) { + for (size_t operation = 0; operation < model.operations.size(); ++operation) { + for (size_t outputNum = 0; outputNum < model.operations[operation].outputs.size(); + ++outputNum) { + const uint32_t outputOperandIndex = model.operations[operation].outputs[outputNum]; + if (model.operands[outputOperandIndex].numberOfConsumers > 0) { + const std::string message = "mutateOperationRemoveWriteTest: operation " + + std::to_string(operation) + " writes to " + + std::to_string(outputOperandIndex); + validate(device, message, model, + [operation, outputNum](Model* model, ExecutionPreference*) { + uint32_t& outputOperandIndex = + model->operations[operation].outputs[outputNum]; + Operand operandValue = model->operands[outputOperandIndex]; + operandValue.numberOfConsumers = 0; + if (operandValue.lifetime == OperandLifeTime::MODEL_OUTPUT) { + operandValue.lifetime = OperandLifeTime::TEMPORARY_VARIABLE; + } else { + ASSERT_EQ(operandValue.lifetime, + OperandLifeTime::TEMPORARY_VARIABLE); + } + outputOperandIndex = + hidl_vec_push_back(&model->operands, operandValue); + }); + } + } + } +} + ///////////////////////// REMOVE OPERAND FROM EVERYTHING ///////////////////////// static void removeValueAndDecrementGreaterValues(hidl_vec<uint32_t>* vec, uint32_t value) { @@ -504,14 +978,20 @@ static void mutateExecutionPreferenceTest(const sp<IDevice>& device, const Model ////////////////////////// ENTRY POINT ////////////////////////////// void validateModel(const sp<IDevice>& device, const Model& model) { + mutateExecutionOrderTest(device, model); mutateOperandTypeTest(device, model); mutateOperandRankTest(device, model); mutateOperandScaleTest(device, model); mutateOperandZeroPointTest(device, model); + mutateOperandLifeTimeTest(device, model); + mutateOperandInputOutputTest(device, model); + mutateOperandNumberOfConsumersTest(device, model); + mutateOperandAddWriterTest(device, model); 
mutateOperationOperandTypeTest(device, model); mutateOperationTypeTest(device, model); mutateOperationInputOperandIndexTest(device, model); mutateOperationOutputOperandIndexTest(device, model); + mutateOperationRemoveWriteTest(device, model); removeOperandTest(device, model); removeOperationTest(device, model); removeOperationInputTest(device, model); diff --git a/neuralnetworks/1.2/vts/functional/Android.bp b/neuralnetworks/1.2/vts/functional/Android.bp index 481eb80258..182f716115 100644 --- a/neuralnetworks/1.2/vts/functional/Android.bp +++ b/neuralnetworks/1.2/vts/functional/Android.bp @@ -15,11 +15,12 @@ // cc_library_static { - name: "VtsHalNeuralNetworksV1_2Callbacks", + name: "VtsHalNeuralNetworksV1_2_utils", defaults: ["neuralnetworks_vts_functional_defaults"], export_include_dirs: ["include"], srcs: [ "Callbacks.cpp", + "Utils.cpp", ], static_libs: [ "android.hardware.neuralnetworks@1.0", @@ -51,7 +52,7 @@ cc_test { ], static_libs: [ "VtsHalNeuralNetworksV1_0_utils", - "VtsHalNeuralNetworksV1_2Callbacks", + "VtsHalNeuralNetworksV1_2_utils", "android.hardware.neuralnetworks@1.0", "android.hardware.neuralnetworks@1.1", "android.hardware.neuralnetworks@1.2", diff --git a/neuralnetworks/1.2/vts/functional/BasicTests.cpp b/neuralnetworks/1.2/vts/functional/BasicTests.cpp index 58d3c4a403..77340e7434 100644 --- a/neuralnetworks/1.2/vts/functional/BasicTests.cpp +++ b/neuralnetworks/1.2/vts/functional/BasicTests.cpp @@ -20,9 +20,13 @@ namespace android::hardware::neuralnetworks::V1_2::vts::functional { +using implementation::PreparedModelCallback; using V1_0::DeviceStatus; using V1_0::ErrorStatus; +using V1_0::OperandLifeTime; using V1_0::PerformanceInfo; +using V1_1::ExecutionPreference; +using HidlToken = hidl_array<uint8_t, static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN)>; // create device test TEST_P(NeuralnetworksHidlTest, CreateDevice) {} @@ -123,4 +127,139 @@ TEST_P(NeuralnetworksHidlTest, getNumberOfCacheFilesNeeded) { }); EXPECT_TRUE(ret.isOk()); } + +// detect cycle +TEST_P(NeuralnetworksHidlTest, CycleTest) { + // opnd0 = TENSOR_FLOAT32 // model input + // opnd1 = TENSOR_FLOAT32 // model input + // opnd2 = INT32 // model input + // opnd3 = ADD(opnd0, opnd4, opnd2) + // opnd4 = ADD(opnd1, opnd3, opnd2) + // opnd5 = ADD(opnd4, opnd0, opnd2) // model output + // + // +-----+ + // | | + // v | + // 3 = ADD(0, 4, 2) | + // | | + // +----------+ | + // | | + // v | + // 4 = ADD(1, 3, 2) | + // | | + // +----------------+ + // | + // | + // +-------+ + // | + // v + // 5 = ADD(4, 0, 2) + + const std::vector<Operand> operands = { + { + // operands[0] + .type = OperandType::TENSOR_FLOAT32, + .dimensions = {1}, + .numberOfConsumers = 2, + .scale = 0.0f, + .zeroPoint = 0, + .lifetime = OperandLifeTime::MODEL_INPUT, + .location = {.poolIndex = 0, .offset = 0, .length = 0}, + }, + { + // operands[1] + .type = OperandType::TENSOR_FLOAT32, + .dimensions = {1}, + .numberOfConsumers = 1, + .scale = 0.0f, + .zeroPoint = 0, + .lifetime = OperandLifeTime::MODEL_INPUT, + .location = {.poolIndex = 0, .offset = 0, .length = 0}, + }, + { + // operands[2] + .type = OperandType::INT32, + .dimensions = {}, + .numberOfConsumers = 3, + .scale = 0.0f, + .zeroPoint = 0, + .lifetime = OperandLifeTime::MODEL_INPUT, + .location = {.poolIndex = 0, .offset = 0, .length = 0}, + }, + { + // operands[3] + .type = OperandType::TENSOR_FLOAT32, + .dimensions = {1}, + .numberOfConsumers = 1, + .scale = 0.0f, + .zeroPoint = 0, + .lifetime = OperandLifeTime::TEMPORARY_VARIABLE, + .location = {.poolIndex = 0, 
.offset = 0, .length = 0}, + }, + { + // operands[4] + .type = OperandType::TENSOR_FLOAT32, + .dimensions = {1}, + .numberOfConsumers = 2, + .scale = 0.0f, + .zeroPoint = 0, + .lifetime = OperandLifeTime::TEMPORARY_VARIABLE, + .location = {.poolIndex = 0, .offset = 0, .length = 0}, + }, + { + // operands[5] + .type = OperandType::TENSOR_FLOAT32, + .dimensions = {1}, + .numberOfConsumers = 0, + .scale = 0.0f, + .zeroPoint = 0, + .lifetime = OperandLifeTime::MODEL_OUTPUT, + .location = {.poolIndex = 0, .offset = 0, .length = 0}, + }, + }; + + const std::vector<Operation> operations = { + {.type = OperationType::ADD, .inputs = {0, 4, 2}, .outputs = {3}}, + {.type = OperationType::ADD, .inputs = {1, 3, 2}, .outputs = {4}}, + {.type = OperationType::ADD, .inputs = {4, 0, 2}, .outputs = {5}}, + }; + + const Model model = { + .operands = operands, + .operations = operations, + .inputIndexes = {0, 1, 2}, + .outputIndexes = {5}, + .operandValues = {}, + .pools = {}, + }; + + // ensure that getSupportedOperations_1_2() checks model validity + ErrorStatus supportedOpsErrorStatus = ErrorStatus::GENERAL_FAILURE; + Return<void> supportedOpsReturn = kDevice->getSupportedOperations_1_2( + model, [&model, &supportedOpsErrorStatus](ErrorStatus status, + const hidl_vec<bool>& supported) { + supportedOpsErrorStatus = status; + if (status == ErrorStatus::NONE) { + ASSERT_EQ(supported.size(), model.operations.size()); + } + }); + ASSERT_TRUE(supportedOpsReturn.isOk()); + ASSERT_EQ(supportedOpsErrorStatus, ErrorStatus::INVALID_ARGUMENT); + + // ensure that prepareModel_1_2() checks model validity + sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback; + Return<ErrorStatus> prepareLaunchReturn = kDevice->prepareModel_1_2( + model, ExecutionPreference::FAST_SINGLE_ANSWER, hidl_vec<hidl_handle>(), + hidl_vec<hidl_handle>(), HidlToken(), preparedModelCallback); + ASSERT_TRUE(prepareLaunchReturn.isOk()); + // Note that preparation can fail for reasons other than an + // invalid model (invalid model should result in + // INVALID_ARGUMENT) -- for example, perhaps not all + // operations are supported, or perhaps the device hit some + // kind of capacity limit. + EXPECT_NE(prepareLaunchReturn, ErrorStatus::NONE); + EXPECT_NE(preparedModelCallback->getStatus(), ErrorStatus::NONE); + EXPECT_EQ(preparedModelCallback->getPreparedModel(), nullptr); +} + } // namespace android::hardware::neuralnetworks::V1_2::vts::functional diff --git a/neuralnetworks/1.2/vts/functional/Utils.cpp b/neuralnetworks/1.2/vts/functional/Utils.cpp new file mode 100644 index 0000000000..cc654f2c57 --- /dev/null +++ b/neuralnetworks/1.2/vts/functional/Utils.cpp @@ -0,0 +1,85 @@ +/* + * Copyright (C) 2019 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include <android-base/logging.h> +#include <android/hardware/neuralnetworks/1.2/types.h> + +#include <functional> +#include <numeric> + +namespace android { +namespace hardware { +namespace neuralnetworks { + +uint32_t sizeOfData(V1_2::OperandType type) { + switch (type) { + case V1_2::OperandType::FLOAT32: + case V1_2::OperandType::INT32: + case V1_2::OperandType::UINT32: + case V1_2::OperandType::TENSOR_FLOAT32: + case V1_2::OperandType::TENSOR_INT32: + return 4; + case V1_2::OperandType::TENSOR_QUANT16_SYMM: + case V1_2::OperandType::TENSOR_FLOAT16: + case V1_2::OperandType::FLOAT16: + case V1_2::OperandType::TENSOR_QUANT16_ASYMM: + return 2; + case V1_2::OperandType::TENSOR_QUANT8_ASYMM: + case V1_2::OperandType::BOOL: + case V1_2::OperandType::TENSOR_BOOL8: + case V1_2::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL: + case V1_2::OperandType::TENSOR_QUANT8_SYMM: + return 1; + default: + CHECK(false) << "Invalid OperandType " << static_cast<uint32_t>(type); + return 0; + } +} + +static bool isTensor(V1_2::OperandType type) { + switch (type) { + case V1_2::OperandType::FLOAT32: + case V1_2::OperandType::INT32: + case V1_2::OperandType::UINT32: + case V1_2::OperandType::FLOAT16: + case V1_2::OperandType::BOOL: + return false; + case V1_2::OperandType::TENSOR_FLOAT32: + case V1_2::OperandType::TENSOR_INT32: + case V1_2::OperandType::TENSOR_QUANT16_SYMM: + case V1_2::OperandType::TENSOR_FLOAT16: + case V1_2::OperandType::TENSOR_QUANT16_ASYMM: + case V1_2::OperandType::TENSOR_QUANT8_ASYMM: + case V1_2::OperandType::TENSOR_BOOL8: + case V1_2::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL: + case V1_2::OperandType::TENSOR_QUANT8_SYMM: + return true; + default: + CHECK(false) << "Invalid OperandType " << static_cast<uint32_t>(type); + return false; + } +} + +uint32_t sizeOfData(const V1_2::Operand& operand) { + const uint32_t dataSize = sizeOfData(operand.type); + if (isTensor(operand.type) && operand.dimensions.size() == 0) return 0; + return std::accumulate(operand.dimensions.begin(), operand.dimensions.end(), dataSize, + std::multiplies<>{}); +} + +} // namespace neuralnetworks +} // namespace hardware +} // namespace android diff --git a/neuralnetworks/1.2/vts/functional/ValidateModel.cpp b/neuralnetworks/1.2/vts/functional/ValidateModel.cpp index 7451f095bf..3375602d27 100644 --- a/neuralnetworks/1.2/vts/functional/ValidateModel.cpp +++ b/neuralnetworks/1.2/vts/functional/ValidateModel.cpp @@ -16,14 +16,21 @@ #define LOG_TAG "neuralnetworks_hidl_hal_test" +#include <android/hardware/neuralnetworks/1.1/types.h> #include "1.0/Utils.h" #include "1.2/Callbacks.h" +#include "1.2/Utils.h" #include "GeneratedTestHarness.h" #include "VtsHalNeuralnetworks.h" +#include <optional> +#include <type_traits> +#include <utility> + namespace android::hardware::neuralnetworks::V1_2::vts::functional { using implementation::PreparedModelCallback; +using V1_0::DataLocation; using V1_0::ErrorStatus; using V1_0::OperandLifeTime; using V1_1::ExecutionPreference; @@ -105,6 +112,250 @@ static uint32_t addOperand(Model* model, OperandLifeTime lifetime) { return index; } +// If we introduce a CONSTANT_COPY for an operand of size operandSize, +// how much will this increase the size of the model? This assumes +// that we can (re)use all of model.operandValues for the operand +// value. +static size_t constantCopyExtraSize(const Model& model, size_t operandSize) { + const size_t operandValuesSize = model.operandValues.size(); + return (operandValuesSize < operandSize) ? 
(operandSize - operandValuesSize) : 0; +} + +// Highly specialized utility routine for converting an operand to +// CONSTANT_COPY lifetime. +// +// Expects that: +// - operand has a known size +// - operand->lifetime has already been set to CONSTANT_COPY +// - operand->location has been zeroed out +// +// Does the following: +// - initializes operand->location to point to the beginning of model->operandValues +// - resizes model->operandValues (if necessary) to be large enough for the operand +// value, padding it with zeroes on the end +// +// Potential problem: +// By changing the operand to CONSTANT_COPY lifetime, this function is effectively initializing the +// operand with unspecified (but deterministic) data. This means that the model may be invalidated +// in two ways: not only is the lifetime of CONSTANT_COPY invalid, but the operand's value in the +// graph may also be invalid (e.g., if the operand is used as an activation code and has an invalid +// value). For now, this should be fine because it just means we're not testing what we think we're +// testing in certain cases; but we can handwave this and assume we're probabilistically likely to +// exercise the validation code over the span of the entire test set and operand space. +// +// Aborts if the specified operand type is an extension type or OEM type. +static void becomeConstantCopy(Model* model, Operand* operand) { + // sizeOfData will abort if the specified type is an extension type or OEM type. + const size_t sizeOfOperand = sizeOfData(*operand); + EXPECT_NE(sizeOfOperand, size_t(0)); + operand->location.poolIndex = 0; + operand->location.offset = 0; + operand->location.length = sizeOfOperand; + if (model->operandValues.size() < sizeOfOperand) { + model->operandValues.resize(sizeOfOperand); + } +} + +// The sizeForBinder() functions estimate the size of the +// representation of a value when sent to binder. It's probably a bit +// of an under-estimate, because we don't know the size of the +// metadata in the binder format (e.g., representation of the size of +// a vector); but at least it adds up "big" things like vector +// contents. However, it doesn't treat inter-field or end-of-struct +// padding in a methodical way -- there's no attempt to be consistent +// in whether or not padding in the native (C++) representation +// contributes to the estimated size for the binder representation; +// and there's no attempt to understand what padding (if any) is +// needed in the binder representation. +// +// This assumes that non-metadata uses a fixed length encoding (e.g., +// a uint32_t is always encoded in sizeof(uint32_t) bytes, rather than +// using an encoding whose length is related to the magnitude of the +// encoded value). 
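+//
+// For example, with the overloads below, a hidl_vec<uint32_t> holding three
+// elements is estimated as 3 * sizeof(uint32_t) = 12 bytes; the vector's own
+// length and pointer metadata are not counted.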
+
+template <typename Type>
+static size_t sizeForBinder(const Type& val) {
+    static_assert(std::is_trivially_copyable_v<std::remove_reference_t<Type>>,
+                  "expected a trivially copyable type");
+    return sizeof(val);
+}
+
+template <typename Type>
+static size_t sizeForBinder(const hidl_vec<Type>& vec) {
+    return std::accumulate(vec.begin(), vec.end(), 0,
+                           [](size_t acc, const Type& x) { return acc + sizeForBinder(x); });
+}
+
+template <>
+size_t sizeForBinder(const SymmPerChannelQuantParams& symmPerChannelQuantParams) {
+    size_t size = 0;
+
+    size += sizeForBinder(symmPerChannelQuantParams.scales);
+    size += sizeForBinder(symmPerChannelQuantParams.channelDim);
+
+    return size;
+}
+
+template <>
+size_t sizeForBinder(const Operand::ExtraParams& extraParams) {
+    using Discriminator = Operand::ExtraParams::hidl_discriminator;
+    switch (extraParams.getDiscriminator()) {
+        case Discriminator::none:
+            return 0;
+        case Discriminator::channelQuant:
+            return sizeForBinder(extraParams.channelQuant());
+        case Discriminator::extension:
+            return sizeForBinder(extraParams.extension());
+    }
+    LOG(FATAL) << "Unrecognized extraParams enum: "
+               << static_cast<int>(extraParams.getDiscriminator());
+    return 0;
+}
+
+template <>
+size_t sizeForBinder(const Operand& operand) {
+    size_t size = 0;
+
+    size += sizeForBinder(operand.type);
+    size += sizeForBinder(operand.dimensions);
+    size += sizeForBinder(operand.numberOfConsumers);
+    size += sizeForBinder(operand.scale);
+    size += sizeForBinder(operand.zeroPoint);
+    size += sizeForBinder(operand.lifetime);
+    size += sizeForBinder(operand.location);
+    size += sizeForBinder(operand.extraParams);
+
+    return size;
+}
+
+template <>
+size_t sizeForBinder(const Operation& operation) {
+    size_t size = 0;
+
+    size += sizeForBinder(operation.type);
+    size += sizeForBinder(operation.inputs);
+    size += sizeForBinder(operation.outputs);
+
+    return size;
+}
+
+template <>
+size_t sizeForBinder(const hidl_string& name) {
+    return name.size();
+}
+
+template <>
+size_t sizeForBinder(const hidl_memory& memory) {
+    // This is just a guess.
+
+    size_t size = 0;
+
+    if (const native_handle_t* handle = memory.handle()) {
+        size += sizeof(*handle);
+        size += sizeof(handle->data[0]) * (handle->numFds + handle->numInts);
+    }
+    size += sizeForBinder(memory.name());
+
+    return size;
+}
+
+template <>
+size_t sizeForBinder(const Model::ExtensionNameAndPrefix& extensionNameToPrefix) {
+    size_t size = 0;
+
+    size += sizeForBinder(extensionNameToPrefix.name);
+    size += sizeForBinder(extensionNameToPrefix.prefix);
+
+    return size;
+}
+
+template <>
+size_t sizeForBinder(const Model& model) {
+    size_t size = 0;
+
+    size += sizeForBinder(model.operands);
+    size += sizeForBinder(model.operations);
+    size += sizeForBinder(model.inputIndexes);
+    size += sizeForBinder(model.outputIndexes);
+    size += sizeForBinder(model.operandValues);
+    size += sizeForBinder(model.pools);
+    size += sizeForBinder(model.relaxComputationFloat32toFloat16);
+    size += sizeForBinder(model.extensionNameToPrefix);
+
+    return size;
+}
+
+// https://developer.android.com/reference/android/os/TransactionTooLargeException.html
+//
+// "The Binder transaction buffer has a limited fixed size,
+// currently 1Mb, which is shared by all transactions in progress
+// for the process."
+//
+// Will our representation fit under this limit? There are two complications:
+// - Our representation size is just approximate (see sizeForBinder()).
+// - This object may not be the only occupant of the Binder transaction buffer.
+// So we'll be very conservative: We want the representation size to be no +// larger than half the transaction buffer size. +// +// If our representation grows large enough that it still fits within +// the transaction buffer but combined with other transactions may +// exceed the buffer size, then we may see intermittent HAL transport +// errors. +static bool exceedsBinderSizeLimit(size_t representationSize) { + // Instead of using this fixed buffer size, we might instead be able to use + // ProcessState::self()->getMmapSize(). However, this has a potential + // problem: The binder/mmap size of the current process does not necessarily + // indicate the binder/mmap size of the service (i.e., the other process). + // The only way it would be a good indication is if both the current process + // and the service use the default size. + static const size_t kHalfBufferSize = 1024 * 1024 / 2; + + return representationSize > kHalfBufferSize; +} + +///////////////////////// VALIDATE EXECUTION ORDER //////////////////////////// + +static void mutateExecutionOrderTest(const sp<IDevice>& device, const Model& model) { + for (size_t operation = 0; operation < model.operations.size(); ++operation) { + const Operation& operationObj = model.operations[operation]; + for (uint32_t input : operationObj.inputs) { + if (model.operands[input].lifetime == OperandLifeTime::TEMPORARY_VARIABLE || + model.operands[input].lifetime == OperandLifeTime::MODEL_OUTPUT) { + // This operation reads an operand written by some + // other operation. Move this operation to the + // beginning of the sequence, ensuring that it reads + // the operand before that operand is written, thereby + // violating execution order rules. + const std::string message = "mutateExecutionOrderTest: operation " + + std::to_string(operation) + " is a reader"; + validate(device, message, model, [operation](Model* model, ExecutionPreference*) { + auto& operations = model->operations; + std::rotate(operations.begin(), operations.begin() + operation, + operations.begin() + operation + 1); + }); + break; // only need to do this once per operation + } + } + for (uint32_t output : operationObj.outputs) { + if (model.operands[output].numberOfConsumers > 0) { + // This operation writes an operand read by some other + // operation. Move this operation to the end of the + // sequence, ensuring that it writes the operand after + // that operand is read, thereby violating execution + // order rules. 
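+                // For example, if operations is {A, B, C, D} and this
+                // operation is B (index 1), the rotate below produces
+                // {A, C, D, B}, moving the writer after its readers.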
+ const std::string message = "mutateExecutionOrderTest: operation " + + std::to_string(operation) + " is a writer"; + validate(device, message, model, [operation](Model* model, ExecutionPreference*) { + auto& operations = model->operations; + std::rotate(operations.begin() + operation, operations.begin() + operation + 1, + operations.end()); + }); + break; // only need to do this once per operation + } + } + } +} + ///////////////////////// VALIDATE MODEL OPERAND TYPE ///////////////////////// static const uint32_t invalidOperandTypes[] = { @@ -251,9 +502,239 @@ static void mutateOperandZeroPointTest(const sp<IDevice>& device, const Model& m } } +///////////////////////// VALIDATE OPERAND LIFETIME ///////////////////////////////////////////// + +static std::vector<OperandLifeTime> getInvalidLifeTimes(const Model& model, size_t modelSize, + const Operand& operand) { + // TODO: Support OperandLifeTime::CONSTANT_REFERENCE as an invalid lifetime + // TODO: Support OperandLifeTime::NO_VALUE as an invalid lifetime + + // Ways to get an invalid lifetime: + // - change whether a lifetime means an operand should have a writer + std::vector<OperandLifeTime> ret; + switch (operand.lifetime) { + case OperandLifeTime::MODEL_OUTPUT: + case OperandLifeTime::TEMPORARY_VARIABLE: + ret = { + OperandLifeTime::MODEL_INPUT, + OperandLifeTime::CONSTANT_COPY, + }; + break; + case OperandLifeTime::CONSTANT_COPY: + case OperandLifeTime::CONSTANT_REFERENCE: + case OperandLifeTime::MODEL_INPUT: + ret = { + OperandLifeTime::TEMPORARY_VARIABLE, + OperandLifeTime::MODEL_OUTPUT, + }; + break; + case OperandLifeTime::NO_VALUE: + // Not enough information to know whether + // TEMPORARY_VARIABLE or CONSTANT_COPY would be invalid -- + // is this operand written (then CONSTANT_COPY would be + // invalid) or not (then TEMPORARY_VARIABLE would be + // invalid)? 
+ break; + default: + ADD_FAILURE(); + break; + } + + const size_t operandSize = sizeOfData(operand); // will be zero if shape is unknown + if (!operandSize || + exceedsBinderSizeLimit(modelSize + constantCopyExtraSize(model, operandSize))) { + // Unknown size or too-large size + ret.erase(std::remove(ret.begin(), ret.end(), OperandLifeTime::CONSTANT_COPY), ret.end()); + } + + return ret; +} + +static void mutateOperandLifeTimeTest(const sp<IDevice>& device, const Model& model) { + const size_t modelSize = sizeForBinder(model); + for (size_t operand = 0; operand < model.operands.size(); ++operand) { + const std::vector<OperandLifeTime> invalidLifeTimes = + getInvalidLifeTimes(model, modelSize, model.operands[operand]); + for (OperandLifeTime invalidLifeTime : invalidLifeTimes) { + const std::string message = "mutateOperandLifetimeTest: operand " + + std::to_string(operand) + " has lifetime " + + toString(invalidLifeTime) + " instead of lifetime " + + toString(model.operands[operand].lifetime); + validate(device, message, model, + [operand, invalidLifeTime](Model* model, ExecutionPreference*) { + static const DataLocation kZeroDataLocation = {}; + Operand& operandObj = model->operands[operand]; + switch (operandObj.lifetime) { + case OperandLifeTime::MODEL_INPUT: { + hidl_vec_remove(&model->inputIndexes, uint32_t(operand)); + break; + } + case OperandLifeTime::MODEL_OUTPUT: { + hidl_vec_remove(&model->outputIndexes, uint32_t(operand)); + break; + } + default: + break; + } + operandObj.lifetime = invalidLifeTime; + operandObj.location = kZeroDataLocation; + switch (invalidLifeTime) { + case OperandLifeTime::CONSTANT_COPY: { + becomeConstantCopy(model, &operandObj); + break; + } + case OperandLifeTime::MODEL_INPUT: + hidl_vec_push_back(&model->inputIndexes, uint32_t(operand)); + break; + case OperandLifeTime::MODEL_OUTPUT: + hidl_vec_push_back(&model->outputIndexes, uint32_t(operand)); + break; + default: + break; + } + }); + } + } +} + +///////////////////////// VALIDATE OPERAND INPUT-or-OUTPUT ////////////////////////////////////// + +static std::optional<OperandLifeTime> getInputOutputLifeTime(const Model& model, size_t modelSize, + const Operand& operand) { + // Ways to get an invalid lifetime (with respect to model inputIndexes and outputIndexes): + // - change whether a lifetime means an operand is a model input, a model output, or neither + // - preserve whether or not a lifetime means an operand should have a writer + switch (operand.lifetime) { + case OperandLifeTime::CONSTANT_COPY: + case OperandLifeTime::CONSTANT_REFERENCE: + return OperandLifeTime::MODEL_INPUT; + case OperandLifeTime::MODEL_INPUT: { + const size_t operandSize = sizeOfData(operand); // will be zero if shape is unknown + if (!operandSize || + exceedsBinderSizeLimit(modelSize + constantCopyExtraSize(model, operandSize))) { + // Unknown size or too-large size + break; + } + return OperandLifeTime::CONSTANT_COPY; + } + case OperandLifeTime::MODEL_OUTPUT: + return OperandLifeTime::TEMPORARY_VARIABLE; + case OperandLifeTime::TEMPORARY_VARIABLE: + return OperandLifeTime::MODEL_OUTPUT; + case OperandLifeTime::NO_VALUE: + // Not enough information to know whether + // TEMPORARY_VARIABLE or CONSTANT_COPY would be an + // appropriate choice -- is this operand written (then + // TEMPORARY_VARIABLE would be appropriate) or not (then + // CONSTANT_COPY would be appropriate)? 
+ break; + default: + ADD_FAILURE(); + break; + } + + return std::nullopt; +} + +static void mutateOperandInputOutputTest(const sp<IDevice>& device, const Model& model) { + const size_t modelSize = sizeForBinder(model); + for (size_t operand = 0; operand < model.operands.size(); ++operand) { + const std::optional<OperandLifeTime> changedLifeTime = + getInputOutputLifeTime(model, modelSize, model.operands[operand]); + if (changedLifeTime) { + const std::string message = "mutateOperandInputOutputTest: operand " + + std::to_string(operand) + " has lifetime " + + toString(*changedLifeTime) + " instead of lifetime " + + toString(model.operands[operand].lifetime); + validate(device, message, model, + [operand, changedLifeTime](Model* model, ExecutionPreference*) { + static const DataLocation kZeroDataLocation = {}; + Operand& operandObj = model->operands[operand]; + operandObj.lifetime = *changedLifeTime; + operandObj.location = kZeroDataLocation; + if (*changedLifeTime == OperandLifeTime::CONSTANT_COPY) { + becomeConstantCopy(model, &operandObj); + } + }); + } + } +} + +///////////////////////// VALIDATE OPERAND NUMBER OF CONSUMERS ////////////////////////////////// + +static std::vector<uint32_t> getInvalidNumberOfConsumers(uint32_t numberOfConsumers) { + if (numberOfConsumers == 0) { + return {1}; + } else { + return {numberOfConsumers - 1, numberOfConsumers + 1}; + } +} + +static void mutateOperandNumberOfConsumersTest(const sp<IDevice>& device, const Model& model) { + for (size_t operand = 0; operand < model.operands.size(); ++operand) { + const std::vector<uint32_t> invalidNumberOfConsumersVec = + getInvalidNumberOfConsumers(model.operands[operand].numberOfConsumers); + for (uint32_t invalidNumberOfConsumers : invalidNumberOfConsumersVec) { + const std::string message = + "mutateOperandNumberOfConsumersTest: operand " + std::to_string(operand) + + " numberOfConsumers = " + std::to_string(invalidNumberOfConsumers); + validate(device, message, model, + [operand, invalidNumberOfConsumers](Model* model, ExecutionPreference*) { + model->operands[operand].numberOfConsumers = invalidNumberOfConsumers; + }); + } + } +} + +///////////////////////// VALIDATE OPERAND NUMBER OF WRITERS //////////////////////////////////// + +static void mutateOperandAddWriterTest(const sp<IDevice>& device, const Model& model) { + for (size_t operation = 0; operation < model.operations.size(); ++operation) { + for (size_t badOutputNum = 0; badOutputNum < model.operations[operation].outputs.size(); + ++badOutputNum) { + const uint32_t outputOperandIndex = model.operations[operation].outputs[badOutputNum]; + const std::string message = "mutateOperandAddWriterTest: operation " + + std::to_string(operation) + " writes to " + + std::to_string(outputOperandIndex); + // We'll insert a copy of the operation, all of whose + // OTHER output operands are newly-created -- i.e., + // there'll only be a duplicate write of ONE of that + // operation's output operands. 
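+            // (The hidl_vec_push_back() helper used in the mutation below
+            // returns the index of the newly appended operand, which becomes
+            // the copied operation's fresh output index.)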
+ validate(device, message, model, + [operation, badOutputNum](Model* model, ExecutionPreference*) { + Operation newOperation = model->operations[operation]; + for (uint32_t input : newOperation.inputs) { + ++model->operands[input].numberOfConsumers; + } + for (size_t outputNum = 0; outputNum < newOperation.outputs.size(); + ++outputNum) { + if (outputNum == badOutputNum) continue; + + Operand operandValue = + model->operands[newOperation.outputs[outputNum]]; + operandValue.numberOfConsumers = 0; + if (operandValue.lifetime == OperandLifeTime::MODEL_OUTPUT) { + operandValue.lifetime = OperandLifeTime::TEMPORARY_VARIABLE; + } else { + ASSERT_EQ(operandValue.lifetime, + OperandLifeTime::TEMPORARY_VARIABLE); + } + newOperation.outputs[outputNum] = + hidl_vec_push_back(&model->operands, operandValue); + } + // Where do we insert the extra writer (a new + // operation)? It has to be later than all the + // writers of its inputs. The easiest thing to do + // is to insert it at the end of the operation + // sequence. + hidl_vec_push_back(&model->operations, newOperation); + }); + } + } +} + ///////////////////////// VALIDATE EXTRA ??? ///////////////////////// -// TODO: Operand::lifetime // TODO: Operand::location ///////////////////////// VALIDATE OPERATION OPERAND TYPE ///////////////////////// @@ -461,6 +942,37 @@ static void mutateOperationOutputOperandIndexTest(const sp<IDevice>& device, con } } +///////////////////////// VALIDATE MODEL OPERANDS WRITTEN /////////////////////////////////////// + +static void mutateOperationRemoveWriteTest(const sp<IDevice>& device, const Model& model) { + for (size_t operation = 0; operation < model.operations.size(); ++operation) { + for (size_t outputNum = 0; outputNum < model.operations[operation].outputs.size(); + ++outputNum) { + const uint32_t outputOperandIndex = model.operations[operation].outputs[outputNum]; + if (model.operands[outputOperandIndex].numberOfConsumers > 0) { + const std::string message = "mutateOperationRemoveWriteTest: operation " + + std::to_string(operation) + " writes to " + + std::to_string(outputOperandIndex); + validate(device, message, model, + [operation, outputNum](Model* model, ExecutionPreference*) { + uint32_t& outputOperandIndex = + model->operations[operation].outputs[outputNum]; + Operand operandValue = model->operands[outputOperandIndex]; + operandValue.numberOfConsumers = 0; + if (operandValue.lifetime == OperandLifeTime::MODEL_OUTPUT) { + operandValue.lifetime = OperandLifeTime::TEMPORARY_VARIABLE; + } else { + ASSERT_EQ(operandValue.lifetime, + OperandLifeTime::TEMPORARY_VARIABLE); + } + outputOperandIndex = + hidl_vec_push_back(&model->operands, operandValue); + }); + } + } + } +} + ///////////////////////// REMOVE OPERAND FROM EVERYTHING ///////////////////////// static void removeValueAndDecrementGreaterValues(hidl_vec<uint32_t>* vec, uint32_t value) { @@ -711,14 +1223,20 @@ static void mutateExecutionPreferenceTest(const sp<IDevice>& device, const Model ////////////////////////// ENTRY POINT ////////////////////////////// void validateModel(const sp<IDevice>& device, const Model& model) { + mutateExecutionOrderTest(device, model); mutateOperandTypeTest(device, model); mutateOperandRankTest(device, model); mutateOperandScaleTest(device, model); mutateOperandZeroPointTest(device, model); + mutateOperandLifeTimeTest(device, model); + mutateOperandInputOutputTest(device, model); + mutateOperandNumberOfConsumersTest(device, model); + mutateOperandAddWriterTest(device, model); mutateOperationOperandTypeTest(device, 
model); mutateOperationTypeTest(device, model); mutateOperationInputOperandIndexTest(device, model); mutateOperationOutputOperandIndexTest(device, model); + mutateOperationRemoveWriteTest(device, model); removeOperandTest(device, model); removeOperationTest(device, model); removeOperationInputTest(device, model); diff --git a/neuralnetworks/1.2/vts/functional/include/1.2/Utils.h b/neuralnetworks/1.2/vts/functional/include/1.2/Utils.h new file mode 100644 index 0000000000..61a8d7485a --- /dev/null +++ b/neuralnetworks/1.2/vts/functional/include/1.2/Utils.h @@ -0,0 +1,42 @@ +/* + * Copyright (C) 2019 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ANDROID_HARDWARE_NEURALNETWORKS_V1_2_UTILS_H +#define ANDROID_HARDWARE_NEURALNETWORKS_V1_2_UTILS_H + +#include <android/hardware/neuralnetworks/1.2/types.h> + +namespace android { +namespace hardware { +namespace neuralnetworks { + +// Returns the amount of space needed to store a value of the specified type. +// +// Aborts if the specified type is an extension type or OEM type. +uint32_t sizeOfData(V1_2::OperandType type); + +// Returns the amount of space needed to store a value of the dimensions and +// type of this operand. For a non-extension, non-OEM tensor with unspecified +// rank or at least one unspecified dimension, returns zero. +// +// Aborts if the specified type is an extension type or OEM type. 
+uint32_t sizeOfData(const V1_2::Operand& operand); + +} // namespace neuralnetworks +} // namespace hardware +} // namespace android + +#endif // ANDROID_HARDWARE_NEURALNETWORKS_V1_2_UTILS_H diff --git a/neuralnetworks/1.3/vts/functional/Android.bp b/neuralnetworks/1.3/vts/functional/Android.bp index 2c1be0b008..771fc54e0d 100644 --- a/neuralnetworks/1.3/vts/functional/Android.bp +++ b/neuralnetworks/1.3/vts/functional/Android.bp @@ -54,7 +54,7 @@ cc_test { ], static_libs: [ "VtsHalNeuralNetworksV1_0_utils", - "VtsHalNeuralNetworksV1_2Callbacks", + "VtsHalNeuralNetworksV1_2_utils", "VtsHalNeuralNetworksV1_3_utils", "android.hardware.neuralnetworks@1.0", "android.hardware.neuralnetworks@1.1", diff --git a/neuralnetworks/1.3/vts/functional/BasicTests.cpp b/neuralnetworks/1.3/vts/functional/BasicTests.cpp index 1c2536983e..6fcfc3482d 100644 --- a/neuralnetworks/1.3/vts/functional/BasicTests.cpp +++ b/neuralnetworks/1.3/vts/functional/BasicTests.cpp @@ -20,11 +20,14 @@ namespace android::hardware::neuralnetworks::V1_3::vts::functional { +using implementation::PreparedModelCallback; using V1_0::DeviceStatus; using V1_0::PerformanceInfo; +using V1_1::ExecutionPreference; using V1_2::Constant; using V1_2::DeviceType; using V1_2::Extension; +using HidlToken = hidl_array<uint8_t, static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN)>; // create device test TEST_P(NeuralnetworksHidlTest, CreateDevice) {} @@ -65,4 +68,143 @@ TEST_P(NeuralnetworksHidlTest, GetCapabilitiesTest) { }); EXPECT_TRUE(ret.isOk()); } + +// detect cycle +TEST_P(NeuralnetworksHidlTest, CycleTest) { + // opnd0 = TENSOR_FLOAT32 // model input + // opnd1 = TENSOR_FLOAT32 // model input + // opnd2 = INT32 // model input + // opnd3 = ADD(opnd0, opnd4, opnd2) + // opnd4 = ADD(opnd1, opnd3, opnd2) + // opnd5 = ADD(opnd4, opnd0, opnd2) // model output + // + // +-----+ + // | | + // v | + // 3 = ADD(0, 4, 2) | + // | | + // +----------+ | + // | | + // v | + // 4 = ADD(1, 3, 2) | + // | | + // +----------------+ + // | + // | + // +-------+ + // | + // v + // 5 = ADD(4, 0, 2) + + const std::vector<Operand> operands = { + { + // operands[0] + .type = OperandType::TENSOR_FLOAT32, + .dimensions = {1}, + .numberOfConsumers = 2, + .scale = 0.0f, + .zeroPoint = 0, + .lifetime = OperandLifeTime::SUBGRAPH_INPUT, + .location = {.poolIndex = 0, .offset = 0, .length = 0}, + }, + { + // operands[1] + .type = OperandType::TENSOR_FLOAT32, + .dimensions = {1}, + .numberOfConsumers = 1, + .scale = 0.0f, + .zeroPoint = 0, + .lifetime = OperandLifeTime::SUBGRAPH_INPUT, + .location = {.poolIndex = 0, .offset = 0, .length = 0}, + }, + { + // operands[2] + .type = OperandType::INT32, + .dimensions = {}, + .numberOfConsumers = 3, + .scale = 0.0f, + .zeroPoint = 0, + .lifetime = OperandLifeTime::SUBGRAPH_INPUT, + .location = {.poolIndex = 0, .offset = 0, .length = 0}, + }, + { + // operands[3] + .type = OperandType::TENSOR_FLOAT32, + .dimensions = {1}, + .numberOfConsumers = 1, + .scale = 0.0f, + .zeroPoint = 0, + .lifetime = OperandLifeTime::TEMPORARY_VARIABLE, + .location = {.poolIndex = 0, .offset = 0, .length = 0}, + }, + { + // operands[4] + .type = OperandType::TENSOR_FLOAT32, + .dimensions = {1}, + .numberOfConsumers = 2, + .scale = 0.0f, + .zeroPoint = 0, + .lifetime = OperandLifeTime::TEMPORARY_VARIABLE, + .location = {.poolIndex = 0, .offset = 0, .length = 0}, + }, + { + // operands[5] + .type = OperandType::TENSOR_FLOAT32, + .dimensions = {1}, + .numberOfConsumers = 0, + .scale = 0.0f, + .zeroPoint = 0, + .lifetime = 
OperandLifeTime::SUBGRAPH_OUTPUT,
+            .location = {.poolIndex = 0, .offset = 0, .length = 0},
+        },
+    };
+
+    const std::vector<Operation> operations = {
+            {.type = OperationType::ADD, .inputs = {0, 4, 2}, .outputs = {3}},
+            {.type = OperationType::ADD, .inputs = {1, 3, 2}, .outputs = {4}},
+            {.type = OperationType::ADD, .inputs = {4, 0, 2}, .outputs = {5}},
+    };
+
+    Subgraph subgraph = {
+            .operands = operands,
+            .operations = operations,
+            .inputIndexes = {0, 1, 2},
+            .outputIndexes = {5},
+    };
+    const Model model = {
+            .main = std::move(subgraph),
+            .referenced = {},
+            .operandValues = {},
+            .pools = {},
+    };
+
+    // ensure that getSupportedOperations_1_3() checks model validity
+    ErrorStatus supportedOpsErrorStatus = ErrorStatus::GENERAL_FAILURE;
+    Return<void> supportedOpsReturn = kDevice->getSupportedOperations_1_3(
+            model, [&model, &supportedOpsErrorStatus](ErrorStatus status,
+                                                      const hidl_vec<bool>& supported) {
+                supportedOpsErrorStatus = status;
+                if (status == ErrorStatus::NONE) {
+                    ASSERT_EQ(supported.size(), model.main.operations.size());
+                }
+            });
+    ASSERT_TRUE(supportedOpsReturn.isOk());
+    ASSERT_EQ(supportedOpsErrorStatus, ErrorStatus::INVALID_ARGUMENT);
+
+    // ensure that prepareModel_1_3() checks model validity
+    sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback;
+    Return<ErrorStatus> prepareLaunchReturn = kDevice->prepareModel_1_3(
+            model, ExecutionPreference::FAST_SINGLE_ANSWER, Priority::MEDIUM, {},
+            hidl_vec<hidl_handle>(), hidl_vec<hidl_handle>(), HidlToken(), preparedModelCallback);
+    ASSERT_TRUE(prepareLaunchReturn.isOk());
+    // Note that preparation can fail for reasons other than an
+    // invalid model (invalid model should result in
+    // INVALID_ARGUMENT) -- for example, perhaps not all
+    // operations are supported, or perhaps the device hit some
+    // kind of capacity limit.
+ EXPECT_NE(prepareLaunchReturn, ErrorStatus::NONE); + EXPECT_NE(preparedModelCallback->getStatus(), ErrorStatus::NONE); + EXPECT_EQ(preparedModelCallback->getPreparedModel(), nullptr); +} + } // namespace android::hardware::neuralnetworks::V1_3::vts::functional diff --git a/neuralnetworks/1.3/vts/functional/Utils.cpp b/neuralnetworks/1.3/vts/functional/Utils.cpp index 23e2af823e..c460e1127b 100644 --- a/neuralnetworks/1.3/vts/functional/Utils.cpp +++ b/neuralnetworks/1.3/vts/functional/Utils.cpp @@ -17,11 +17,78 @@ #include "1.3/Utils.h" #include <iostream> +#include <numeric> +#include "android-base/logging.h" +#include "android/hardware/neuralnetworks/1.3/types.h" -namespace android::hardware::neuralnetworks::V1_3 { +namespace android::hardware::neuralnetworks { + +uint32_t sizeOfData(V1_3::OperandType type) { + switch (type) { + case V1_3::OperandType::FLOAT32: + case V1_3::OperandType::INT32: + case V1_3::OperandType::UINT32: + case V1_3::OperandType::TENSOR_FLOAT32: + case V1_3::OperandType::TENSOR_INT32: + return 4; + case V1_3::OperandType::TENSOR_QUANT16_SYMM: + case V1_3::OperandType::TENSOR_FLOAT16: + case V1_3::OperandType::FLOAT16: + case V1_3::OperandType::TENSOR_QUANT16_ASYMM: + return 2; + case V1_3::OperandType::TENSOR_QUANT8_ASYMM: + case V1_3::OperandType::BOOL: + case V1_3::OperandType::TENSOR_BOOL8: + case V1_3::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL: + case V1_3::OperandType::TENSOR_QUANT8_SYMM: + case V1_3::OperandType::TENSOR_QUANT8_ASYMM_SIGNED: + return 1; + case V1_3::OperandType::SUBGRAPH: + return 0; + default: + CHECK(false) << "Invalid OperandType " << static_cast<uint32_t>(type); + return 0; + } +} + +static bool isTensor(V1_3::OperandType type) { + switch (type) { + case V1_3::OperandType::FLOAT32: + case V1_3::OperandType::INT32: + case V1_3::OperandType::UINT32: + case V1_3::OperandType::FLOAT16: + case V1_3::OperandType::BOOL: + case V1_3::OperandType::SUBGRAPH: + return false; + case V1_3::OperandType::TENSOR_FLOAT32: + case V1_3::OperandType::TENSOR_INT32: + case V1_3::OperandType::TENSOR_QUANT16_SYMM: + case V1_3::OperandType::TENSOR_FLOAT16: + case V1_3::OperandType::TENSOR_QUANT16_ASYMM: + case V1_3::OperandType::TENSOR_QUANT8_ASYMM: + case V1_3::OperandType::TENSOR_BOOL8: + case V1_3::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL: + case V1_3::OperandType::TENSOR_QUANT8_SYMM: + case V1_3::OperandType::TENSOR_QUANT8_ASYMM_SIGNED: + return true; + default: + CHECK(false) << "Invalid OperandType " << static_cast<uint32_t>(type); + return false; + } +} + +uint32_t sizeOfData(const V1_3::Operand& operand) { + const uint32_t dataSize = sizeOfData(operand.type); + if (isTensor(operand.type) && operand.dimensions.size() == 0) return 0; + return std::accumulate(operand.dimensions.begin(), operand.dimensions.end(), dataSize, + std::multiplies<>{}); +} + +namespace V1_3 { ::std::ostream& operator<<(::std::ostream& os, ErrorStatus errorStatus) { return os << toString(errorStatus); } -} // namespace android::hardware::neuralnetworks::V1_3 +} // namespace V1_3 +} // namespace android::hardware::neuralnetworks diff --git a/neuralnetworks/1.3/vts/functional/ValidateModel.cpp b/neuralnetworks/1.3/vts/functional/ValidateModel.cpp index e590fdad2d..849ef7bf50 100644 --- a/neuralnetworks/1.3/vts/functional/ValidateModel.cpp +++ b/neuralnetworks/1.3/vts/functional/ValidateModel.cpp @@ -16,15 +16,22 @@ #define LOG_TAG "neuralnetworks_hidl_hal_test" +#include <android/hardware/neuralnetworks/1.1/types.h> +#include <android/hardware/neuralnetworks/1.3/types.h> #include 
"1.0/Utils.h" #include "1.3/Callbacks.h" #include "1.3/Utils.h" #include "GeneratedTestHarness.h" #include "VtsHalNeuralnetworks.h" +#include <optional> +#include <type_traits> +#include <utility> + namespace android::hardware::neuralnetworks::V1_3::vts::functional { using implementation::PreparedModelCallback; +using V1_0::DataLocation; using V1_1::ExecutionPreference; using V1_2::SymmPerChannelQuantParams; using HidlToken = @@ -112,6 +119,262 @@ static uint32_t addOperand(Model* model, OperandLifeTime lifetime) { return index; } +// If we introduce a CONSTANT_COPY for an operand of size operandSize, +// how much will this increase the size of the model? This assumes +// that we can (re)use all of model.operandValues for the operand +// value. +static size_t constantCopyExtraSize(const Model& model, size_t operandSize) { + const size_t operandValuesSize = model.operandValues.size(); + return (operandValuesSize < operandSize) ? (operandSize - operandValuesSize) : 0; +} + +// Highly specialized utility routine for converting an operand to +// CONSTANT_COPY lifetime. +// +// Expects that: +// - operand has a known size +// - operand->lifetime has already been set to CONSTANT_COPY +// - operand->location has been zeroed out +// +// Does the following: +// - initializes operand->location to point to the beginning of model->operandValues +// - resizes model->operandValues (if necessary) to be large enough for the operand +// value, padding it with zeroes on the end +// +// Potential problem: +// By changing the operand to CONSTANT_COPY lifetime, this function is effectively initializing the +// operand with unspecified (but deterministic) data. This means that the model may be invalidated +// in two ways: not only is the lifetime of CONSTANT_COPY invalid, but the operand's value in the +// graph may also be invalid (e.g., if the operand is used as an activation code and has an invalid +// value). For now, this should be fine because it just means we're not testing what we think we're +// testing in certain cases; but we can handwave this and assume we're probabilistically likely to +// exercise the validation code over the span of the entire test set and operand space. +// +// Aborts if the specified operand type is an extension type or OEM type. +static void becomeConstantCopy(Model* model, Operand* operand) { + // sizeOfData will abort if the specified type is an extension type or OEM type. + const size_t sizeOfOperand = sizeOfData(*operand); + EXPECT_NE(sizeOfOperand, size_t(0)); + operand->location.poolIndex = 0; + operand->location.offset = 0; + operand->location.length = sizeOfOperand; + if (model->operandValues.size() < sizeOfOperand) { + model->operandValues.resize(sizeOfOperand); + } +} + +// The sizeForBinder() functions estimate the size of the +// representation of a value when sent to binder. It's probably a bit +// of an under-estimate, because we don't know the size of the +// metadata in the binder format (e.g., representation of the size of +// a vector); but at least it adds up "big" things like vector +// contents. However, it doesn't treat inter-field or end-of-struct +// padding in a methodical way -- there's no attempt to be consistent +// in whether or not padding in the native (C++) representation +// contributes to the estimated size for the binder representation; +// and there's no attempt to understand what padding (if any) is +// needed in the binder representation. 
+// +// This assumes that non-metadata uses a fixed length encoding (e.g., +// a uint32_t is always encoded in sizeof(uint32_t) bytes, rather than +// using an encoding whose length is related to the magnitude of the +// encoded value). + +template <typename Type> +static size_t sizeForBinder(const Type& val) { + static_assert(std::is_trivially_copyable_v<std::remove_reference_t<Type>>, + "expected a trivially copyable type"); + return sizeof(val); +} + +template <typename Type> +static size_t sizeForBinder(const hidl_vec<Type>& vec) { + return std::accumulate(vec.begin(), vec.end(), 0, + [](size_t acc, const Type& x) { return acc + sizeForBinder(x); }); +} + +template <> +size_t sizeForBinder(const SymmPerChannelQuantParams& symmPerChannelQuantParams) { + size_t size = 0; + + size += sizeForBinder(symmPerChannelQuantParams.scales); + size += sizeForBinder(symmPerChannelQuantParams.channelDim); + + return size; +} + +template <> +size_t sizeForBinder(const V1_2::Operand::ExtraParams& extraParams) { + using Discriminator = V1_2::Operand::ExtraParams::hidl_discriminator; + switch (extraParams.getDiscriminator()) { + case Discriminator::none: + return 0; + case Discriminator::channelQuant: + return sizeForBinder(extraParams.channelQuant()); + case Discriminator::extension: + return sizeForBinder(extraParams.extension()); + } + LOG(FATAL) << "Unrecognized extraParams enum: " + << static_cast<int>(extraParams.getDiscriminator()); + return 0; +} + +template <> +size_t sizeForBinder(const Operand& operand) { + size_t size = 0; + + size += sizeForBinder(operand.type); + size += sizeForBinder(operand.dimensions); + size += sizeForBinder(operand.numberOfConsumers); + size += sizeForBinder(operand.scale); + size += sizeForBinder(operand.zeroPoint); + size += sizeForBinder(operand.lifetime); + size += sizeForBinder(operand.location); + size += sizeForBinder(operand.extraParams); + + return size; +} + +template <> +size_t sizeForBinder(const Operation& operation) { + size_t size = 0; + + size += sizeForBinder(operation.type); + size += sizeForBinder(operation.inputs); + size += sizeForBinder(operation.outputs); + + return size; +} + +template <> +size_t sizeForBinder(const hidl_string& name) { + return name.size(); +} + +template <> +size_t sizeForBinder(const hidl_memory& memory) { + // This is just a guess. 
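+    // It only counts the flattened native_handle_t (the struct itself plus
+    // its numFds + numInts data words) and the pool's name string; the
+    // contents of the memory region are not included in the estimate.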
+ + size_t size = 0; + + if (const native_handle_t* handle = memory.handle()) { + size += sizeof(*handle); + size += sizeof(handle->data[0]) * (handle->numFds + handle->numInts); + } + size += sizeForBinder(memory.name()); + + return size; +} + +template <> +size_t sizeForBinder(const Subgraph& subgraph) { + size_t size = 0; + + size += sizeForBinder(subgraph.operands); + size += sizeForBinder(subgraph.operations); + size += sizeForBinder(subgraph.inputIndexes); + size += sizeForBinder(subgraph.outputIndexes); + + return size; +} + +template <> +size_t sizeForBinder(const V1_2::Model::ExtensionNameAndPrefix& extensionNameToPrefix) { + size_t size = 0; + + size += sizeForBinder(extensionNameToPrefix.name); + size += sizeForBinder(extensionNameToPrefix.prefix); + + return size; +} + +template <> +size_t sizeForBinder(const Model& model) { + size_t size = 0; + + size += sizeForBinder(model.main); + size += sizeForBinder(model.referenced); + size += sizeForBinder(model.operandValues); + size += sizeForBinder(model.pools); + size += sizeForBinder(model.relaxComputationFloat32toFloat16); + size += sizeForBinder(model.extensionNameToPrefix); + + return size; +} + +// https://developer.android.com/reference/android/os/TransactionTooLargeException.html +// +// "The Binder transaction buffer has a limited fixed size, +// currently 1Mb, which is shared by all transactions in progress +// for the process." +// +// Will our representation fit under this limit? There are two complications: +// - Our representation size is just approximate (see sizeForBinder()). +// - This object may not be the only occupant of the Binder transaction buffer. +// So we'll be very conservative: We want the representation size to be no +// larger than half the transaction buffer size. +// +// If our representation grows large enough that it still fits within +// the transaction buffer but combined with other transactions may +// exceed the buffer size, then we may see intermittent HAL transport +// errors. +static bool exceedsBinderSizeLimit(size_t representationSize) { + // Instead of using this fixed buffer size, we might instead be able to use + // ProcessState::self()->getMmapSize(). However, this has a potential + // problem: The binder/mmap size of the current process does not necessarily + // indicate the binder/mmap size of the service (i.e., the other process). + // The only way it would be a good indication is if both the current process + // and the service use the default size. + static const size_t kHalfBufferSize = 1024 * 1024 / 2; + + return representationSize > kHalfBufferSize; +} + +///////////////////////// VALIDATE EXECUTION ORDER //////////////////////////// + +static void mutateExecutionOrderTest(const sp<IDevice>& device, const Model& model) { + for (size_t operation = 0; operation < model.main.operations.size(); ++operation) { + const Operation& operationObj = model.main.operations[operation]; + for (uint32_t input : operationObj.inputs) { + if (model.main.operands[input].lifetime == OperandLifeTime::TEMPORARY_VARIABLE || + model.main.operands[input].lifetime == OperandLifeTime::SUBGRAPH_OUTPUT) { + // This operation reads an operand written by some + // other operation. Move this operation to the + // beginning of the sequence, ensuring that it reads + // the operand before that operand is written, thereby + // violating execution order rules.
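// Note on the std::rotate calls below: rotating [begin, begin + operation + 1)
// about begin + operation moves this single operation to the front of the
// operation list, and rotating [begin + operation, end) about
// begin + operation + 1 moves it to the back; in both cases the relative
// order of every other operation is preserved.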
+ const std::string message = "mutateExecutionOrderTest: operation " + + std::to_string(operation) + " is a reader"; + validate(device, message, model, + [operation](Model* model, ExecutionPreference*, Priority*) { + auto& operations = model->main.operations; + std::rotate(operations.begin(), operations.begin() + operation, + operations.begin() + operation + 1); + }); + break; // only need to do this once per operation + } + } + for (uint32_t output : operationObj.outputs) { + if (model.main.operands[output].numberOfConsumers > 0) { + // This operation writes an operand read by some other + // operation. Move this operation to the end of the + // sequence, ensuring that it writes the operand after + // that operand is read, thereby violating execution + // order rules. + const std::string message = "mutateExecutionOrderTest: operation " + + std::to_string(operation) + " is a writer"; + validate(device, message, model, + [operation](Model* model, ExecutionPreference*, Priority*) { + auto& operations = model->main.operations; + std::rotate(operations.begin() + operation, + operations.begin() + operation + 1, operations.end()); + }); + break; // only need to do this once per operation + } + } + } +} + ///////////////////////// VALIDATE MODEL OPERAND TYPE ///////////////////////// static const uint32_t invalidOperandTypes[] = { @@ -261,9 +524,245 @@ static void mutateOperandZeroPointTest(const sp<IDevice>& device, const Model& m } } +///////////////////////// VALIDATE OPERAND LIFETIME ///////////////////////////////////////////// + +static std::vector<OperandLifeTime> getInvalidLifeTimes(const Model& model, size_t modelSize, + const Operand& operand) { + // TODO: Support OperandLifeTime::CONSTANT_REFERENCE as an invalid lifetime + // TODO: Support OperandLifeTime::NO_VALUE as an invalid lifetime + + // Ways to get an invalid lifetime: + // - change whether a lifetime means an operand should have a writer + std::vector<OperandLifeTime> ret; + switch (operand.lifetime) { + case OperandLifeTime::SUBGRAPH_OUTPUT: + case OperandLifeTime::TEMPORARY_VARIABLE: + ret = { + OperandLifeTime::SUBGRAPH_INPUT, + OperandLifeTime::CONSTANT_COPY, + }; + break; + case OperandLifeTime::CONSTANT_COPY: + case OperandLifeTime::CONSTANT_REFERENCE: + case OperandLifeTime::SUBGRAPH_INPUT: + ret = { + OperandLifeTime::TEMPORARY_VARIABLE, + OperandLifeTime::SUBGRAPH_OUTPUT, + }; + break; + case OperandLifeTime::NO_VALUE: + // Not enough information to know whether + // TEMPORARY_VARIABLE or CONSTANT_COPY would be invalid -- + // is this operand written (then CONSTANT_COPY would be + // invalid) or not (then TEMPORARY_VARIABLE would be + // invalid)? 
+ break; + case OperandLifeTime::SUBGRAPH: + break; + default: + ADD_FAILURE(); + break; + } + + const size_t operandSize = sizeOfData(operand); // will be zero if shape is unknown + if (!operandSize || + exceedsBinderSizeLimit(modelSize + constantCopyExtraSize(model, operandSize))) { + // Unknown size or too-large size + ret.erase(std::remove(ret.begin(), ret.end(), OperandLifeTime::CONSTANT_COPY), ret.end()); + } + + return ret; +} + +static void mutateOperandLifeTimeTest(const sp<IDevice>& device, const Model& model) { + const size_t modelSize = sizeForBinder(model); + for (size_t operand = 0; operand < model.main.operands.size(); ++operand) { + const std::vector<OperandLifeTime> invalidLifeTimes = + getInvalidLifeTimes(model, modelSize, model.main.operands[operand]); + for (OperandLifeTime invalidLifeTime : invalidLifeTimes) { + const std::string message = "mutateOperandLifetimeTest: operand " + + std::to_string(operand) + " has lifetime " + + toString(invalidLifeTime) + " instead of lifetime " + + toString(model.main.operands[operand].lifetime); + validate(device, message, model, + [operand, invalidLifeTime](Model* model, ExecutionPreference*, Priority*) { + static const DataLocation kZeroDataLocation = {}; + Operand& operandObj = model->main.operands[operand]; + switch (operandObj.lifetime) { + case OperandLifeTime::SUBGRAPH_INPUT: { + hidl_vec_remove(&model->main.inputIndexes, uint32_t(operand)); + break; + } + case OperandLifeTime::SUBGRAPH_OUTPUT: { + hidl_vec_remove(&model->main.outputIndexes, uint32_t(operand)); + break; + } + default: + break; + } + operandObj.lifetime = invalidLifeTime; + operandObj.location = kZeroDataLocation; + switch (invalidLifeTime) { + case OperandLifeTime::CONSTANT_COPY: { + becomeConstantCopy(model, &operandObj); + break; + } + case OperandLifeTime::SUBGRAPH_INPUT: + hidl_vec_push_back(&model->main.inputIndexes, uint32_t(operand)); + break; + case OperandLifeTime::SUBGRAPH_OUTPUT: + hidl_vec_push_back(&model->main.outputIndexes, uint32_t(operand)); + break; + default: + break; + } + }); + } + } +} + +///////////////////////// VALIDATE OPERAND INPUT-or-OUTPUT ////////////////////////////////////// + +static std::optional<OperandLifeTime> getInputOutputLifeTime(const Model& model, size_t modelSize, + const Operand& operand) { + // Ways to get an invalid lifetime (with respect to model inputIndexes and outputIndexes): + // - change whether a lifetime means an operand is a model input, a model output, or neither + // - preserve whether or not a lifetime means an operand should have a writer + switch (operand.lifetime) { + case OperandLifeTime::CONSTANT_COPY: + case OperandLifeTime::CONSTANT_REFERENCE: + return OperandLifeTime::SUBGRAPH_INPUT; + case OperandLifeTime::SUBGRAPH_INPUT: { + const size_t operandSize = sizeOfData(operand); // will be zero if shape is unknown + if (!operandSize || + exceedsBinderSizeLimit(modelSize + constantCopyExtraSize(model, operandSize))) { + // Unknown size or too-large size + break; + } + return OperandLifeTime::CONSTANT_COPY; + } + case OperandLifeTime::SUBGRAPH_OUTPUT: + return OperandLifeTime::TEMPORARY_VARIABLE; + case OperandLifeTime::TEMPORARY_VARIABLE: + return OperandLifeTime::SUBGRAPH_OUTPUT; + case OperandLifeTime::NO_VALUE: + // Not enough information to know whether + // TEMPORARY_VARIABLE or CONSTANT_COPY would be an + // appropriate choice -- is this operand written (then + // TEMPORARY_VARIABLE would be appropriate) or not (then + // CONSTANT_COPY would be appropriate)? 
+ break; + case OperandLifeTime::SUBGRAPH: + break; + default: + ADD_FAILURE(); + break; + } + + return std::nullopt; +} + +static void mutateOperandInputOutputTest(const sp<IDevice>& device, const Model& model) { + const size_t modelSize = sizeForBinder(model); + for (size_t operand = 0; operand < model.main.operands.size(); ++operand) { + const std::optional<OperandLifeTime> changedLifeTime = + getInputOutputLifeTime(model, modelSize, model.main.operands[operand]); + if (changedLifeTime) { + const std::string message = "mutateOperandInputOutputTest: operand " + + std::to_string(operand) + " has lifetime " + + toString(*changedLifeTime) + " instead of lifetime " + + toString(model.main.operands[operand].lifetime); + validate(device, message, model, + [operand, changedLifeTime](Model* model, ExecutionPreference*, Priority*) { + static const DataLocation kZeroDataLocation = {}; + Operand& operandObj = model->main.operands[operand]; + operandObj.lifetime = *changedLifeTime; + operandObj.location = kZeroDataLocation; + if (*changedLifeTime == OperandLifeTime::CONSTANT_COPY) { + becomeConstantCopy(model, &operandObj); + } + }); + } + } +} + +///////////////////////// VALIDATE OPERAND NUMBER OF CONSUMERS ////////////////////////////////// + +static std::vector<uint32_t> getInvalidNumberOfConsumers(uint32_t numberOfConsumers) { + if (numberOfConsumers == 0) { + return {1}; + } else { + return {numberOfConsumers - 1, numberOfConsumers + 1}; + } +} + +static void mutateOperandNumberOfConsumersTest(const sp<IDevice>& device, const Model& model) { + for (size_t operand = 0; operand < model.main.operands.size(); ++operand) { + const std::vector<uint32_t> invalidNumberOfConsumersVec = + getInvalidNumberOfConsumers(model.main.operands[operand].numberOfConsumers); + for (uint32_t invalidNumberOfConsumers : invalidNumberOfConsumersVec) { + const std::string message = + "mutateOperandNumberOfConsumersTest: operand " + std::to_string(operand) + + " numberOfConsumers = " + std::to_string(invalidNumberOfConsumers); + validate(device, message, model, + [operand, invalidNumberOfConsumers](Model* model, ExecutionPreference*, + Priority*) { + model->main.operands[operand].numberOfConsumers = invalidNumberOfConsumers; + }); + } + } +} + +///////////////////////// VALIDATE OPERAND NUMBER OF WRITERS //////////////////////////////////// + +static void mutateOperandAddWriterTest(const sp<IDevice>& device, const Model& model) { + for (size_t operation = 0; operation < model.main.operations.size(); ++operation) { + for (size_t badOutputNum = 0; + badOutputNum < model.main.operations[operation].outputs.size(); ++badOutputNum) { + const uint32_t outputOperandIndex = + model.main.operations[operation].outputs[badOutputNum]; + const std::string message = "mutateOperandAddWriterTest: operation " + + std::to_string(operation) + " writes to " + + std::to_string(outputOperandIndex); + // We'll insert a copy of the operation, all of whose + // OTHER output operands are newly-created -- i.e., + // there'll only be a duplicate write of ONE of that + // operation's output operands. 
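// That is: every output of the duplicated operation except
// outputs[badOutputNum] is redirected to a freshly appended
// TEMPORARY_VARIABLE operand with zero consumers, so the operand at
// badOutputNum is the only one that ends up with two writers; the consumer
// counts of the duplicated operation's inputs are bumped to stay consistent.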
+ validate(device, message, model, + [operation, badOutputNum](Model* model, ExecutionPreference*, Priority*) { + Operation newOperation = model->main.operations[operation]; + for (uint32_t input : newOperation.inputs) { + ++model->main.operands[input].numberOfConsumers; + } + for (size_t outputNum = 0; outputNum < newOperation.outputs.size(); + ++outputNum) { + if (outputNum == badOutputNum) continue; + + Operand operandValue = + model->main.operands[newOperation.outputs[outputNum]]; + operandValue.numberOfConsumers = 0; + if (operandValue.lifetime == OperandLifeTime::SUBGRAPH_OUTPUT) { + operandValue.lifetime = OperandLifeTime::TEMPORARY_VARIABLE; + } else { + ASSERT_EQ(operandValue.lifetime, + OperandLifeTime::TEMPORARY_VARIABLE); + } + newOperation.outputs[outputNum] = + hidl_vec_push_back(&model->main.operands, operandValue); + } + // Where do we insert the extra writer (a new + // operation)? It has to be later than all the + // writers of its inputs. The easiest thing to do + // is to insert it at the end of the operation + // sequence. + hidl_vec_push_back(&model->main.operations, newOperation); + }); + } + } +} + ///////////////////////// VALIDATE EXTRA ??? ///////////////////////// -// TODO: Operand::lifetime // TODO: Operand::location ///////////////////////// VALIDATE OPERATION OPERAND TYPE ///////////////////////// @@ -511,6 +1010,37 @@ static void mutateOperationOutputOperandIndexTest(const sp<IDevice>& device, con } } +///////////////////////// VALIDATE MODEL OPERANDS WRITTEN /////////////////////////////////////// + +static void mutateOperationRemoveWriteTest(const sp<IDevice>& device, const Model& model) { + for (size_t operation = 0; operation < model.main.operations.size(); ++operation) { + for (size_t outputNum = 0; outputNum < model.main.operations[operation].outputs.size(); + ++outputNum) { + const uint32_t outputOperandIndex = model.main.operations[operation].outputs[outputNum]; + if (model.main.operands[outputOperandIndex].numberOfConsumers > 0) { + const std::string message = "mutateOperationRemoveWriteTest: operation " + + std::to_string(operation) + " writes to " + + std::to_string(outputOperandIndex); + validate(device, message, model, + [operation, outputNum](Model* model, ExecutionPreference*, Priority*) { + uint32_t& outputOperandIndex = + model->main.operations[operation].outputs[outputNum]; + Operand operandValue = model->main.operands[outputOperandIndex]; + operandValue.numberOfConsumers = 0; + if (operandValue.lifetime == OperandLifeTime::SUBGRAPH_OUTPUT) { + operandValue.lifetime = OperandLifeTime::TEMPORARY_VARIABLE; + } else { + ASSERT_EQ(operandValue.lifetime, + OperandLifeTime::TEMPORARY_VARIABLE); + } + outputOperandIndex = + hidl_vec_push_back(&model->main.operands, operandValue); + }); + } + } + } +} + ///////////////////////// REMOVE OPERAND FROM EVERYTHING ///////////////////////// static void removeValueAndDecrementGreaterValues(hidl_vec<uint32_t>* vec, uint32_t value) { @@ -804,14 +1334,20 @@ static void mutateExecutionPriorityTest(const sp<IDevice>& device, const Model& ////////////////////////// ENTRY POINT ////////////////////////////// void validateModel(const sp<IDevice>& device, const Model& model) { + mutateExecutionOrderTest(device, model); mutateOperandTypeTest(device, model); mutateOperandRankTest(device, model); mutateOperandScaleTest(device, model); mutateOperandZeroPointTest(device, model); + mutateOperandLifeTimeTest(device, model); + mutateOperandInputOutputTest(device, model); + mutateOperandNumberOfConsumersTest(device, 
model); + mutateOperandAddWriterTest(device, model); mutateOperationOperandTypeTest(device, model); mutateOperationTypeTest(device, model); mutateOperationInputOperandIndexTest(device, model); mutateOperationOutputOperandIndexTest(device, model); + mutateOperationRemoveWriteTest(device, model); removeOperandTest(device, model); removeOperationTest(device, model); removeOperationInputTest(device, model); diff --git a/neuralnetworks/1.3/vts/functional/include/1.3/Utils.h b/neuralnetworks/1.3/vts/functional/include/1.3/Utils.h index 3661b66445..e07e73bde8 100644 --- a/neuralnetworks/1.3/vts/functional/include/1.3/Utils.h +++ b/neuralnetworks/1.3/vts/functional/include/1.3/Utils.h @@ -24,6 +24,18 @@ namespace android::hardware::neuralnetworks { inline constexpr V1_3::Priority kDefaultPriority = V1_3::Priority::MEDIUM; +// Returns the amount of space needed to store a value of the specified type. +// +// Aborts if the specified type is an extension type or OEM type. +uint32_t sizeOfData(V1_3::OperandType type); + +// Returns the amount of space needed to store a value of the dimensions and +// type of this operand. For a non-extension, non-OEM tensor with unspecified +// rank or at least one unspecified dimension, returns zero. +// +// Aborts if the specified type is an extension type or OEM type. +uint32_t sizeOfData(const V1_3::Operand& operand); + } // namespace android::hardware::neuralnetworks namespace android::hardware::neuralnetworks::V1_3 { diff --git a/sensors/common/default/2.X/multihal/HalProxy.cpp b/sensors/common/default/2.X/multihal/HalProxy.cpp index a09e9e938e..75ffc17a67 100644 --- a/sensors/common/default/2.X/multihal/HalProxy.cpp +++ b/sensors/common/default/2.X/multihal/HalProxy.cpp @@ -426,7 +426,7 @@ void HalProxy::initializeSubHalListFromConfigFile(const char* configFileName) { } else { std::string subHalLibraryFile; while (subHalConfigStream >> subHalLibraryFile) { - void* handle = dlopen(subHalLibraryFile.c_str(), RTLD_NOW); + void* handle = getHandleForSubHalSharedObject(subHalLibraryFile); if (handle == nullptr) { ALOGE("dlopen failed for library: %s", subHalLibraryFile.c_str()); } else { @@ -491,6 +491,25 @@ void HalProxy::initializeSensorList() { } } +void* HalProxy::getHandleForSubHalSharedObject(const std::string& filename) { + static const std::string kSubHalShareObjectLocations[] = { + "", // Default locations will be searched +#ifdef __LP64__ + "/vendor/lib64/hw/", "/odm/lib64/hw/" +#else + "/vendor/lib/hw/", "/odm/lib/hw/" +#endif + }; + + for (const std::string& dir : kSubHalShareObjectLocations) { + void* handle = dlopen((dir + filename).c_str(), RTLD_NOW); + if (handle != nullptr) { + return handle; + } + } + return nullptr; +} + void HalProxy::init() { initializeSensorList(); } diff --git a/sensors/common/default/2.X/multihal/include/HalProxy.h b/sensors/common/default/2.X/multihal/include/HalProxy.h index fb0b806bab..35d7c8bae1 100644 --- a/sensors/common/default/2.X/multihal/include/HalProxy.h +++ b/sensors/common/default/2.X/multihal/include/HalProxy.h @@ -267,6 +267,16 @@ class HalProxy : public V2_0::implementation::IScopedWakelockRefCounter, void initializeSensorList(); /** + * Try using the default include directories as well as the directories defined in + * kSubHalShareObjectLocations to get a handle for dlsym for a subhal. + * + * @param filename The file name to search for. + * + * @return The handle or nullptr if search failed. 
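 *
 * Search order (mirroring kSubHalShareObjectLocations in HalProxy.cpp): the
 * bare filename first, so the default dlopen() search paths apply, then
 * /vendor/lib64/hw/ and /odm/lib64/hw/ on 64-bit builds, or /vendor/lib/hw/
 * and /odm/lib/hw/ otherwise.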
+ */ + void* getHandleForSubHalSharedObject(const std::string& filename); + + /** * Calls the helper methods that all ctors use. */ void init(); diff --git a/tv/tuner/1.0/default/Demux.cpp b/tv/tuner/1.0/default/Demux.cpp index 95b4ebc7a6..4e5ae4b626 100644 --- a/tv/tuner/1.0/default/Demux.cpp +++ b/tv/tuner/1.0/default/Demux.cpp @@ -71,7 +71,7 @@ Return<void> Demux::openFilter(const DemuxFilterType& type, uint32_t bufferSize, mUsedFilterIds.insert(filterId); if (cb == nullptr) { - ALOGW("callback can't be null"); + ALOGW("[Demux] callback can't be null"); _hidl_cb(Result::INVALID_ARGUMENT, new Filter()); return Void(); } @@ -82,9 +82,14 @@ Return<void> Demux::openFilter(const DemuxFilterType& type, uint32_t bufferSize, _hidl_cb(Result::UNKNOWN_ERROR, filter); return Void(); } + mFilters[filterId] = filter; + bool result = true; + if (mDvr != nullptr && mDvr->getType() == DvrType::PLAYBACK) { + result = mDvr->addPlaybackFilter(filter); + } - _hidl_cb(Result::SUCCESS, filter); + _hidl_cb(result ? Result::SUCCESS : Result::INVALID_ARGUMENT, filter); return Void(); } @@ -130,7 +135,7 @@ Return<void> Demux::openDvr(DvrType type, uint32_t bufferSize, const sp<IDvrCall ALOGV("%s", __FUNCTION__); if (cb == nullptr) { - ALOGW("DVR callback can't be null"); + ALOGW("[Demux] DVR callback can't be null"); _hidl_cb(Result::INVALID_ARGUMENT, new Dvr()); return Void(); } @@ -174,11 +179,11 @@ Result Demux::removeFilter(uint32_t filterId) { void Demux::startBroadcastTsFilter(vector<uint8_t> data) { set<uint32_t>::iterator it; + uint16_t pid = ((data[1] & 0x1f) << 8) | ((data[2] & 0xff)); + if (DEBUG_DEMUX) { + ALOGW("[Demux] start ts filter pid: %d", pid); + } for (it = mUsedFilterIds.begin(); it != mUsedFilterIds.end(); it++) { - uint16_t pid = ((data[1] & 0x1f) << 8) | ((data[2] & 0xff)); - if (DEBUG_FILTER) { - ALOGW("start ts filter pid: %d", pid); - } if (pid == mFilters[*it]->getTpid()) { mFilters[*it]->updateFilterOutput(data); } @@ -187,10 +192,10 @@ void Demux::startBroadcastTsFilter(vector<uint8_t> data) { void Demux::sendFrontendInputToRecord(vector<uint8_t> data) { set<uint32_t>::iterator it; + if (DEBUG_DEMUX) { + ALOGW("[Demux] update record filter output"); + } for (it = mRecordFilterIds.begin(); it != mRecordFilterIds.end(); it++) { - if (DEBUG_FILTER) { - ALOGW("update record filter output"); - } mFilters[*it]->updateRecordOutput(data); } } diff --git a/tv/tuner/1.0/default/Demux.h b/tv/tuner/1.0/default/Demux.h index 759e348d93..3c91dafbd1 100644 --- a/tv/tuner/1.0/default/Demux.h +++ b/tv/tuner/1.0/default/Demux.h @@ -188,7 +188,7 @@ class Demux : public IDemux { int mPesSizeLeft = 0; vector<uint8_t> mPesOutput; - const bool DEBUG_FILTER = false; + const bool DEBUG_DEMUX = false; }; } // namespace implementation diff --git a/tv/tuner/1.0/default/Dvr.cpp b/tv/tuner/1.0/default/Dvr.cpp index 3088a9d73b..adb263553e 100644 --- a/tv/tuner/1.0/default/Dvr.cpp +++ b/tv/tuner/1.0/default/Dvr.cpp @@ -71,13 +71,10 @@ Return<Result> Dvr::attachFilter(const sp<IFilter>& filter) { } // check if the attached filter is a record filter - mFilters[filterId] = filter; - mIsRecordFilterAttached = true; if (!mDemux->attachRecordFilter(filterId)) { return Result::INVALID_ARGUMENT; } - mDemux->setIsRecording(mIsRecordStarted | mIsRecordFilterAttached); return Result::SUCCESS; } @@ -110,7 +107,6 @@ Return<Result> Dvr::detachFilter(const sp<IFilter>& filter) { // If all the filters are detached, record can't be started if (mFilters.empty()) { mIsRecordFilterAttached = false; - 
mDemux->setIsRecording(mIsRecordStarted | mIsRecordFilterAttached); } return Result::SUCCESS; @@ -132,8 +128,7 @@ Return<Result> Dvr::start() { pthread_setname_np(mDvrThread, "playback_waiting_loop"); } else if (mType == DvrType::RECORD) { mRecordStatus = RecordStatus::DATA_READY; - mIsRecordStarted = true; - mDemux->setIsRecording(mIsRecordStarted | mIsRecordFilterAttached); + mDemux->setIsRecording(mType == DvrType::RECORD); } // TODO start another thread to send filter status callback to the framework @@ -149,7 +144,7 @@ Return<Result> Dvr::stop() { std::lock_guard<std::mutex> lock(mDvrThreadLock); mIsRecordStarted = false; - mDemux->setIsRecording(mIsRecordStarted | mIsRecordFilterAttached); + mDemux->setIsRecording(false); return Result::SUCCESS; } @@ -175,7 +170,7 @@ bool Dvr::createDvrMQ() { std::unique_ptr<DvrMQ> tmpDvrMQ = std::unique_ptr<DvrMQ>(new (std::nothrow) DvrMQ(mBufferSize, true)); if (!tmpDvrMQ->isValid()) { - ALOGW("Failed to create FMQ of DVR"); + ALOGW("[Dvr] Failed to create FMQ of DVR"); return false; } @@ -256,7 +251,6 @@ bool Dvr::readPlaybackFMQ() { int playbackPacketSize = mDvrSettings.playback().packetSize; vector<uint8_t> dataOutputBuffer; dataOutputBuffer.resize(playbackPacketSize); - // Dispatch the packet to the PID matching filter output buffer for (int i = 0; i < size / playbackPacketSize; i++) { if (!mDvrMQ->read(dataOutputBuffer.data(), playbackPacketSize)) { @@ -283,7 +277,6 @@ void Dvr::startTpidFilter(vector<uint8_t> data) { bool Dvr::startFilterDispatcher() { std::map<uint32_t, sp<IFilter>>::iterator it; - // Handle the output data per filter type for (it = mFilters.begin(); it != mFilters.end(); it++) { if (mDemux->startFilterHandler(it->first) != Result::SUCCESS) { @@ -296,7 +289,10 @@ bool Dvr::startFilterDispatcher() { bool Dvr::writeRecordFMQ(const std::vector<uint8_t>& data) { std::lock_guard<std::mutex> lock(mWriteLock); - ALOGW("[Dvr] write record FMQ"); + if (mRecordStatus == RecordStatus::OVERFLOW) { + ALOGW("[Dvr] stops writing and wait for the client side flushing."); + return true; + } if (mDvrMQ->write(data.data(), data.size())) { mDvrEventFlag->wake(static_cast<uint32_t>(DemuxQueueNotifyBits::DATA_READY)); maySendRecordStatusCallback(); @@ -333,6 +329,27 @@ RecordStatus Dvr::checkRecordStatusChange(uint32_t availableToWrite, uint32_t av return mRecordStatus; } +bool Dvr::addPlaybackFilter(sp<IFilter> filter) { + uint32_t filterId; + Result status; + + filter->getId([&](Result result, uint32_t id) { + filterId = id; + status = result; + }); + + if (status != Result::SUCCESS) { + return false; + } + + mFilters[filterId] = filter; + return true; +} + +DvrType Dvr::getType() { + return mType; +} + } // namespace implementation } // namespace V1_0 } // namespace tuner diff --git a/tv/tuner/1.0/default/Dvr.h b/tv/tuner/1.0/default/Dvr.h index f39d8db152..08afd5dc83 100644 --- a/tv/tuner/1.0/default/Dvr.h +++ b/tv/tuner/1.0/default/Dvr.h @@ -81,6 +81,8 @@ class Dvr : public IDvr { bool createDvrMQ(); void sendBroadcastInputToDvrRecord(vector<uint8_t> byteBuffer); bool writeRecordFMQ(const std::vector<uint8_t>& data); + DvrType getType(); + bool addPlaybackFilter(sp<IFilter> filter); private: // Demux service diff --git a/tv/tuner/1.0/default/Filter.cpp b/tv/tuner/1.0/default/Filter.cpp index dab3c177a5..fef7a3599c 100644 --- a/tv/tuner/1.0/default/Filter.cpp +++ b/tv/tuner/1.0/default/Filter.cpp @@ -149,7 +149,7 @@ bool Filter::createFilterMQ() { std::unique_ptr<FilterMQ> tmpFilterMQ = std::unique_ptr<FilterMQ>(new (std::nothrow) 
FilterMQ(mBufferSize, true)); if (!tmpFilterMQ->isValid()) { - ALOGW("Failed to create FMQ of filter with id: %d", mFilterId); + ALOGW("[Filter] Failed to create FMQ of filter with id: %d", mFilterId); return false; } @@ -290,13 +290,11 @@ uint16_t Filter::getTpid() { void Filter::updateFilterOutput(vector<uint8_t> data) { std::lock_guard<std::mutex> lock(mFilterOutputLock); - ALOGD("[Filter] filter output updated"); mFilterOutput.insert(mFilterOutput.end(), data.begin(), data.end()); } void Filter::updateRecordOutput(vector<uint8_t> data) { std::lock_guard<std::mutex> lock(mRecordFilterOutputLock); - ALOGD("[Filter] record filter output updated"); mRecordFilterOutput.insert(mRecordFilterOutput.end(), data.begin(), data.end()); } @@ -438,7 +436,6 @@ Result Filter::startMediaFilterHandler() { if (mFilterOutput.empty()) { return Result::SUCCESS; } - for (int i = 0; i < mFilterOutput.size(); i += 188) { if (mPesSizeLeft == 0) { uint32_t prefix = (mFilterOutput[i + 4] << 16) | (mFilterOutput[i + 5] << 8) | diff --git a/tv/tuner/1.0/default/Frontend.h b/tv/tuner/1.0/default/Frontend.h index 8a30b91e87..65537d7a23 100644 --- a/tv/tuner/1.0/default/Frontend.h +++ b/tv/tuner/1.0/default/Frontend.h @@ -76,7 +76,7 @@ class Frontend : public IFrontend { FrontendId mId = 0; bool mIsLocked = false; - const string FRONTEND_STREAM_FILE = "/vendor/etc/dumpTs3.ts"; + const string FRONTEND_STREAM_FILE = "/vendor/etc/segment000000.ts"; std::ifstream mFrontendData; }; diff --git a/tv/tuner/1.0/default/Tuner.cpp b/tv/tuner/1.0/default/Tuner.cpp index b1f2490bb6..821d83f87b 100644 --- a/tv/tuner/1.0/default/Tuner.cpp +++ b/tv/tuner/1.0/default/Tuner.cpp @@ -161,34 +161,27 @@ Return<void> Tuner::getFrontendInfo(FrontendId frontendId, getFrontendInfo_cb _h return Void(); } - switch (mFrontends[frontendId]->getFrontendType()) { - case FrontendType::DVBT: - info.type = FrontendType::DVBT; - break; - default: - vector<FrontendStatusType> statusCaps = { - FrontendStatusType::DEMOD_LOCK, - FrontendStatusType::SNR, - FrontendStatusType::FEC, - FrontendStatusType::MODULATION, - FrontendStatusType::PLP_ID, - FrontendStatusType::LAYER_ERROR, - FrontendStatusType::ATSC3_PLP_INFO, - }; - // assign randomly selected values for testing. - info = { - .type = mFrontends[frontendId]->getFrontendType(), - .minFrequency = 139, - .maxFrequency = 1139, - .minSymbolRate = 45, - .maxSymbolRate = 1145, - .acquireRange = 30, - .exclusiveGroupId = 57, - .statusCaps = statusCaps, - .frontendCaps = mFrontendCaps[frontendId], - }; - break; - } + vector<FrontendStatusType> statusCaps = { + FrontendStatusType::DEMOD_LOCK, + FrontendStatusType::SNR, + FrontendStatusType::FEC, + FrontendStatusType::MODULATION, + FrontendStatusType::PLP_ID, + FrontendStatusType::LAYER_ERROR, + FrontendStatusType::ATSC3_PLP_INFO, + }; + // assign randomly selected values for testing. 
+ info = { + .type = mFrontends[frontendId]->getFrontendType(), + .minFrequency = 139, + .maxFrequency = 1139, + .minSymbolRate = 45, + .maxSymbolRate = 1145, + .acquireRange = 30, + .exclusiveGroupId = 57, + .statusCaps = statusCaps, + .frontendCaps = mFrontendCaps[frontendId], + }; _hidl_cb(Result::SUCCESS, info); return Void(); diff --git a/tv/tuner/1.0/vts/functional/DvrTests.cpp b/tv/tuner/1.0/vts/functional/DvrTests.cpp index 9b24aa7a08..7e7f8e6da8 100644 --- a/tv/tuner/1.0/vts/functional/DvrTests.cpp +++ b/tv/tuner/1.0/vts/functional/DvrTests.cpp @@ -16,17 +16,13 @@ #include "DvrTests.h" -void DvrCallback::startPlaybackInputThread(PlaybackConf playbackConf, +void DvrCallback::startPlaybackInputThread(string& dataInputFile, PlaybackSettings& settings, MQDesc& playbackMQDescriptor) { + mInputDataFile = dataInputFile; + mPlaybackSettings = settings; mPlaybackMQ = std::make_unique<FilterMQ>(playbackMQDescriptor, true /* resetPointers */); EXPECT_TRUE(mPlaybackMQ); - struct PlaybackThreadArgs* threadArgs = - (struct PlaybackThreadArgs*)malloc(sizeof(struct PlaybackThreadArgs)); - threadArgs->user = this; - threadArgs->playbackConf = &playbackConf; - threadArgs->keepWritingPlaybackFMQ = &mKeepWritingPlaybackFMQ; - - pthread_create(&mPlaybackThread, NULL, __threadLoopPlayback, (void*)threadArgs); + pthread_create(&mPlaybackThread, NULL, __threadLoopPlayback, this); pthread_setname_np(mPlaybackThread, "test_playback_input_loop"); } @@ -37,15 +33,13 @@ void DvrCallback::stopPlaybackThread() { android::Mutex::Autolock autoLock(mPlaybackThreadLock); } -void* DvrCallback::__threadLoopPlayback(void* threadArgs) { - DvrCallback* const self = - static_cast<DvrCallback*>(((struct PlaybackThreadArgs*)threadArgs)->user); - self->playbackThreadLoop(((struct PlaybackThreadArgs*)threadArgs)->playbackConf, - ((struct PlaybackThreadArgs*)threadArgs)->keepWritingPlaybackFMQ); +void* DvrCallback::__threadLoopPlayback(void* user) { + DvrCallback* const self = static_cast<DvrCallback*>(user); + self->playbackThreadLoop(); return 0; } -void DvrCallback::playbackThreadLoop(PlaybackConf* playbackConf, bool* keepWritingPlaybackFMQ) { +void DvrCallback::playbackThreadLoop() { android::Mutex::Autolock autoLock(mPlaybackThreadLock); mPlaybackThreadRunning = true; @@ -56,10 +50,10 @@ void DvrCallback::playbackThreadLoop(PlaybackConf* playbackConf, bool* keepWriti android::OK); // open the stream and get its length - std::ifstream inputData(playbackConf->inputDataFile, std::ifstream::binary); - int writeSize = playbackConf->setting.packetSize * 6; + std::ifstream inputData(mInputDataFile.c_str(), std::ifstream::binary); + int writeSize = mPlaybackSettings.packetSize * 6; char* buffer = new char[writeSize]; - ALOGW("[vts] playback thread loop start %s", playbackConf->inputDataFile.c_str()); + ALOGW("[vts] playback thread loop start %s!", mInputDataFile.c_str()); if (!inputData.is_open()) { mPlaybackThreadRunning = false; ALOGW("[vts] Error %s", strerror(errno)); @@ -67,7 +61,7 @@ void DvrCallback::playbackThreadLoop(PlaybackConf* playbackConf, bool* keepWriti while (mPlaybackThreadRunning) { // move the stream pointer for packet size * 6 every read until the end - while (*keepWritingPlaybackFMQ) { + while (mKeepWritingPlaybackFMQ) { inputData.read(buffer, writeSize); if (!inputData) { int leftSize = inputData.gcount(); @@ -105,6 +99,7 @@ void DvrCallback::testRecordOutput() { while (mDataOutputBuffer.empty()) { if (-ETIMEDOUT == mMsgCondition.waitRelative(mMsgLock, WAIT_TIMEOUT)) { EXPECT_TRUE(false) << "record output 
matching pid does not output within timeout"; + stopRecordThread(); return; } } @@ -138,6 +133,7 @@ void DvrCallback::recordThreadLoop(RecordSettings* /*recordSettings*/, bool* kee ALOGD("[vts] DvrCallback record threadLoop start."); android::Mutex::Autolock autoLock(mRecordThreadLock); mRecordThreadRunning = true; + mKeepReadingRecordFMQ = true; // Create the EventFlag that is used to signal the HAL impl that data have been // read from the Record FMQ @@ -183,7 +179,6 @@ bool DvrCallback::readRecordFMQ() { void DvrCallback::stopRecordThread() { mKeepReadingRecordFMQ = false; mRecordThreadRunning = false; - android::Mutex::Autolock autoLock(mRecordThreadLock); } AssertionResult DvrTests::openDvrInDemux(DvrType type, uint32_t bufferSize) { @@ -198,6 +193,9 @@ AssertionResult DvrTests::openDvrInDemux(DvrType type, uint32_t bufferSize) { status = result; }); + if (status == Result::SUCCESS) { + mDvrCallback->setDvr(mDvr); + } return AssertionResult(status == Result::SUCCESS); } diff --git a/tv/tuner/1.0/vts/functional/DvrTests.h b/tv/tuner/1.0/vts/functional/DvrTests.h index d60ce2bd9d..dd00c27431 100644 --- a/tv/tuner/1.0/vts/functional/DvrTests.h +++ b/tv/tuner/1.0/vts/functional/DvrTests.h @@ -54,15 +54,10 @@ using android::hardware::tv::tuner::V1_0::Result; #define WAIT_TIMEOUT 3000000000 -struct PlaybackConf { - string inputDataFile; - PlaybackSettings setting; -}; - class DvrCallback : public IDvrCallback { public: virtual Return<void> onRecordStatus(DemuxFilterStatus status) override { - ALOGW("[vts] record status %hhu", status); + ALOGD("[vts] record status %hhu", status); switch (status) { case DemuxFilterStatus::DATA_READY: break; @@ -70,7 +65,12 @@ class DvrCallback : public IDvrCallback { break; case DemuxFilterStatus::HIGH_WATER: case DemuxFilterStatus::OVERFLOW: - ALOGW("[vts] record overflow. Flushing"); + ALOGD("[vts] record overflow. 
Flushing."); + EXPECT_TRUE(mDvr) << "Dvr callback is not set with an IDvr"; + if (mDvr) { + Result result = mDvr->flush(); + ALOGD("[vts] Flushing result %d.", result); + } break; } return Void(); @@ -78,16 +78,16 @@ class DvrCallback : public IDvrCallback { virtual Return<void> onPlaybackStatus(PlaybackStatus status) override { // android::Mutex::Autolock autoLock(mMsgLock); - ALOGW("[vts] playback status %d", status); + ALOGD("[vts] playback status %d", status); switch (status) { case PlaybackStatus::SPACE_EMPTY: case PlaybackStatus::SPACE_ALMOST_EMPTY: - ALOGW("[vts] keep playback inputing %d", status); + ALOGD("[vts] keep playback inputing %d", status); mKeepWritingPlaybackFMQ = true; break; case PlaybackStatus::SPACE_ALMOST_FULL: case PlaybackStatus::SPACE_FULL: - ALOGW("[vts] stop playback inputing %d", status); + ALOGD("[vts] stop playback inputing %d", status); mKeepWritingPlaybackFMQ = false; break; } @@ -98,21 +98,19 @@ class DvrCallback : public IDvrCallback { void testRecordOutput(); void stopRecordThread(); - void startPlaybackInputThread(PlaybackConf playbackConf, MQDesc& playbackMQDescriptor); + void startPlaybackInputThread(string& dataInputFile, PlaybackSettings& settings, + MQDesc& playbackMQDescriptor); void startRecordOutputThread(RecordSettings recordSettings, MQDesc& recordMQDescriptor); - static void* __threadLoopPlayback(void* threadArgs); + static void* __threadLoopPlayback(void* user); static void* __threadLoopRecord(void* threadArgs); - void playbackThreadLoop(PlaybackConf* playbackConf, bool* keepWritingPlaybackFMQ); + void playbackThreadLoop(); void recordThreadLoop(RecordSettings* recordSetting, bool* keepWritingPlaybackFMQ); bool readRecordFMQ(); + void setDvr(sp<IDvr> dvr) { mDvr = dvr; } + private: - struct PlaybackThreadArgs { - DvrCallback* user; - PlaybackConf* playbackConf; - bool* keepWritingPlaybackFMQ; - }; struct RecordThreadArgs { DvrCallback* user; RecordSettings* recordSettings; @@ -137,6 +135,10 @@ class DvrCallback : public IDvrCallback { bool mRecordThreadRunning; pthread_t mPlaybackThread; pthread_t mRecordThread; + string mInputDataFile; + PlaybackSettings mPlaybackSettings; + + sp<IDvr> mDvr = nullptr; // int mPidFilterOutputCount = 0; }; @@ -147,11 +149,7 @@ class DvrTests { void setDemux(sp<IDemux> demux) { mDemux = demux; } void startPlaybackInputThread(string& dataInputFile, PlaybackSettings& settings) { - PlaybackConf conf{ - .inputDataFile = dataInputFile, - .setting = settings, - }; - mDvrCallback->startPlaybackInputThread(conf, mDvrMQDescriptor); + mDvrCallback->startPlaybackInputThread(dataInputFile, settings, mDvrMQDescriptor); }; void startRecordOutputThread(RecordSettings settings) { diff --git a/tv/tuner/1.0/vts/functional/VtsHalTvTunerV1_0TargetTest.cpp b/tv/tuner/1.0/vts/functional/VtsHalTvTunerV1_0TargetTest.cpp index c5b159f4f3..c44f77d1a1 100644 --- a/tv/tuner/1.0/vts/functional/VtsHalTvTunerV1_0TargetTest.cpp +++ b/tv/tuner/1.0/vts/functional/VtsHalTvTunerV1_0TargetTest.cpp @@ -130,7 +130,6 @@ void TunerPlaybackHidlTest::playbackSingleFilterTest(FilterConfig filterConf, Dv uint32_t demuxId; sp<IDemux> demux; uint32_t filterId; - sp<IFilter> filter; ASSERT_TRUE(mDemuxTests.openDemux(demux, demuxId)); mFilterTests.setDemux(demux); @@ -142,8 +141,6 @@ void TunerPlaybackHidlTest::playbackSingleFilterTest(FilterConfig filterConf, Dv ASSERT_TRUE(mFilterTests.getNewlyOpenedFilterId(filterId)); ASSERT_TRUE(mFilterTests.configFilter(filterConf.settings, filterId)); ASSERT_TRUE(mFilterTests.getFilterMQDescriptor(filterId)); - 
filter = mFilterTests.getFilterById(filterId); - ASSERT_TRUE(filter != nullptr); mDvrTests.startPlaybackInputThread(dvrConf.playbackInputFile, dvrConf.settings.playback()); ASSERT_TRUE(mDvrTests.startDvr()); ASSERT_TRUE(mFilterTests.startFilter(filterId)); @@ -181,12 +178,14 @@ void TunerRecordHidlTest::recordSingleFilterTest(FilterConfig filterConf, ASSERT_TRUE(mFilterTests.getFilterMQDescriptor(filterId)); filter = mFilterTests.getFilterById(filterId); ASSERT_TRUE(filter != nullptr); - ASSERT_TRUE(mDvrTests.attachFilterToDvr(filter)); mDvrTests.startRecordOutputThread(dvrConf.settings.record()); + ASSERT_TRUE(mDvrTests.attachFilterToDvr(filter)); ASSERT_TRUE(mDvrTests.startDvr()); ASSERT_TRUE(mFilterTests.startFilter(filterId)); + ASSERT_TRUE(mFrontendTests.tuneFrontend(frontendConf)); mDvrTests.testRecordOutput(); mDvrTests.stopRecordThread(); + ASSERT_TRUE(mFrontendTests.stopTuneFrontend()); ASSERT_TRUE(mFilterTests.stopFilter(filterId)); ASSERT_TRUE(mDvrTests.stopDvr()); ASSERT_TRUE(mDvrTests.detachFilterToDvr(filter)); @@ -280,11 +279,6 @@ TEST_P(TunerBroadcastHidlTest, BroadcastDataFlowAudioFilterTest) { broadcastSingleFilterTest(filterArray[TS_AUDIO0], frontendArray[DVBS]); } -TEST_P(TunerBroadcastHidlTest, BroadcastDataFlowTsFilterTest) { - description("Test TS Filter functionality in Broadcast use case."); - broadcastSingleFilterTest(filterArray[TS_TS0], frontendArray[DVBS]); -} - TEST_P(TunerBroadcastHidlTest, BroadcastDataFlowSectionFilterTest) { description("Test Section Filter functionality in Broadcast use case."); broadcastSingleFilterTest(filterArray[TS_SECTION0], frontendArray[DVBS]); @@ -295,9 +289,9 @@ TEST_P(TunerBroadcastHidlTest, IonBufferTest) { broadcastSingleFilterTest(filterArray[TS_VIDEO0], frontendArray[DVBS]); } -TEST_P(TunerPlaybackHidlTest, PlaybackDataFlowWithTsRecordFilterTest) { - description("Feed ts data from playback and configure Ts filter to get output"); - playbackSingleFilterTest(filterArray[TS_VIDEO1], dvrArray[DVR_PLAYBACK0]); +TEST_P(TunerPlaybackHidlTest, PlaybackDataFlowWithTsSectionFilterTest) { + description("Feed ts data from playback and configure Ts section filter to get output"); + playbackSingleFilterTest(filterArray[TS_SECTION0], dvrArray[DVR_PLAYBACK0]); } TEST_P(TunerRecordHidlTest, AttachFiltersToRecordTest) { diff --git a/tv/tuner/1.0/vts/functional/VtsHalTvTunerV1_0TestConfigurations.h b/tv/tuner/1.0/vts/functional/VtsHalTvTunerV1_0TestConfigurations.h index a9f892262c..b84013b665 100644 --- a/tv/tuner/1.0/vts/functional/VtsHalTvTunerV1_0TestConfigurations.h +++ b/tv/tuner/1.0/vts/functional/VtsHalTvTunerV1_0TestConfigurations.h @@ -153,18 +153,18 @@ inline void initFilterConfig() { filterArray[TS_VIDEO0].type.mainType = DemuxFilterMainType::TS; filterArray[TS_VIDEO0].type.subType.tsFilterType(DemuxTsFilterType::VIDEO); filterArray[TS_VIDEO0].bufferSize = FMQ_SIZE_16M; - filterArray[TS_VIDEO0].settings.ts().tpid = 119; + filterArray[TS_VIDEO0].settings.ts().tpid = 256; filterArray[TS_VIDEO0].settings.ts().filterSettings.av({.isPassthrough = false}); filterArray[TS_VIDEO1].type.mainType = DemuxFilterMainType::TS; filterArray[TS_VIDEO1].type.subType.tsFilterType(DemuxTsFilterType::VIDEO); filterArray[TS_VIDEO1].bufferSize = FMQ_SIZE_16M; - filterArray[TS_VIDEO1].settings.ts().tpid = 81; + filterArray[TS_VIDEO1].settings.ts().tpid = 256; filterArray[TS_VIDEO1].settings.ts().filterSettings.av({.isPassthrough = false}); // TS AUDIO filter setting filterArray[TS_AUDIO0].type.mainType = DemuxFilterMainType::TS; 
filterArray[TS_AUDIO0].type.subType.tsFilterType(DemuxTsFilterType::AUDIO); filterArray[TS_AUDIO0].bufferSize = FMQ_SIZE_16M; - filterArray[TS_AUDIO0].settings.ts().tpid = 84; + filterArray[TS_AUDIO0].settings.ts().tpid = 256; filterArray[TS_AUDIO0].settings.ts().filterSettings.av({.isPassthrough = false}); // TS PES filter setting filterArray[TS_PES0].type.mainType = DemuxFilterMainType::TS; @@ -179,19 +179,19 @@ inline void initFilterConfig() { filterArray[TS_PCR0].type.mainType = DemuxFilterMainType::TS; filterArray[TS_PCR0].type.subType.tsFilterType(DemuxTsFilterType::PCR); filterArray[TS_PCR0].bufferSize = FMQ_SIZE_16M; - filterArray[TS_PCR0].settings.ts().tpid = 81; + filterArray[TS_PCR0].settings.ts().tpid = 256; filterArray[TS_PCR0].settings.ts().filterSettings.noinit(); // TS filter setting filterArray[TS_TS0].type.mainType = DemuxFilterMainType::TS; filterArray[TS_TS0].type.subType.tsFilterType(DemuxTsFilterType::TS); filterArray[TS_TS0].bufferSize = FMQ_SIZE_16M; - filterArray[TS_TS0].settings.ts().tpid = 18; + filterArray[TS_TS0].settings.ts().tpid = 256; filterArray[TS_TS0].settings.ts().filterSettings.noinit(); // TS SECTION filter setting filterArray[TS_SECTION0].type.mainType = DemuxFilterMainType::TS; filterArray[TS_SECTION0].type.subType.tsFilterType(DemuxTsFilterType::SECTION); filterArray[TS_SECTION0].bufferSize = FMQ_SIZE_16M; - filterArray[TS_SECTION0].settings.ts().tpid = 48; + filterArray[TS_SECTION0].settings.ts().tpid = 256; filterArray[TS_SECTION0].settings.ts().filterSettings.section({ .isRaw = false, }); @@ -224,7 +224,7 @@ inline void initDvrConfig() { .packetSize = 188, }; dvrArray[DVR_PLAYBACK0].type = DvrType::PLAYBACK; - dvrArray[DVR_PLAYBACK0].playbackInputFile = "/vendor/etc/test1.ts"; + dvrArray[DVR_PLAYBACK0].playbackInputFile = "/vendor/etc/segment000000.ts"; dvrArray[DVR_PLAYBACK0].bufferSize = FMQ_SIZE_4M; dvrArray[DVR_PLAYBACK0].settings.playback(playbackSettings); }; diff --git a/wifi/1.4/default/Android.mk b/wifi/1.4/default/Android.mk index c481bc61af..f566b80822 100644 --- a/wifi/1.4/default/Android.mk +++ b/wifi/1.4/default/Android.mk @@ -36,6 +36,9 @@ endif ifdef WIFI_HIDL_FEATURE_DISABLE_AP_MAC_RANDOMIZATION LOCAL_CPPFLAGS += -DWIFI_HIDL_FEATURE_DISABLE_AP_MAC_RANDOMIZATION endif +ifdef WIFI_AVOID_IFACE_RESET_MAC_CHANGE +LOCAL_CPPFLAGS += -DWIFI_AVOID_IFACE_RESET_MAC_CHANGE +endif ifdef QC_WIFI_HIDL_FEATURE_DUAL_AP LOCAL_CPPFLAGS += -DQC_WIFI_HIDL_FEATURE_DUAL_AP endif diff --git a/wifi/1.4/default/wifi_chip.cpp b/wifi/1.4/default/wifi_chip.cpp index 2fa0cb833c..b667b664e0 100644 --- a/wifi/1.4/default/wifi_chip.cpp +++ b/wifi/1.4/default/wifi_chip.cpp @@ -102,6 +102,16 @@ std::string getWlanIfaceName(unsigned idx) { return "wlan" + std::to_string(idx); } +// Returns the dedicated iface name if one is defined. +std::string getApIfaceName() { + std::array<char, PROPERTY_VALUE_MAX> buffer; + if (property_get("ro.vendor.wifi.sap.interface", buffer.data(), nullptr) == + 0) { + return {}; + } + return buffer.data(); +} + std::string getP2pIfaceName() { std::array<char, PROPERTY_VALUE_MAX> buffer; property_get("wifi.direct.interface", buffer.data(), "p2p0"); @@ -1623,6 +1633,11 @@ std::string WifiChip::allocateApOrStaIfaceName(uint32_t start_idx) { // AP iface names start with idx 1 for modes supporting // concurrent STA, else start with idx 0. std::string WifiChip::allocateApIfaceName() { + // Check if we have a dedicated iface for AP. 
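// getApIfaceName() returns the value of ro.vendor.wifi.sap.interface, or an
// empty string when that property is unset; in the unset case we fall back
// below to the generated wlan<idx> name (idx 1 when STA+AP concurrency is
// allowed in the current mode, idx 0 otherwise).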
+ std::string ifname = getApIfaceName(); + if (!ifname.empty()) { + return ifname; + } return allocateApOrStaIfaceName( isStaApConcurrencyAllowedInCurrentMode() ? 1 : 0); } diff --git a/wifi/1.4/default/wifi_iface_util.cpp b/wifi/1.4/default/wifi_iface_util.cpp index 1d15d2a57b..bb1e5deeab 100644 --- a/wifi/1.4/default/wifi_iface_util.cpp +++ b/wifi/1.4/default/wifi_iface_util.cpp @@ -55,18 +55,22 @@ std::array<uint8_t, 6> WifiIfaceUtil::getFactoryMacAddress( bool WifiIfaceUtil::setMacAddress(const std::string& iface_name, const std::array<uint8_t, 6>& mac) { +#ifndef WIFI_AVOID_IFACE_RESET_MAC_CHANGE if (!iface_tool_.lock()->SetUpState(iface_name.c_str(), false)) { LOG(ERROR) << "SetUpState(false) failed."; return false; } +#endif if (!iface_tool_.lock()->SetMacAddress(iface_name.c_str(), mac)) { LOG(ERROR) << "SetMacAddress failed."; return false; } +#ifndef WIFI_AVOID_IFACE_RESET_MAC_CHANGE if (!iface_tool_.lock()->SetUpState(iface_name.c_str(), true)) { LOG(ERROR) << "SetUpState(true) failed."; return false; } +#endif IfaceEventHandlers event_handlers = {}; const auto it = event_handlers_map_.find(iface_name); if (it != event_handlers_map_.end()) { diff --git a/wifi/supplicant/1.3/vts/functional/supplicant_sta_iface_hidl_test.cpp b/wifi/supplicant/1.3/vts/functional/supplicant_sta_iface_hidl_test.cpp index 3754520eeb..40202980bc 100644 --- a/wifi/supplicant/1.3/vts/functional/supplicant_sta_iface_hidl_test.cpp +++ b/wifi/supplicant/1.3/vts/functional/supplicant_sta_iface_hidl_test.cpp @@ -64,6 +64,7 @@ class SupplicantStaIfaceHidlTest isP2pOn_ = testing::deviceSupportsFeature("android.hardware.wifi.direct"); + stopSupplicant(wifi_v1_0_instance_name_); startSupplicantAndWaitForHidlService(wifi_v1_0_instance_name_, supplicant_v1_3_instance_name_); supplicant_ = diff --git a/wifi/supplicant/1.3/vts/functional/supplicant_sta_network_hidl_test.cpp b/wifi/supplicant/1.3/vts/functional/supplicant_sta_network_hidl_test.cpp index 9c40de1cb3..7603c5b914 100644 --- a/wifi/supplicant/1.3/vts/functional/supplicant_sta_network_hidl_test.cpp +++ b/wifi/supplicant/1.3/vts/functional/supplicant_sta_network_hidl_test.cpp @@ -51,6 +51,8 @@ class SupplicantStaNetworkHidlTest supplicant_v1_3_instance_name_ = std::get<1>(GetParam()); isP2pOn_ = testing::deviceSupportsFeature("android.hardware.wifi.direct"); + + stopSupplicant(wifi_v1_0_instance_name_); startSupplicantAndWaitForHidlService(wifi_v1_0_instance_name_, supplicant_v1_3_instance_name_); supplicant_ = |