diff options
Diffstat (limited to 'neuralnetworks/1.2/vts/functional/ValidateModel.cpp')
-rw-r--r-- | neuralnetworks/1.2/vts/functional/ValidateModel.cpp | 55 |
1 file changed, 46 insertions, 9 deletions
diff --git a/neuralnetworks/1.2/vts/functional/ValidateModel.cpp b/neuralnetworks/1.2/vts/functional/ValidateModel.cpp index 11284ce0e7..3b8e3dd4c2 100644 --- a/neuralnetworks/1.2/vts/functional/ValidateModel.cpp +++ b/neuralnetworks/1.2/vts/functional/ValidateModel.cpp @@ -128,10 +128,10 @@ static uint32_t addOperand(Model* model, OperandLifeTime lifetime) { ///////////////////////// VALIDATE MODEL OPERAND TYPE ///////////////////////// static const uint32_t invalidOperandTypes[] = { - static_cast<uint32_t>(OperandTypeRange::OPERAND_FUNDAMENTAL_MIN) - 1, - static_cast<uint32_t>(OperandTypeRange::OPERAND_FUNDAMENTAL_MAX) + 1, - static_cast<uint32_t>(OperandTypeRange::OPERAND_OEM_MIN) - 1, - static_cast<uint32_t>(OperandTypeRange::OPERAND_OEM_MAX) + 1, + static_cast<uint32_t>(OperandTypeRange::FUNDAMENTAL_MIN) - 1, + static_cast<uint32_t>(OperandTypeRange::FUNDAMENTAL_MAX) + 1, + static_cast<uint32_t>(OperandTypeRange::OEM_MIN) - 1, + static_cast<uint32_t>(OperandTypeRange::OEM_MAX) + 1, }; static void mutateOperandTypeTest(const sp<IDevice>& device, const Model& model) { @@ -157,10 +157,12 @@ static uint32_t getInvalidRank(OperandType type) { case OperandType::UINT32: case OperandType::BOOL: return 1; + case OperandType::TENSOR_BOOL8: case OperandType::TENSOR_FLOAT16: case OperandType::TENSOR_FLOAT32: case OperandType::TENSOR_INT32: case OperandType::TENSOR_QUANT8_ASYMM: + case OperandType::TENSOR_QUANT8_SYMM: case OperandType::TENSOR_QUANT16_ASYMM: case OperandType::TENSOR_QUANT16_SYMM: case OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL: @@ -193,12 +195,14 @@ static float getInvalidScale(OperandType type) { case OperandType::INT32: case OperandType::UINT32: case OperandType::BOOL: + case OperandType::TENSOR_BOOL8: case OperandType::TENSOR_FLOAT16: case OperandType::TENSOR_FLOAT32: case OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL: return 1.0f; case OperandType::TENSOR_INT32: return -1.0f; + case OperandType::TENSOR_QUANT8_SYMM: case OperandType::TENSOR_QUANT8_ASYMM: 
case OperandType::TENSOR_QUANT16_ASYMM: case OperandType::TENSOR_QUANT16_SYMM: @@ -228,6 +232,7 @@ static std::vector<int32_t> getInvalidZeroPoints(OperandType type) { case OperandType::INT32: case OperandType::UINT32: case OperandType::BOOL: + case OperandType::TENSOR_BOOL8: case OperandType::TENSOR_FLOAT16: case OperandType::TENSOR_FLOAT32: case OperandType::TENSOR_INT32: @@ -235,6 +240,8 @@ static std::vector<int32_t> getInvalidZeroPoints(OperandType type) { return {1}; case OperandType::TENSOR_QUANT8_ASYMM: return {-1, 256}; + case OperandType::TENSOR_QUANT8_SYMM: + return {-129, -1, 1, 128}; case OperandType::TENSOR_QUANT16_ASYMM: return {-1, 65536}; case OperandType::TENSOR_QUANT16_SYMM: @@ -279,6 +286,7 @@ static void mutateOperand(Operand* operand, OperandType type) { newOperand.scale = 0.0f; newOperand.zeroPoint = 0; break; + case OperandType::TENSOR_BOOL8: case OperandType::TENSOR_FLOAT16: case OperandType::TENSOR_FLOAT32: newOperand.dimensions = @@ -292,6 +300,7 @@ static void mutateOperand(Operand* operand, OperandType type) { newOperand.zeroPoint = 0; break; case OperandType::TENSOR_QUANT8_ASYMM: + case OperandType::TENSOR_QUANT8_SYMM: case OperandType::TENSOR_QUANT16_ASYMM: case OperandType::TENSOR_QUANT16_SYMM: newOperand.dimensions = @@ -334,9 +343,14 @@ static bool mutateOperationOperandTypeSkip(size_t operand, OperandType type, con // TENSOR_(FLOAT16|FLOAT32|INT32|QUANT8_ASYMM). // - CAST's argument can be any of TENSOR_(FLOAT16|FLOAT32|INT32|QUANT8_ASYMM). // - RANDOM_MULTINOMIAL's argument can be either TENSOR_FLOAT16 or TENSOR_FLOAT32. + // - DEQUANTIZE input can be any of + // TENSOR_(QUANT8_ASYMM|QUANT8_SYMM|QUANT8_SYMM_PER_CHANNEL), output can + // be of either TENSOR_FLOAT16 or TENSOR_FLOAT32. 
+ // - QUANTIZE input can be either TENSOR_FLOAT16 or TENSOR_FLOAT32 // - CONV_2D filter type (arg 1) can be QUANT8_ASYMM or QUANT8_SYMM_PER_CHANNEL // - DEPTHWISE_CONV_2D filter type (arg 1) can be QUANT8_ASYMM or QUANT8_SYMM_PER_CHANNEL // - GROUPED_CONV_2D filter type (arg 1) can be QUANT8_ASYMM or QUANT8_SYMM_PER_CHANNEL + // - TRANSPOSE_CONV_2D filter type (arg 1) can be QUANT8_ASYMM or QUANT8_SYMM_PER_CHANNEL switch (operation.type) { case OperationType::LSH_PROJECTION: { if (operand == operation.inputs[1]) { @@ -351,11 +365,26 @@ static bool mutateOperationOperandTypeSkip(size_t operand, OperandType type, con return true; } } break; + case OperationType::QUANTIZE: case OperationType::RANDOM_MULTINOMIAL: { - if (type == OperandType::TENSOR_FLOAT16 || type == OperandType::TENSOR_FLOAT32) { + if (operand == operation.inputs[0] && + (type == OperandType::TENSOR_FLOAT16 || type == OperandType::TENSOR_FLOAT32)) { return true; } } break; + case OperationType::DEQUANTIZE: { + if (operand == operation.inputs[0] && + (type == OperandType::TENSOR_QUANT8_ASYMM || + type == OperandType::TENSOR_QUANT8_SYMM || + type == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL)) { + return true; + } + if (operand == operation.outputs[0] && + (type == OperandType::TENSOR_FLOAT16 || type == OperandType::TENSOR_FLOAT32)) { + return true; + } + } break; + case OperationType::TRANSPOSE_CONV_2D: case OperationType::GROUPED_CONV_2D: case OperationType::DEPTHWISE_CONV_2D: case OperationType::CONV_2D: { @@ -390,10 +419,9 @@ static void mutateOperationOperandTypeTest(const sp<IDevice>& device, const Mode ///////////////////////// VALIDATE MODEL OPERATION TYPE ///////////////////////// static const uint32_t invalidOperationTypes[] = { - static_cast<uint32_t>(OperationTypeRange::OPERATION_FUNDAMENTAL_MIN) - 1, - static_cast<uint32_t>(OperationTypeRange::OPERATION_FUNDAMENTAL_MAX) + 1, - static_cast<uint32_t>(OperationTypeRange::OPERATION_OEM_MIN) - 1, - 
static_cast<uint32_t>(OperationTypeRange::OPERATION_OEM_MAX) + 1, + static_cast<uint32_t>(OperationTypeRange::FUNDAMENTAL_MAX) + 1, + static_cast<uint32_t>(OperationTypeRange::OEM_MIN) - 1, + static_cast<uint32_t>(OperationTypeRange::OEM_MAX) + 1, }; static void mutateOperationTypeTest(const sp<IDevice>& device, const Model& model) { @@ -477,6 +505,15 @@ static bool removeOperandSkip(size_t operand, const Model& model) { } } } + // BIDIRECTIONAL_SEQUENCE_RNN can have either one or two outputs + // depending on a mergeOutputs parameter + if (operation.type == OperationType::BIDIRECTIONAL_SEQUENCE_RNN) { + for (const size_t outOprand : operation.outputs) { + if (operand == outOprand) { + return true; + } + } + } } return false; } |