diff options
author | Xusong Wang <xusongw@google.com> | 2019-06-10 07:37:42 -0700 |
---|---|---|
committer | Xusong Wang <xusongw@google.com> | 2019-06-11 09:55:41 -0700 |
commit | 92dfcd3899bba47d1db562365635c165292fa514 (patch) | |
tree | 886964d3cebc9fcd2f322fa586b41bda62f96ff3 | |
parent | 15eab11550666b34b210d7603a2d5b7ba35e7114 (diff) | |
download | ml-92dfcd3899bba47d1db562365635c165292fa514.tar.gz |
Add RandomOperand flags: doNotCheckAccuracy and doNotConnect.
This CL provides the following fixes:
* Should not check accuracy of TOPK_V2 second output
We should not check the second output as the sorting is not required
to be stable.
The second output of TOPK_V2 is marked as doNotCheckAccuracy and
doNotConnect.
* Handle NaN/Infinity float values in RGG
Some operations may produce invalid floating point values,
e.g. NaN, Inf. We should not connect the output tensor of such
an operation to the input of another operation, as some floating
point operations are undefined upon NaN and Inf.
A complete list of such operations:
* ANEURALNETWORKS_DIV
* ANEURALNETWORKS_LOG
* ANEURALNETWORKS_POW
* ANEURALNETWORKS_SQRT
* ANEURALNETWORKS_RSQRT
* ANEURALNETWORKS_L2_NORMALIZATION
* ANEURALNETWORKS_REDUCE_PROD
The outputs of these operations are marked as doNotConnect.
Bug: 134800514
Bug: 134753636
Test: NNT_static_fuzzing
Change-Id: Id71244c5fe3b26ba9a483c361867a3021c7de20e
7 files changed, 83 insertions, 33 deletions
diff --git a/nn/runtime/test/fuzzing/RandomGraphGenerator.cpp b/nn/runtime/test/fuzzing/RandomGraphGenerator.cpp index 2ad183b48..96ac5d11d 100644 --- a/nn/runtime/test/fuzzing/RandomGraphGenerator.cpp +++ b/nn/runtime/test/fuzzing/RandomGraphGenerator.cpp @@ -52,7 +52,7 @@ std::vector<uint32_t> RandomOperand::getDimensions() const { bool RandomOperand::createEdgeIfValid(const RandomOperand& other) const { if (other.type != RandomOperandType::INPUT) return false; if (dataType != other.dataType || dimensions.size() != other.dimensions.size() || - scale != other.scale || zeroPoint != other.zeroPoint) + scale != other.scale || zeroPoint != other.zeroPoint || doNotConnect || other.doNotConnect) return false; return RandomVariableNetwork::get()->setEqualIfCompatible(dimensions, other.dimensions); } @@ -340,35 +340,38 @@ void RandomGraph::checkResults(const std::vector<OperandBuffer>& buffers, int i = 0; for (const auto& op : mOperands) { if (op->type == RandomOperandType::OUTPUT) { - SCOPED_TRACE(testing::Message() << "When comparing output " << op->ioIndex - << " of type " << toString(op->dataType)); - switch (op->dataType) { - case Type::TENSOR_FLOAT32: - expectNear<float>(*op, buffers[i], criteria.float32); - break; - case Type::TENSOR_FLOAT16: - expectNear<_Float16>(*op, buffers[i], criteria.float16); - break; - case Type::TENSOR_INT32: - expectNear<int32_t>(*op, buffers[i], criteria.int32); - break; - case Type::TENSOR_QUANT8_ASYMM: - expectNear<uint8_t>(*op, buffers[i], criteria.quant8Asymm); - break; - case Type::TENSOR_QUANT8_SYMM: - expectNear<int8_t>(*op, buffers[i], criteria.quant8Symm); - break; - case Type::TENSOR_QUANT16_ASYMM: - expectNear<uint16_t>(*op, buffers[i], criteria.quant16Asymm); - break; - case Type::TENSOR_QUANT16_SYMM: - expectNear<int16_t>(*op, buffers[i], criteria.quant16Symm); - break; - case Type::TENSOR_BOOL8: - expectBooleanEqual(*op, buffers[i]); - break; - default: - NN_FUZZER_CHECK(false) << "Data type not supported."; + 
SCOPED_TRACE(testing::Message() + << "When comparing output " << op->ioIndex << " (op" << op->opIndex << ")" + << " of type " << toString(op->dataType)); + if (!op->doNotCheckAccuracy) { + switch (op->dataType) { + case Type::TENSOR_FLOAT32: + expectNear<float>(*op, buffers[i], criteria.float32); + break; + case Type::TENSOR_FLOAT16: + expectNear<_Float16>(*op, buffers[i], criteria.float16); + break; + case Type::TENSOR_INT32: + expectNear<int32_t>(*op, buffers[i], criteria.int32); + break; + case Type::TENSOR_QUANT8_ASYMM: + expectNear<uint8_t>(*op, buffers[i], criteria.quant8Asymm); + break; + case Type::TENSOR_QUANT8_SYMM: + expectNear<int8_t>(*op, buffers[i], criteria.quant8Symm); + break; + case Type::TENSOR_QUANT16_ASYMM: + expectNear<uint16_t>(*op, buffers[i], criteria.quant16Asymm); + break; + case Type::TENSOR_QUANT16_SYMM: + expectNear<int16_t>(*op, buffers[i], criteria.quant16Symm); + break; + case Type::TENSOR_BOOL8: + expectBooleanEqual(*op, buffers[i]); + break; + default: + NN_FUZZER_CHECK(false) << "Data type not supported."; + } } i++; } diff --git a/nn/runtime/test/fuzzing/RandomGraphGenerator.h b/nn/runtime/test/fuzzing/RandomGraphGenerator.h index 5972d79f8..c3e2b6789 100644 --- a/nn/runtime/test/fuzzing/RandomGraphGenerator.h +++ b/nn/runtime/test/fuzzing/RandomGraphGenerator.h @@ -54,6 +54,14 @@ struct RandomOperand { // The index of the input/output as specified in model->identifyInputsAndOutputs(...). int32_t ioIndex = -1; + // If set true, this operand will be ignored during the accuracy checking step. + bool doNotCheckAccuracy = false; + + // If set true, this operand will not be connected to another operation, e.g. if this operand is + // an operation output, then it will not be used as an input to another operation, and will + // eventually end up being a model output. + bool doNotConnect = false; + RandomOperand(const OperandSignature& op, Type dataType, uint32_t rank); // Resize the underlying operand buffer. 
diff --git a/nn/runtime/test/fuzzing/operation_signatures/Broadcast.cpp b/nn/runtime/test/fuzzing/operation_signatures/Broadcast.cpp index 88a54948e..e98ba465c 100644 --- a/nn/runtime/test/fuzzing/operation_signatures/Broadcast.cpp +++ b/nn/runtime/test/fuzzing/operation_signatures/Broadcast.cpp @@ -45,6 +45,12 @@ static void broadcastOpConstructor(Type dataType, uint32_t rank, RandomOperation float minScale = op->inputs[0]->scale * op->inputs[1]->scale; op->outputs[0]->scale = getUniform(minScale, minScale * 5); } + + // DIV and POW may produce Inf output values. We should not connect this output tensor to the + // input of another operation. + if (op->opType == ANEURALNETWORKS_DIV || op->opType == ANEURALNETWORKS_POW) { + op->outputs[0]->doNotConnect = true; + } } // For broadcast operations with fused activation. diff --git a/nn/runtime/test/fuzzing/operation_signatures/Elementwise.cpp b/nn/runtime/test/fuzzing/operation_signatures/Elementwise.cpp index 548fbb143..dc825bf81 100644 --- a/nn/runtime/test/fuzzing/operation_signatures/Elementwise.cpp +++ b/nn/runtime/test/fuzzing/operation_signatures/Elementwise.cpp @@ -51,14 +51,34 @@ DEFINE_ELEMENTWISE_SIGNATURE(RELU6, V1_2, Type::TENSOR_FLOAT16); DEFINE_ELEMENTWISE_SIGNATURE_WITH_RANK5(ABS, V1_2, Type::TENSOR_FLOAT32, Type::TENSOR_FLOAT16); DEFINE_ELEMENTWISE_SIGNATURE_WITH_RANK5(EXP, V1_2, Type::TENSOR_FLOAT32, Type::TENSOR_FLOAT16); -DEFINE_ELEMENTWISE_SIGNATURE_WITH_RANK5(LOG, V1_2, Type::TENSOR_FLOAT32, Type::TENSOR_FLOAT16); DEFINE_ELEMENTWISE_SIGNATURE_WITH_RANK5(NEG, V1_2, Type::TENSOR_FLOAT32, Type::TENSOR_FLOAT16, Type::TENSOR_INT32); -DEFINE_ELEMENTWISE_SIGNATURE_WITH_RANK5(RSQRT, V1_2, Type::TENSOR_FLOAT32, Type::TENSOR_FLOAT16); DEFINE_ELEMENTWISE_SIGNATURE_WITH_RANK5(SIN, V1_2, Type::TENSOR_FLOAT32, Type::TENSOR_FLOAT16); -DEFINE_ELEMENTWISE_SIGNATURE_WITH_RANK5(SQRT, V1_2, Type::TENSOR_FLOAT32, Type::TENSOR_FLOAT16); DEFINE_ELEMENTWISE_SIGNATURE_WITH_RANK5(LOGICAL_NOT, V1_2, Type::TENSOR_BOOL8); +// 
LOG, SQRT, and RSQRT may produce NaN output values. We should not connect the output tensor to +// the input of another operation. +static void elementwiseOpWithDisconnectedOutput(Type type, uint32_t rank, RandomOperation* op) { + sameShapeOpConstructor(type, rank, op); + op->outputs[0]->doNotConnect = true; +} + +#define DEFINE_ELEMENTWISE_SIGNATURE_WITH_DISCONNECTED_OUTPUT(op, ver, ...) \ + DEFINE_OPERATION_SIGNATURE(op##_##ver){.opType = ANEURALNETWORKS_##op, \ + .supportedDataTypes = {__VA_ARGS__}, \ + .supportedRanks = {1, 2, 3, 4, 5}, \ + .version = HalVersion::ver, \ + .inputs = {INPUT_DEFAULT}, \ + .outputs = {OUTPUT_DEFAULT}, \ + .constructor = elementwiseOpWithDisconnectedOutput}; + +DEFINE_ELEMENTWISE_SIGNATURE_WITH_DISCONNECTED_OUTPUT(LOG, V1_2, Type::TENSOR_FLOAT32, + Type::TENSOR_FLOAT16); +DEFINE_ELEMENTWISE_SIGNATURE_WITH_DISCONNECTED_OUTPUT(RSQRT, V1_2, Type::TENSOR_FLOAT32, + Type::TENSOR_FLOAT16); +DEFINE_ELEMENTWISE_SIGNATURE_WITH_DISCONNECTED_OUTPUT(SQRT, V1_2, Type::TENSOR_FLOAT32, + Type::TENSOR_FLOAT16); + // Quantized operations with special output quantization parameters. #define DEFINE_ELEMENTWISE_WITH_QUANT_OUTPUT_SIGNATURE(op, ver, s, z, ...) \ DEFINE_OPERATION_SIGNATURE(op##_##ver){.opType = ANEURALNETWORKS_##op, \ diff --git a/nn/runtime/test/fuzzing/operation_signatures/Normalization.cpp b/nn/runtime/test/fuzzing/operation_signatures/Normalization.cpp index 4acfea8d6..59dc0cae8 100644 --- a/nn/runtime/test/fuzzing/operation_signatures/Normalization.cpp +++ b/nn/runtime/test/fuzzing/operation_signatures/Normalization.cpp @@ -71,6 +71,9 @@ static void l2normConstructor(Type dataType, uint32_t rank, RandomOperation* op) if (op->inputs.size() > 1) { op->inputs[1]->setScalarValue<int32_t>(getUniform<int32_t>(-rank, rank - 1)); } + // L2_NORMALIZATION may produce NaN output values with all zero inputs. We should not connect + // the output tensor to the input of another operation. 
+ op->outputs[0]->doNotConnect = true; } DEFINE_OPERATION_SIGNATURE(L2_NORMALIZATION_V1_0){.opType = ANEURALNETWORKS_L2_NORMALIZATION, diff --git a/nn/runtime/test/fuzzing/operation_signatures/Reduce.cpp b/nn/runtime/test/fuzzing/operation_signatures/Reduce.cpp index 38dcdcb3a..8d3a380df 100644 --- a/nn/runtime/test/fuzzing/operation_signatures/Reduce.cpp +++ b/nn/runtime/test/fuzzing/operation_signatures/Reduce.cpp @@ -52,6 +52,12 @@ static void reduceOpConstructor(Type, uint32_t rank, RandomOperation* op) { } } setSameQuantization(op->outputs[0], op->inputs[0]); + + // REDUCE_PROD may produce Inf output values. We should not connect the output tensor to the + // input of another operation. + if (op->opType == ANEURALNETWORKS_REDUCE_PROD) { + op->outputs[0]->doNotConnect = true; + } } #define DEFINE_MEAN_SIGNATURE(ver, ...) \ diff --git a/nn/runtime/test/fuzzing/operation_signatures/Selection.cpp b/nn/runtime/test/fuzzing/operation_signatures/Selection.cpp index 8b401f4d1..432a48886 100644 --- a/nn/runtime/test/fuzzing/operation_signatures/Selection.cpp +++ b/nn/runtime/test/fuzzing/operation_signatures/Selection.cpp @@ -180,6 +180,10 @@ static void topKConstructor(Type, uint32_t rank, RandomOperation* op) { op->outputs[0]->dimensions.back() = k; op->outputs[1]->dimensions.back() = k; setSameQuantization(op->outputs[0], op->inputs[0]); + + // As the sorting is not required to be stable, we should not check the second output (indices). + op->outputs[1]->doNotCheckAccuracy = true; + op->outputs[1]->doNotConnect = true; } DEFINE_OPERATION_SIGNATURE(TOPK_V2_V1_2){ |