summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
author	Xusong Wang <xusongw@google.com>	2019-06-11 21:32:11 +0000
committer	Android (Google) Code Review <android-gerrit@google.com>	2019-06-11 21:32:11 +0000
commit	024c11aa748f15a8ec179a786801fe5ae0f188ac (patch)
tree	fe54105d57525fadd0a20d5df9af414cc29188ef
parent	896614c99271e4413091170f32497cbb06901f84 (diff)
parent	f0b6c450af089a1d1078f070001581e5ea2c7cf3 (diff)
download	ml-024c11aa748f15a8ec179a786801fe5ae0f188ac.tar.gz
Merge changes I7faabafc,Id71244c5 into qt-dev
* changes: Relax the tolerable range for quant and boolean values. Add RandomOperand flags: doNotCheckAccuracy and doNotConnect.
-rw-r--r--	nn/runtime/test/fuzzing/RandomGraphGenerator.cpp	82
-rw-r--r--	nn/runtime/test/fuzzing/RandomGraphGenerator.h	8
-rw-r--r--	nn/runtime/test/fuzzing/TestRandomGraph.cpp	24
-rw-r--r--	nn/runtime/test/fuzzing/operation_signatures/Broadcast.cpp	6
-rw-r--r--	nn/runtime/test/fuzzing/operation_signatures/Elementwise.cpp	26
-rw-r--r--	nn/runtime/test/fuzzing/operation_signatures/Normalization.cpp	3
-rw-r--r--	nn/runtime/test/fuzzing/operation_signatures/Reduce.cpp	6
-rw-r--r--	nn/runtime/test/fuzzing/operation_signatures/Selection.cpp	4
8 files changed, 109 insertions, 50 deletions
diff --git a/nn/runtime/test/fuzzing/RandomGraphGenerator.cpp b/nn/runtime/test/fuzzing/RandomGraphGenerator.cpp
index 2ad183b48..6c21b7eea 100644
--- a/nn/runtime/test/fuzzing/RandomGraphGenerator.cpp
+++ b/nn/runtime/test/fuzzing/RandomGraphGenerator.cpp
@@ -52,7 +52,7 @@ std::vector<uint32_t> RandomOperand::getDimensions() const {
bool RandomOperand::createEdgeIfValid(const RandomOperand& other) const {
if (other.type != RandomOperandType::INPUT) return false;
if (dataType != other.dataType || dimensions.size() != other.dimensions.size() ||
- scale != other.scale || zeroPoint != other.zeroPoint)
+ scale != other.scale || zeroPoint != other.zeroPoint || doNotConnect || other.doNotConnect)
return false;
return RandomVariableNetwork::get()->setEqualIfCompatible(dimensions, other.dimensions);
}
@@ -320,17 +320,26 @@ void expectNear(const RandomOperand& op, const OperandBuffer& test,
EXPECT_LE(mse, criterion.mse);
}
-void expectBooleanEqual(const RandomOperand& op, const OperandBuffer& test) {
+// For boolean values, we expect the number of mismatches does not exceed a certain ratio.
+void expectBooleanNearlyEqual(const RandomOperand& op, const OperandBuffer& test,
+ float allowedErrorRatio) {
const bool8* actual = reinterpret_cast<const bool8*>(test.data());
const bool8* expected = reinterpret_cast<const bool8*>(op.buffer.data());
uint32_t len = op.getNumberOfElements();
uint32_t numErrors = 0;
+ std::stringstream errorMsg;
for (uint32_t i = 0; i < len; i++) {
- SCOPED_TRACE(testing::Message() << "When comparing element " << i);
- if (numErrors < kMaxNumberOfPrintedErrors) EXPECT_EQ(expected[i], actual[i]);
- if (expected[i] != actual[i]) numErrors++;
+ if (expected[i] != actual[i]) {
+ if (numErrors < kMaxNumberOfPrintedErrors)
+ errorMsg << " Expected: " << expected[i] << ", actual: " << actual[i]
+ << ", when comparing element " << i << "\n";
+ numErrors++;
+ }
}
- EXPECT_EQ(numErrors, 0u);
+ // When |len| is small, the allowedErrorCount will intentionally ceil at 1, which allows for
+ // greater tolerance.
+ uint32_t allowedErrorCount = static_cast<uint32_t>(std::ceil(allowedErrorRatio * len));
+ EXPECT_LE(numErrors, allowedErrorCount) << errorMsg.str();
}
void RandomGraph::checkResults(const std::vector<OperandBuffer>& buffers,
@@ -340,35 +349,38 @@ void RandomGraph::checkResults(const std::vector<OperandBuffer>& buffers,
int i = 0;
for (const auto& op : mOperands) {
if (op->type == RandomOperandType::OUTPUT) {
- SCOPED_TRACE(testing::Message() << "When comparing output " << op->ioIndex
- << " of type " << toString(op->dataType));
- switch (op->dataType) {
- case Type::TENSOR_FLOAT32:
- expectNear<float>(*op, buffers[i], criteria.float32);
- break;
- case Type::TENSOR_FLOAT16:
- expectNear<_Float16>(*op, buffers[i], criteria.float16);
- break;
- case Type::TENSOR_INT32:
- expectNear<int32_t>(*op, buffers[i], criteria.int32);
- break;
- case Type::TENSOR_QUANT8_ASYMM:
- expectNear<uint8_t>(*op, buffers[i], criteria.quant8Asymm);
- break;
- case Type::TENSOR_QUANT8_SYMM:
- expectNear<int8_t>(*op, buffers[i], criteria.quant8Symm);
- break;
- case Type::TENSOR_QUANT16_ASYMM:
- expectNear<uint16_t>(*op, buffers[i], criteria.quant16Asymm);
- break;
- case Type::TENSOR_QUANT16_SYMM:
- expectNear<int16_t>(*op, buffers[i], criteria.quant16Symm);
- break;
- case Type::TENSOR_BOOL8:
- expectBooleanEqual(*op, buffers[i]);
- break;
- default:
- NN_FUZZER_CHECK(false) << "Data type not supported.";
+ SCOPED_TRACE(testing::Message()
+ << "When comparing output " << op->ioIndex << " (op" << op->opIndex << ")"
+ << " of type " << toString(op->dataType));
+ if (!op->doNotCheckAccuracy) {
+ switch (op->dataType) {
+ case Type::TENSOR_FLOAT32:
+ expectNear<float>(*op, buffers[i], criteria.float32);
+ break;
+ case Type::TENSOR_FLOAT16:
+ expectNear<_Float16>(*op, buffers[i], criteria.float16);
+ break;
+ case Type::TENSOR_INT32:
+ expectNear<int32_t>(*op, buffers[i], criteria.int32);
+ break;
+ case Type::TENSOR_QUANT8_ASYMM:
+ expectNear<uint8_t>(*op, buffers[i], criteria.quant8Asymm);
+ break;
+ case Type::TENSOR_QUANT8_SYMM:
+ expectNear<int8_t>(*op, buffers[i], criteria.quant8Symm);
+ break;
+ case Type::TENSOR_QUANT16_ASYMM:
+ expectNear<uint16_t>(*op, buffers[i], criteria.quant16Asymm);
+ break;
+ case Type::TENSOR_QUANT16_SYMM:
+ expectNear<int16_t>(*op, buffers[i], criteria.quant16Symm);
+ break;
+ case Type::TENSOR_BOOL8:
+ expectBooleanNearlyEqual(*op, buffers[i], /*allowedErrorRatio=*/0.01);
+ break;
+ default:
+ NN_FUZZER_CHECK(false) << "Data type not supported.";
+ }
}
i++;
}
diff --git a/nn/runtime/test/fuzzing/RandomGraphGenerator.h b/nn/runtime/test/fuzzing/RandomGraphGenerator.h
index 5972d79f8..c3e2b6789 100644
--- a/nn/runtime/test/fuzzing/RandomGraphGenerator.h
+++ b/nn/runtime/test/fuzzing/RandomGraphGenerator.h
@@ -54,6 +54,14 @@ struct RandomOperand {
// The index of the input/output as specified in model->identifyInputsAndOutputs(...).
int32_t ioIndex = -1;
+ // If set true, this operand will be ignored during the accuracy checking step.
+ bool doNotCheckAccuracy = false;
+
+ // If set true, this operand will not be connected to another operation, e.g. if this operand is
+ // an operation output, then it will not be used as an input to another operation, and will
+ // eventually end up being a model output.
+ bool doNotConnect = false;
+
RandomOperand(const OperandSignature& op, Type dataType, uint32_t rank);
// Resize the underlying operand buffer.
diff --git a/nn/runtime/test/fuzzing/TestRandomGraph.cpp b/nn/runtime/test/fuzzing/TestRandomGraph.cpp
index e47ca1840..81e1ea55f 100644
--- a/nn/runtime/test/fuzzing/TestRandomGraph.cpp
+++ b/nn/runtime/test/fuzzing/TestRandomGraph.cpp
@@ -422,10 +422,10 @@ const AccuracyCriteria kRelaxedCriteria = {
.float32 = {.atol = 1e-3f, .rtol = 1e-3f, .bias = 2e-5f, .mse = 1e-7f},
.float16 = {.atol = 1.0f, .rtol = 1.0f, .bias = 5e-3f, .mse = 1e-4f},
.int32 = {.atol = 1},
- .quant8Asymm = {.atol = 8, .bias = 1, .mse = 1},
- .quant8Symm = {.atol = 8, .bias = 1, .mse = 1},
- .quant16Asymm = {.atol = 8, .bias = 1, .mse = 1},
- .quant16Symm = {.atol = 8, .bias = 1, .mse = 1}};
+ .quant8Asymm = {.atol = 10, .bias = 1.5, .mse = 1.5},
+ .quant8Symm = {.atol = 10, .bias = 1.5, .mse = 1.5},
+ .quant16Asymm = {.atol = 10, .bias = 1.5, .mse = 1.5},
+ .quant16Symm = {.atol = 10, .bias = 1.5, .mse = 1.5}};
/*-- NNAPI 1.0 Operations ---------------------------------------------------*/
@@ -574,19 +574,19 @@ const AccuracyCriteria kSmallGraphCriteria = {
.float32 = {.atol = 1e-2f, .rtol = 1e-2f, .bias = 2e-5f, .mse = 1e-7f},
.float16 = {.atol = 1.0f, .rtol = 1.0f, .bias = 5e-3f, .mse = 1e-4f},
.int32 = {.atol = 1},
- .quant8Asymm = {.atol = 8, .bias = 1, .mse = 1},
- .quant8Symm = {.atol = 8, .bias = 1, .mse = 1},
- .quant16Asymm = {.atol = 8, .bias = 1, .mse = 1},
- .quant16Symm = {.atol = 8, .bias = 1, .mse = 1}};
+ .quant8Asymm = {.atol = 12, .bias = 2, .mse = 2},
+ .quant8Symm = {.atol = 12, .bias = 2, .mse = 2},
+ .quant16Asymm = {.atol = 12, .bias = 2, .mse = 2},
+ .quant16Symm = {.atol = 12, .bias = 2, .mse = 2}};
const AccuracyCriteria kLargeGraphCriteria = {
.float32 = {.atol = 1e-1f, .rtol = 1e-1f, .bias = 1e-2f, .mse = 1e-4f},
.float16 = {.atol = 1.0f, .rtol = 1.0f, .bias = 1e-1f, .mse = 5e-2f},
.int32 = {.atol = 1},
- .quant8Asymm = {.atol = 10, .bias = 2, .mse = 2},
- .quant8Symm = {.atol = 10, .bias = 2, .mse = 2},
- .quant16Asymm = {.atol = 10, .bias = 2, .mse = 2},
- .quant16Symm = {.atol = 10, .bias = 2, .mse = 2}};
+ .quant8Asymm = {.atol = 12, .bias = 2, .mse = 2},
+ .quant8Symm = {.atol = 12, .bias = 2, .mse = 2},
+ .quant16Asymm = {.atol = 12, .bias = 2, .mse = 2},
+ .quant16Symm = {.atol = 12, .bias = 2, .mse = 2}};
// Due to the limitation of the random graph generator, graphs generated with mixed-type or
// mixed-rank operations are likely to result in a disconnected network. Thus, we filter the
diff --git a/nn/runtime/test/fuzzing/operation_signatures/Broadcast.cpp b/nn/runtime/test/fuzzing/operation_signatures/Broadcast.cpp
index 88a54948e..e98ba465c 100644
--- a/nn/runtime/test/fuzzing/operation_signatures/Broadcast.cpp
+++ b/nn/runtime/test/fuzzing/operation_signatures/Broadcast.cpp
@@ -45,6 +45,12 @@ static void broadcastOpConstructor(Type dataType, uint32_t rank, RandomOperation
float minScale = op->inputs[0]->scale * op->inputs[1]->scale;
op->outputs[0]->scale = getUniform(minScale, minScale * 5);
}
+
+ // DIV and POW may produce Inf output values. We should not connect this output tensor to the
+ // input of another operation.
+ if (op->opType == ANEURALNETWORKS_DIV || op->opType == ANEURALNETWORKS_POW) {
+ op->outputs[0]->doNotConnect = true;
+ }
}
// For broadcast operations with fused activation.
diff --git a/nn/runtime/test/fuzzing/operation_signatures/Elementwise.cpp b/nn/runtime/test/fuzzing/operation_signatures/Elementwise.cpp
index 548fbb143..dc825bf81 100644
--- a/nn/runtime/test/fuzzing/operation_signatures/Elementwise.cpp
+++ b/nn/runtime/test/fuzzing/operation_signatures/Elementwise.cpp
@@ -51,14 +51,34 @@ DEFINE_ELEMENTWISE_SIGNATURE(RELU6, V1_2, Type::TENSOR_FLOAT16);
DEFINE_ELEMENTWISE_SIGNATURE_WITH_RANK5(ABS, V1_2, Type::TENSOR_FLOAT32, Type::TENSOR_FLOAT16);
DEFINE_ELEMENTWISE_SIGNATURE_WITH_RANK5(EXP, V1_2, Type::TENSOR_FLOAT32, Type::TENSOR_FLOAT16);
-DEFINE_ELEMENTWISE_SIGNATURE_WITH_RANK5(LOG, V1_2, Type::TENSOR_FLOAT32, Type::TENSOR_FLOAT16);
DEFINE_ELEMENTWISE_SIGNATURE_WITH_RANK5(NEG, V1_2, Type::TENSOR_FLOAT32, Type::TENSOR_FLOAT16,
Type::TENSOR_INT32);
-DEFINE_ELEMENTWISE_SIGNATURE_WITH_RANK5(RSQRT, V1_2, Type::TENSOR_FLOAT32, Type::TENSOR_FLOAT16);
DEFINE_ELEMENTWISE_SIGNATURE_WITH_RANK5(SIN, V1_2, Type::TENSOR_FLOAT32, Type::TENSOR_FLOAT16);
-DEFINE_ELEMENTWISE_SIGNATURE_WITH_RANK5(SQRT, V1_2, Type::TENSOR_FLOAT32, Type::TENSOR_FLOAT16);
DEFINE_ELEMENTWISE_SIGNATURE_WITH_RANK5(LOGICAL_NOT, V1_2, Type::TENSOR_BOOL8);
+// LOG, SQRT, and RSQRT may produce NaN output values. We should not connect the output tensor to
+// the input of another operation.
+static void elementwiseOpWithDisconnectedOutput(Type type, uint32_t rank, RandomOperation* op) {
+ sameShapeOpConstructor(type, rank, op);
+ op->outputs[0]->doNotConnect = true;
+}
+
+#define DEFINE_ELEMENTWISE_SIGNATURE_WITH_DISCONNECTED_OUTPUT(op, ver, ...) \
+ DEFINE_OPERATION_SIGNATURE(op##_##ver){.opType = ANEURALNETWORKS_##op, \
+ .supportedDataTypes = {__VA_ARGS__}, \
+ .supportedRanks = {1, 2, 3, 4, 5}, \
+ .version = HalVersion::ver, \
+ .inputs = {INPUT_DEFAULT}, \
+ .outputs = {OUTPUT_DEFAULT}, \
+ .constructor = elementwiseOpWithDisconnectedOutput};
+
+DEFINE_ELEMENTWISE_SIGNATURE_WITH_DISCONNECTED_OUTPUT(LOG, V1_2, Type::TENSOR_FLOAT32,
+ Type::TENSOR_FLOAT16);
+DEFINE_ELEMENTWISE_SIGNATURE_WITH_DISCONNECTED_OUTPUT(RSQRT, V1_2, Type::TENSOR_FLOAT32,
+ Type::TENSOR_FLOAT16);
+DEFINE_ELEMENTWISE_SIGNATURE_WITH_DISCONNECTED_OUTPUT(SQRT, V1_2, Type::TENSOR_FLOAT32,
+ Type::TENSOR_FLOAT16);
+
// Quantized operations with special output quantization parameters.
#define DEFINE_ELEMENTWISE_WITH_QUANT_OUTPUT_SIGNATURE(op, ver, s, z, ...) \
DEFINE_OPERATION_SIGNATURE(op##_##ver){.opType = ANEURALNETWORKS_##op, \
diff --git a/nn/runtime/test/fuzzing/operation_signatures/Normalization.cpp b/nn/runtime/test/fuzzing/operation_signatures/Normalization.cpp
index 4acfea8d6..59dc0cae8 100644
--- a/nn/runtime/test/fuzzing/operation_signatures/Normalization.cpp
+++ b/nn/runtime/test/fuzzing/operation_signatures/Normalization.cpp
@@ -71,6 +71,9 @@ static void l2normConstructor(Type dataType, uint32_t rank, RandomOperation* op)
if (op->inputs.size() > 1) {
op->inputs[1]->setScalarValue<int32_t>(getUniform<int32_t>(-rank, rank - 1));
}
+ // L2_NORMALIZATION may produce NaN output values with all zero inputs. We should not connect
+ // the output tensor to the input of another operation.
+ op->outputs[0]->doNotConnect = true;
}
DEFINE_OPERATION_SIGNATURE(L2_NORMALIZATION_V1_0){.opType = ANEURALNETWORKS_L2_NORMALIZATION,
diff --git a/nn/runtime/test/fuzzing/operation_signatures/Reduce.cpp b/nn/runtime/test/fuzzing/operation_signatures/Reduce.cpp
index 38dcdcb3a..8d3a380df 100644
--- a/nn/runtime/test/fuzzing/operation_signatures/Reduce.cpp
+++ b/nn/runtime/test/fuzzing/operation_signatures/Reduce.cpp
@@ -52,6 +52,12 @@ static void reduceOpConstructor(Type, uint32_t rank, RandomOperation* op) {
}
}
setSameQuantization(op->outputs[0], op->inputs[0]);
+
+ // REDUCE_PROD may produce Inf output values. We should not connect the output tensor to the
+ // input of another operation.
+ if (op->opType == ANEURALNETWORKS_REDUCE_PROD) {
+ op->outputs[0]->doNotConnect = true;
+ }
}
#define DEFINE_MEAN_SIGNATURE(ver, ...) \
diff --git a/nn/runtime/test/fuzzing/operation_signatures/Selection.cpp b/nn/runtime/test/fuzzing/operation_signatures/Selection.cpp
index 8b401f4d1..432a48886 100644
--- a/nn/runtime/test/fuzzing/operation_signatures/Selection.cpp
+++ b/nn/runtime/test/fuzzing/operation_signatures/Selection.cpp
@@ -180,6 +180,10 @@ static void topKConstructor(Type, uint32_t rank, RandomOperation* op) {
op->outputs[0]->dimensions.back() = k;
op->outputs[1]->dimensions.back() = k;
setSameQuantization(op->outputs[0], op->inputs[0]);
+
+ // As the sorting is not required to be stable, we should not check the second output (indices).
+ op->outputs[1]->doNotCheckAccuracy = true;
+ op->outputs[1]->doNotConnect = true;
}
DEFINE_OPERATION_SIGNATURE(TOPK_V2_V1_2){