author     android-build-team Robot <android-build-team-robot@google.com>  2019-06-08 23:13:31 +0000
committer  android-build-team Robot <android-build-team-robot@google.com>  2019-06-08 23:13:31 +0000
commit     2e363fa8468b0266df1f2b86451d967a32333d7e (patch)
tree       985d93451a8bc65691c5643bc754f941460dba43
parent     af6cceaada7eda2a6ee38b057ff2b0e90a97682e (diff)
parent     15eab11550666b34b210d7603a2d5b7ba35e7114 (diff)
Snap for 5645017 from 15eab11550666b34b210d7603a2d5b7ba35e7114 to qt-release
Change-Id: Ic60fd00db988d4b1bb06d752506deabf30fb0b96
-rw-r--r--  nn/common/OperationsUtils.cpp                     2
-rw-r--r--  nn/runtime/test/fuzzing/RandomGraphGenerator.cpp  8
-rw-r--r--  nn/runtime/test/fuzzing/RandomGraphGenerator.h    8
-rw-r--r--  nn/runtime/test/fuzzing/TestRandomGraph.cpp       28
4 files changed, 34 insertions, 12 deletions
diff --git a/nn/common/OperationsUtils.cpp b/nn/common/OperationsUtils.cpp
index 99c11780c..e128afbb7 100644
--- a/nn/common/OperationsUtils.cpp
+++ b/nn/common/OperationsUtils.cpp
@@ -340,7 +340,7 @@ uint8_t requantize(uint8_t value, const Shape& oldShape, const Shape& newShape)
double doubleRet = doubleValue / newShape.scale + newShape.offset;
if (doubleRet < 0) return 0;
if (doubleRet > 255) return 255;
- return static_cast<uint8_t>(doubleRet);
+ return static_cast<uint8_t>(std::round(doubleRet));
}
bool floorPrepare(const Shape& input, Shape* output) {
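
A minimal standalone sketch (not part of the patch; the value 2.7 is an arbitrary example) of the behavior the one-line change above addresses: static_cast<uint8_t> truncates toward zero, which biases requantized values downward, while std::round() maps them to the nearest integer before the cast.

#include <cassert>
#include <cmath>
#include <cstdint>

int main() {
    // Hypothetical result of doubleValue / newShape.scale + newShape.offset,
    // already known to be inside [0, 255] because of the clamps above.
    double doubleRet = 2.7;
    assert(static_cast<uint8_t>(doubleRet) == 2);              // previous behavior: truncation
    assert(static_cast<uint8_t>(std::round(doubleRet)) == 3);  // patched behavior: round to nearest
    return 0;
}
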
diff --git a/nn/runtime/test/fuzzing/RandomGraphGenerator.cpp b/nn/runtime/test/fuzzing/RandomGraphGenerator.cpp
index 18295e49f..2ad183b48 100644
--- a/nn/runtime/test/fuzzing/RandomGraphGenerator.cpp
+++ b/nn/runtime/test/fuzzing/RandomGraphGenerator.cpp
@@ -277,6 +277,7 @@ constexpr uint32_t kMaxNumberOfPrintedErrors = 5;
template <typename T>
void expectNear(const RandomOperand& op, const OperandBuffer& test,
const AccuracyCriterion& criterion) {
+ constexpr uint32_t kMinNumberOfElementsToTestBiasMSE = 10;
const T* actualBuffer = reinterpret_cast<const T*>(test.data());
const T* expectedBuffer = reinterpret_cast<const T*>(op.buffer.data());
uint32_t len = op.getNumberOfElements();
@@ -297,8 +298,11 @@ void expectNear(const RandomOperand& op, const OperandBuffer& test,
continue;
}
- // Accumulate bias and MSE.
+ // Accumulate bias and MSE. Use relative bias and MSE for floating point values.
double diff = actual - expected;
+ if constexpr (NN_IS_FLOAT(T)) {
+ diff /= std::max(1.0, std::abs(expected));
+ }
bias += diff;
mse += diff * diff;
@@ -309,7 +313,7 @@ void expectNear(const RandomOperand& op, const OperandBuffer& test,
EXPECT_EQ(numErrors, 0u);
// Test bias and MSE.
- if (len == numSkip) return;
+ if (len < numSkip + kMinNumberOfElementsToTestBiasMSE) return;
bias /= static_cast<double>(len - numSkip);
mse /= static_cast<double>(len - numSkip);
EXPECT_LE(std::fabs(bias), criterion.bias);
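
A minimal standalone sketch (not part of the patch; the sample values are arbitrary) of the relative-error accumulation introduced above for floating-point types: each per-element difference is divided by max(1, |expected|), so large expected values are compared relatively while values near zero fall back to an absolute comparison. Note also that the aggregate bias/MSE check now runs only when at least kMinNumberOfElementsToTestBiasMSE (10) elements survive the skip filter.

#include <algorithm>
#include <cmath>
#include <cstdio>

int main() {
    // Two hypothetical {expected, actual} pairs.
    const double pairs[][2] = {{100.0, 101.0}, {0.25, 0.35}};
    double bias = 0.0, mse = 0.0;
    for (const auto& p : pairs) {
        double diff = p[1] - p[0];
        // Relative error, clamped so |expected| < 1 does not inflate it.
        diff /= std::max(1.0, std::abs(p[0]));
        bias += diff;
        mse += diff * diff;
    }
    bias /= 2.0;  // mean signed (relative) error
    mse /= 2.0;   // mean squared (relative) error
    std::printf("bias=%g mse=%g\n", bias, mse);  // prints bias=0.055 mse=0.00505
    return 0;
}
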
diff --git a/nn/runtime/test/fuzzing/RandomGraphGenerator.h b/nn/runtime/test/fuzzing/RandomGraphGenerator.h
index 5599f0810..5972d79f8 100644
--- a/nn/runtime/test/fuzzing/RandomGraphGenerator.h
+++ b/nn/runtime/test/fuzzing/RandomGraphGenerator.h
@@ -103,11 +103,13 @@ struct RandomOperation {
// TODO: Consider relative bias and mse on floating point data types?
struct AccuracyCriterion {
// We expect the driver results to be unbiased.
- // Formula: abs(sum_{i}(actual - expected)) <= bias
+ // Formula: abs(sum_{i}(diff)) <= bias, where
+ // * fixed point: diff = actual - expected
+ // * floating point: diff = (actual - expected) / max(1, abs(expected))
float bias = std::numeric_limits<float>::max();
// Set the threshold on Mean Square Error (MSE).
- // Formula: sum_{i}((actual - expected) ^ 2) / sum(1) <= mse
+ // Formula: sum_{i}(diff ^ 2) / sum(1) <= mse
float mse = std::numeric_limits<float>::max();
// We also set accuracy thresholds on each element to detect any particular edge cases that may
@@ -153,6 +155,8 @@ class RandomGraph {
// Dump the generated random graph to a spec file for debugging and visualization purpose.
void dumpSpecFile(std::string filename, std::string testname);
+ const std::vector<RandomOperation>& getOperations() const { return mOperations; }
+
private:
// Generate the graph structure.
bool generateGraph(uint32_t numOperations);
diff --git a/nn/runtime/test/fuzzing/TestRandomGraph.cpp b/nn/runtime/test/fuzzing/TestRandomGraph.cpp
index bb9cd9556..eb6d4c663 100644
--- a/nn/runtime/test/fuzzing/TestRandomGraph.cpp
+++ b/nn/runtime/test/fuzzing/TestRandomGraph.cpp
@@ -188,8 +188,21 @@ class RandomGraphTest : public ::testing::TestWithParam<uint32_t> {
NN_FUZZER_LOG_CLOSE;
}
+ bool shouldSkipTest(int64_t featureLevel) {
+ if (featureLevel >= __ANDROID_API_Q__) return false;
+ const auto& operations = mGraph.getOperations();
+ for (const auto& op : operations) {
+ // Skip if testing BATCH_TO_SPACE_ND with batch dimension == 1.
+ if (op.opType == ANEURALNETWORKS_BATCH_TO_SPACE_ND &&
+ op.inputs[0]->dimensions[0].getValue() == 1)
+ return true;
+ }
+ return false;
+ }
+
// Compile and execute the generated graph on a device selected by name.
- void compute(const test_wrapper::Model* model, uint32_t numOps, const std::string& name) {
+ void computeAndVerifyResultsForDevice(const test_wrapper::Model* model, uint32_t numOps,
+ const std::string& name) {
SCOPED_TRACE("Device: " + name);
ASSERT_TRUE(mDevices.find(name) != mDevices.end());
const auto device = mDevices[name];
@@ -215,6 +228,7 @@ class RandomGraphTest : public ::testing::TestWithParam<uint32_t> {
int64_t featureLevel;
ASSERT_EQ(ANeuralNetworksDevice_getFeatureLevel(device, &featureLevel),
ANEURALNETWORKS_NO_ERROR);
+ if (shouldSkipTest(featureLevel)) return;
// Create compilation for device.
CompilationForDevice compilation;
@@ -257,7 +271,7 @@ class RandomGraphTest : public ::testing::TestWithParam<uint32_t> {
// Compile and execute the generated graph normally (i.e., allow runtime to
// distribute across devices).
- void compute(const test_wrapper::Model* model, bool checkResults) {
+ void computeAndVerifyResults(const test_wrapper::Model* model, bool checkResults) {
// Because we're not using the introspection/control API, the CpuDevice
// is available as a fallback, and hence we assume that compilation and
// execution will succeed.
@@ -290,21 +304,21 @@ class RandomGraphTest : public ::testing::TestWithParam<uint32_t> {
ASSERT_EQ(model.finish(), Result::NO_ERROR);
// Compute reference result.
- compute(&model, numOperations, kRefDeviceName);
+ computeAndVerifyResultsForDevice(&model, numOperations, kRefDeviceName);
// Compute on each available device.
for (auto& pair : mDevices) {
// Skip the nnapi reference device.
if (pair.first.compare(kRefDeviceName) == 0) continue;
- compute(&model, numOperations, pair.first);
+ computeAndVerifyResultsForDevice(&model, numOperations, pair.first);
}
if (numOperations > 1) {
- {
+ if (!shouldSkipTest(mStandardDevicesFeatureLevel)) {
// Compute normally (i.e., allow runtime to distribute across
// devices).
SCOPED_TRACE("Compute normally");
- compute(&model, mStandardDevicesFeatureLevel >= __ANDROID_API_Q__);
+ computeAndVerifyResults(&model, mStandardDevicesFeatureLevel >= __ANDROID_API_Q__);
}
#ifndef NNTEST_CTS
@@ -317,7 +331,7 @@ class RandomGraphTest : public ::testing::TestWithParam<uint32_t> {
// reliability, as we do with real devices.
SCOPED_TRACE("Compute across synthetic devices");
DeviceManager::get()->forTest_setDevices(mSyntheticDevices);
- compute(&model, true);
+ computeAndVerifyResults(&model, true);
DeviceManager::get()->forTest_setDevices(mStandardDevices);
}
#endif