diff options
author | Treehugger Robot <treehugger-gerrit@google.com> | 2018-09-05 17:25:37 +0000 |
---|---|---|
committer | Gerrit Code Review <noreply-gerritcodereview@google.com> | 2018-09-05 17:25:37 +0000 |
commit | da1d60742a0b1ec52461ef83ec141740718f0d0d (patch) | |
tree | 793d431fc161531c2d444e474277f527abcb27d1 | |
parent | 89e08c0689df686a54292c0e20d7b2bdf8b11b60 (diff) | |
parent | d06374fb7b96e38e33d70f00398f11d44b7a4f02 (diff) | |
download | ml-pie-cts-dev.tar.gz |
Merge changes I8052d979,I82aaf304 into pie-cts-dev
* changes:
Fix logic for floating-point comparison.
Fix invalid tests for relaxed computation.
16 files changed, 54 insertions, 49 deletions
diff --git a/nn/runtime/test/TestGenerated.cpp b/nn/runtime/test/TestGenerated.cpp index c0a5f4cab..d27c184f2 100644 --- a/nn/runtime/test/TestGenerated.cpp +++ b/nn/runtime/test/TestGenerated.cpp @@ -82,8 +82,11 @@ static void execute(std::function<void(Model*)> createModel, Compilation compilation(&model); compilation.finish(); - // If in relaxed mode, set the error range to be 5ULP of FP16. - float fpRange = !model.isRelaxed() ? 1e-5f : 5.0f * 0.0009765625f; + // TODO: Adjust the error limit based on testing. + // If in relaxed mode, set the absolute tolerance to be 5ULP of FP16. + float fpAtol = !model.isRelaxed() ? 1e-5f : 5.0f * 0.0009765625f; + // Set the relative tolerance to be 5ULP of the corresponding FP precision. + float fpRtol = !model.isRelaxed() ? 5.0f * 1.1920928955078125e-7f : 5.0f * 0.0009765625f; for (auto& example : examples) { SCOPED_TRACE(exampleNo); // TODO: We leave it as a copy here. @@ -123,7 +126,7 @@ static void execute(std::function<void(Model*)> createModel, MixedTyped filteredTest = filter(test, isIgnored); // We want "close-enough" results for float - compare(filteredGolden, filteredTest, fpRange); + compare(filteredGolden, filteredTest, fpAtol, fpRtol); exampleNo++; } } diff --git a/nn/runtime/test/generated/examples/depthwise_conv2d_float_large_2_relaxed.example.cpp b/nn/runtime/test/generated/examples/depthwise_conv2d_float_large_2_relaxed.example.cpp index d8c887da9..2a98ea450 100644 --- a/nn/runtime/test/generated/examples/depthwise_conv2d_float_large_2_relaxed.example.cpp +++ b/nn/runtime/test/generated/examples/depthwise_conv2d_float_large_2_relaxed.example.cpp @@ -4,7 +4,7 @@ //Input(s) { // See tools/test_generator/include/TestHarness.h:MixedTyped // int -> FLOAT32 map - {{0, {10, 21, 100, 0, 10, 22, 200, 0, 10, 23, 300, 0, 10, 24, 400, 0}}}, + {{0, {10, 21, 10, 0, 10, 22, 20, 0, 10, 23, 30, 0, 10, 24, 40, 0}}}, // int -> INT32 map {}, // int -> QUANT8_ASYMM map @@ -13,7 +13,7 @@ //Output(s) { // See 
tools/test_generator/include/TestHarness.h:MixedTyped // int -> FLOAT32 map - {{0, {600010, 700046, 830000, 900000}}}, + {{0, {6010, 7046, 11000, 9000}}}, // int -> INT32 map {}, // int -> QUANT8_ASYMM map diff --git a/nn/runtime/test/generated/examples/depthwise_conv2d_float_large_2_weights_as_inputs_relaxed.example.cpp b/nn/runtime/test/generated/examples/depthwise_conv2d_float_large_2_weights_as_inputs_relaxed.example.cpp index 9b6e34286..c79d2b76a 100644 --- a/nn/runtime/test/generated/examples/depthwise_conv2d_float_large_2_weights_as_inputs_relaxed.example.cpp +++ b/nn/runtime/test/generated/examples/depthwise_conv2d_float_large_2_weights_as_inputs_relaxed.example.cpp @@ -4,7 +4,7 @@ //Input(s) { // See tools/test_generator/include/TestHarness.h:MixedTyped // int -> FLOAT32 map - {{0, {10, 21, 100, 10, 22, 200, 10, 23, 300, 10, 24, 400}}, {1, {0.25f, 0, 10, 100, 0.25f, 1, 20, 100, 0.25f, 0, 30, 100, 0.25f, 1, 40, 100}}, {2, {600000, 700000, 800000, 900000}}}, + {{0, {10, 21, 10, 0, 10, 22, 20, 0, 10, 23, 30, 0, 10, 24, 40, 0}}, {1, {0.25f, 0, 10, 100, 0.25f, 1, 20, 100, 0.25f, 0, 30, 100, 0.25f, 1, 40, 100}}, {2, {6000, 7000, 8000, 9000}}}, // int -> INT32 map {}, // int -> QUANT8_ASYMM map @@ -13,7 +13,7 @@ //Output(s) { // See tools/test_generator/include/TestHarness.h:MixedTyped // int -> FLOAT32 map - {{0, {600010, 700046, 830000, 900000}}}, + {{0, {6010, 7046, 11000, 9000}}}, // int -> INT32 map {}, // int -> QUANT8_ASYMM map diff --git a/nn/runtime/test/generated/examples/fully_connected_float_large_relaxed.example.cpp b/nn/runtime/test/generated/examples/fully_connected_float_large_relaxed.example.cpp index 131bc5fe3..e12527666 100644 --- a/nn/runtime/test/generated/examples/fully_connected_float_large_relaxed.example.cpp +++ b/nn/runtime/test/generated/examples/fully_connected_float_large_relaxed.example.cpp @@ -4,7 +4,7 @@ //Input(s) { // See tools/test_generator/include/TestHarness.h:MixedTyped // int -> FLOAT32 map - {{0, {1, 10, 100, 1000, 
10000}}}, + {{0, {1, 10, 100, 500, 1000}}}, // int -> INT32 map {}, // int -> QUANT8_ASYMM map @@ -13,7 +13,7 @@ //Output(s) { // See tools/test_generator/include/TestHarness.h:MixedTyped // int -> FLOAT32 map - {{0, {965432}}}, + {{0, {9832}}}, // int -> INT32 map {}, // int -> QUANT8_ASYMM map diff --git a/nn/runtime/test/generated/examples/fully_connected_float_large_weights_as_inputs_relaxed.example.cpp b/nn/runtime/test/generated/examples/fully_connected_float_large_weights_as_inputs_relaxed.example.cpp index 990fd44a2..09ece5dce 100644 --- a/nn/runtime/test/generated/examples/fully_connected_float_large_weights_as_inputs_relaxed.example.cpp +++ b/nn/runtime/test/generated/examples/fully_connected_float_large_weights_as_inputs_relaxed.example.cpp @@ -4,7 +4,7 @@ //Input(s) { // See tools/test_generator/include/TestHarness.h:MixedTyped // int -> FLOAT32 map - {{0, {1, 10, 100, 1000, 10000}}, {1, {2, 3, 4, 5, 6}}, {2, {900000}}}, + {{0, {1, 10, 100, 500, 1000}}, {1, {2, 3, 4, 5, 6}}, {2, {900}}}, // int -> INT32 map {}, // int -> QUANT8_ASYMM map @@ -13,7 +13,7 @@ //Output(s) { // See tools/test_generator/include/TestHarness.h:MixedTyped // int -> FLOAT32 map - {{0, {965432}}}, + {{0, {9832}}}, // int -> INT32 map {}, // int -> QUANT8_ASYMM map diff --git a/nn/runtime/test/generated/models/depthwise_conv2d_float_large_2_relaxed.model.cpp b/nn/runtime/test/generated/models/depthwise_conv2d_float_large_2_relaxed.model.cpp index f736d8f83..724e3cc80 100644 --- a/nn/runtime/test/generated/models/depthwise_conv2d_float_large_2_relaxed.model.cpp +++ b/nn/runtime/test/generated/models/depthwise_conv2d_float_large_2_relaxed.model.cpp @@ -16,7 +16,7 @@ void CreateModel(Model *model) { // Phase 2, operations static float op2_init[] = {0.25f, 0.0f, 10.0f, 100.0f, 0.25f, 1.0f, 20.0f, 100.0f, 0.25f, 0.0f, 30.0f, 100.0f, 0.25f, 1.0f, 40.0f, 100.0f}; model->setOperandValue(op2, op2_init, sizeof(float) * 16); - static float op3_init[] = {600000.0f, 700000.0f, 800000.0f, 
900000.0f}; + static float op3_init[] = {6000.0f, 7000.0f, 8000.0f, 9000.0f}; model->setOperandValue(op3, op3_init, sizeof(float) * 4); static int32_t pad0_init[] = {0}; model->setOperandValue(pad0, pad0_init, sizeof(int32_t) * 1); diff --git a/nn/runtime/test/generated/models/depthwise_conv2d_float_large_2_weights_as_inputs_relaxed.model.cpp b/nn/runtime/test/generated/models/depthwise_conv2d_float_large_2_weights_as_inputs_relaxed.model.cpp index d24074c7b..0516ef451 100644 --- a/nn/runtime/test/generated/models/depthwise_conv2d_float_large_2_weights_as_inputs_relaxed.model.cpp +++ b/nn/runtime/test/generated/models/depthwise_conv2d_float_large_2_weights_as_inputs_relaxed.model.cpp @@ -1,19 +1,18 @@ // Generated file (from: depthwise_conv2d_float_large_2_weights_as_inputs_relaxed.mod.py). Do not edit void CreateModel(Model *model) { - OperandType type3(Type::INT32, {}); - OperandType type4(Type::TENSOR_FLOAT32, {1, 1, 1, 4}); - OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 3}); - OperandType type1(Type::TENSOR_FLOAT32, {1, 2, 2, 4}); - OperandType type2(Type::TENSOR_FLOAT32, {4}); + OperandType type2(Type::INT32, {}); + OperandType type3(Type::TENSOR_FLOAT32, {1, 1, 1, 4}); + OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 4}); + OperandType type1(Type::TENSOR_FLOAT32, {4}); // Phase 1, operands auto op1 = model->addOperand(&type0); - auto op2 = model->addOperand(&type1); - auto op3 = model->addOperand(&type2); - auto pad0 = model->addOperand(&type3); - auto act = model->addOperand(&type3); - auto stride = model->addOperand(&type3); - auto channelMultiplier = model->addOperand(&type3); - auto op4 = model->addOperand(&type4); + auto op2 = model->addOperand(&type0); + auto op3 = model->addOperand(&type1); + auto pad0 = model->addOperand(&type2); + auto act = model->addOperand(&type2); + auto stride = model->addOperand(&type2); + auto channelMultiplier = model->addOperand(&type2); + auto op4 = model->addOperand(&type3); // Phase 2, operations static int32_t 
pad0_init[] = {0}; model->setOperandValue(pad0, pad0_init, sizeof(int32_t) * 1); diff --git a/nn/runtime/test/generated/models/fully_connected_float_large_relaxed.model.cpp b/nn/runtime/test/generated/models/fully_connected_float_large_relaxed.model.cpp index a4a719f8f..98ca877fb 100644 --- a/nn/runtime/test/generated/models/fully_connected_float_large_relaxed.model.cpp +++ b/nn/runtime/test/generated/models/fully_connected_float_large_relaxed.model.cpp @@ -13,7 +13,7 @@ void CreateModel(Model *model) { // Phase 2, operations static float op2_init[] = {2.0f, 3.0f, 4.0f, 5.0f, 6.0f}; model->setOperandValue(op2, op2_init, sizeof(float) * 5); - static float b0_init[] = {900000.0f}; + static float b0_init[] = {900.0f}; model->setOperandValue(b0, b0_init, sizeof(float) * 1); static int32_t act_init[] = {0}; model->setOperandValue(act, act_init, sizeof(int32_t) * 1); diff --git a/nn/runtime/test/generated/vts_models/depthwise_conv2d_float_large_2_relaxed.model.cpp b/nn/runtime/test/generated/vts_models/depthwise_conv2d_float_large_2_relaxed.model.cpp index 81fc5524b..ae0fd8c40 100644 --- a/nn/runtime/test/generated/vts_models/depthwise_conv2d_float_large_2_relaxed.model.cpp +++ b/nn/runtime/test/generated/vts_models/depthwise_conv2d_float_large_2_relaxed.model.cpp @@ -87,7 +87,7 @@ Model createTestModel() { const std::vector<uint32_t> inputIndexes = {0}; const std::vector<uint32_t> outputIndexes = {7}; std::vector<uint8_t> operandValues = { - 0, 0, 128, 62, 0, 0, 0, 0, 0, 0, 32, 65, 0, 0, 200, 66, 0, 0, 128, 62, 0, 0, 128, 63, 0, 0, 160, 65, 0, 0, 200, 66, 0, 0, 128, 62, 0, 0, 0, 0, 0, 0, 240, 65, 0, 0, 200, 66, 0, 0, 128, 62, 0, 0, 128, 63, 0, 0, 32, 66, 0, 0, 200, 66, 0, 124, 18, 73, 0, 230, 42, 73, 0, 80, 67, 73, 0, 186, 91, 73, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0 + 0, 0, 128, 62, 0, 0, 0, 0, 0, 0, 32, 65, 0, 0, 200, 66, 0, 0, 128, 62, 0, 0, 128, 63, 0, 0, 160, 65, 0, 0, 200, 66, 0, 0, 128, 62, 0, 0, 0, 0, 0, 0, 240, 65, 0, 0, 200, 66, 0, 0, 128, 62, 0, 0, 
128, 63, 0, 0, 32, 66, 0, 0, 200, 66, 0, 128, 187, 69, 0, 192, 218, 69, 0, 0, 250, 69, 0, 160, 12, 70, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0 }; const std::vector<hidl_memory> pools = {}; diff --git a/nn/runtime/test/generated/vts_models/depthwise_conv2d_float_large_2_weights_as_inputs_relaxed.model.cpp b/nn/runtime/test/generated/vts_models/depthwise_conv2d_float_large_2_weights_as_inputs_relaxed.model.cpp index e205e2dbb..4c8415b08 100644 --- a/nn/runtime/test/generated/vts_models/depthwise_conv2d_float_large_2_weights_as_inputs_relaxed.model.cpp +++ b/nn/runtime/test/generated/vts_models/depthwise_conv2d_float_large_2_weights_as_inputs_relaxed.model.cpp @@ -4,7 +4,7 @@ Model createTestModel() { const std::vector<Operand> operands = { { .type = OperandType::TENSOR_FLOAT32, - .dimensions = {1, 2, 2, 3}, + .dimensions = {1, 2, 2, 4}, .numberOfConsumers = 1, .scale = 0.0f, .zeroPoint = 0, diff --git a/nn/runtime/test/generated/vts_models/fully_connected_float_large_relaxed.model.cpp b/nn/runtime/test/generated/vts_models/fully_connected_float_large_relaxed.model.cpp index 9a3314046..712d47ce1 100644 --- a/nn/runtime/test/generated/vts_models/fully_connected_float_large_relaxed.model.cpp +++ b/nn/runtime/test/generated/vts_models/fully_connected_float_large_relaxed.model.cpp @@ -60,7 +60,7 @@ Model createTestModel() { const std::vector<uint32_t> inputIndexes = {0}; const std::vector<uint32_t> outputIndexes = {3}; std::vector<uint8_t> operandValues = { - 0, 0, 0, 64, 0, 0, 64, 64, 0, 0, 128, 64, 0, 0, 160, 64, 0, 0, 192, 64, 0, 186, 91, 73, 0, 0, 0, 0 + 0, 0, 0, 64, 0, 0, 64, 64, 0, 0, 128, 64, 0, 0, 160, 64, 0, 0, 192, 64, 0, 0, 97, 68, 0, 0, 0, 0 }; const std::vector<hidl_memory> pools = {}; diff --git a/nn/runtime/test/specs/V1_1/depthwise_conv2d_float_large_2_relaxed.mod.py b/nn/runtime/test/specs/V1_1/depthwise_conv2d_float_large_2_relaxed.mod.py index 0d39d12a0..a878b6029 100644 --- 
a/nn/runtime/test/specs/V1_1/depthwise_conv2d_float_large_2_relaxed.mod.py +++ b/nn/runtime/test/specs/V1_1/depthwise_conv2d_float_large_2_relaxed.mod.py @@ -17,7 +17,7 @@ model = Model() i1 = Input("op1", "TENSOR_FLOAT32", "{1, 2, 2, 4}") # depth_in = 4 f1 = Parameter("op2", "TENSOR_FLOAT32", "{1, 2, 2, 4}", [.25, 0, 10, 100, .25, 1, 20, 100, .25, 0, 30, 100, .25, 1, 40, 100]) # depth_out = 4 -b1 = Parameter("op3", "TENSOR_FLOAT32", "{4}", [600000, 700000, 800000, 900000]) # depth_out = 4 +b1 = Parameter("op3", "TENSOR_FLOAT32", "{4}", [6000, 7000, 8000, 9000]) # depth_out = 4 pad0 = Int32Scalar("pad0", 0) act = Int32Scalar("act", 0) stride = Int32Scalar("stride", 1) @@ -34,14 +34,14 @@ model = model.RelaxedExecution(True) # Example 1. Input in operand 0, input0 = { i1: [ # input 0 - 10, 21, 100, 0, - 10, 22, 200, 0, - 10, 23, 300, 0, - 10, 24, 400, 0], + 10, 21, 10, 0, + 10, 22, 20, 0, + 10, 23, 30, 0, + 10, 24, 40, 0], } # (i1 (conv) f1) + b1 output0 = {output: # output 0 - [600010, 700046, 830000, 900000]} + [6010, 7046, 11000, 9000]} # Instantiate an example Example((input0, output0)) diff --git a/nn/runtime/test/specs/V1_1/depthwise_conv2d_float_large_2_weights_as_inputs_relaxed.mod.py b/nn/runtime/test/specs/V1_1/depthwise_conv2d_float_large_2_weights_as_inputs_relaxed.mod.py index c3835120c..26d1f09bc 100644 --- a/nn/runtime/test/specs/V1_1/depthwise_conv2d_float_large_2_weights_as_inputs_relaxed.mod.py +++ b/nn/runtime/test/specs/V1_1/depthwise_conv2d_float_large_2_weights_as_inputs_relaxed.mod.py @@ -15,7 +15,7 @@ # model = Model() -i1 = Input("op1", "TENSOR_FLOAT32", "{1, 2, 2, 3}") # depth_in = 3 +i1 = Input("op1", "TENSOR_FLOAT32", "{1, 2, 2, 4}") # depth_in = 4 f1 = Input("op2", "TENSOR_FLOAT32", "{1, 2, 2, 4}") # depth_out = 4 b1 = Input("op3", "TENSOR_FLOAT32", "{4}") # depth_out = 4 pad0 = Int32Scalar("pad0", 0) @@ -34,21 +34,21 @@ model = model.RelaxedExecution(True) # Example 1. 
Input in operand 0, input0 = { i1: [ # input 0 - 10, 21, 100, - 10, 22, 200, - 10, 23, 300, - 10, 24, 400], + 10, 21, 10, 0, + 10, 22, 20, 0, + 10, 23, 30, 0, + 10, 24, 40, 0], f1: [ .25, 0, 10, 100, .25, 1, 20, 100, .25, 0, 30, 100, .25, 1, 40, 100], b1: - [600000, 700000, 800000, 900000] + [6000, 7000, 8000, 9000] } # (i1 (conv) f1) + b1 output0 = {output: # output 0 - [600010, 700046, 830000, 900000]} + [6010, 7046, 11000, 9000]} # Instantiate an example Example((input0, output0)) diff --git a/nn/runtime/test/specs/V1_1/fully_connected_float_large_relaxed.mod.py b/nn/runtime/test/specs/V1_1/fully_connected_float_large_relaxed.mod.py index d898534e6..e170f68ed 100644 --- a/nn/runtime/test/specs/V1_1/fully_connected_float_large_relaxed.mod.py +++ b/nn/runtime/test/specs/V1_1/fully_connected_float_large_relaxed.mod.py @@ -17,7 +17,7 @@ model = Model() in0 = Input("op1", "TENSOR_FLOAT32", "{1, 5}") # batch = 1, input_size = 5 weights = Parameter("op2", "TENSOR_FLOAT32", "{1, 5}", [2, 3, 4, 5, 6]) # num_units = 1, input_size = 5 -bias = Parameter("b0", "TENSOR_FLOAT32", "{1}", [900000]) +bias = Parameter("b0", "TENSOR_FLOAT32", "{1}", [900]) out0 = Output("op3", "TENSOR_FLOAT32", "{1, 1}") # batch = 1, number_units = 1 act = Int32Scalar("act", 0) model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0) @@ -25,9 +25,9 @@ model = model.RelaxedExecution(True) # Example 1. 
Input in operand 0, input0 = {in0: # input 0 - [1, 10, 100, 1000, 10000]} + [1, 10, 100, 500, 1000]} output0 = {out0: # output 0 - [965432]} + [9832]} # Instantiate an example Example((input0, output0)) diff --git a/nn/runtime/test/specs/V1_1/fully_connected_float_large_weights_as_inputs_relaxed.mod.py b/nn/runtime/test/specs/V1_1/fully_connected_float_large_weights_as_inputs_relaxed.mod.py index d3f24c5eb..01f71ba29 100644 --- a/nn/runtime/test/specs/V1_1/fully_connected_float_large_weights_as_inputs_relaxed.mod.py +++ b/nn/runtime/test/specs/V1_1/fully_connected_float_large_weights_as_inputs_relaxed.mod.py @@ -25,13 +25,13 @@ model = model.RelaxedExecution(True) # Example 1. Input in operand 0, input0 = {in0: # input 0 - [1, 10, 100, 1000, 10000], + [1, 10, 100, 500, 1000], weights: [2, 3, 4, 5, 6], bias: - [900000]} + [900]} output0 = {out0: # output 0 - [965432]} + [9832]} # Instantiate an example Example((input0, output0)) diff --git a/nn/tools/test_generator/include/TestHarness.h b/nn/tools/test_generator/include/TestHarness.h index adbdf8fe0..bb9d21550 100644 --- a/nn/tools/test_generator/include/TestHarness.h +++ b/nn/tools/test_generator/include/TestHarness.h @@ -175,9 +175,12 @@ void compare_( } #undef VALUE_TYPE #undef VECTOR_TYPE -inline void compare(const MixedTyped& golden, const MixedTyped& test, float fpRange = 1e-5f) { +inline void compare(const MixedTyped& golden, const MixedTyped& test, + float fpAtol = 1e-5f, float fpRtol = 1e-5f) { size_t totalNumberOfErrors = 0; - compare_<0>(golden, test, [&totalNumberOfErrors, fpRange](float g, float t) { + compare_<0>(golden, test, [&totalNumberOfErrors, fpAtol, fpRtol](float g, float t) { + // Compute the range based on both absolute tolerance and relative tolerance + float fpRange = fpAtol + fpRtol * std::abs(g); if (totalNumberOfErrors < gMaximumNumberOfErrorMessages) { EXPECT_NEAR(g, t, fpRange); } |