summary | refs | log | tree | commit | diff
diff options
context:
space:
mode:
author    TreeHugger Robot <treehugger-gerrit@google.com>  2019-05-16 00:39:31 +0000
committer Android (Google) Code Review <android-gerrit@google.com>  2019-05-16 00:39:31 +0000
commit    4363637cbd8ca9b782a9ab5e2bfdc95404649910 (patch)
tree      ea9e0e63cadd83f9677c64801622c998f3e2538e
parent    7d3435f6c477cfe9d1954084808cfdc5f8dfb4ad (diff)
parent    bf13d7a8ecbe4c723405372ce279d51ce15af3ee (diff)
download  ml-4363637cbd8ca9b782a9ab5e2bfdc95404649910.tar.gz
Merge "Validate output quantization parameters in activation ops" into qt-dev
-rw-r--r--  nn/common/operations/Activation.cpp                        | 43
-rw-r--r--  nn/runtime/test/generated/models/logistic_v1_2.model.cpp   |  4
-rw-r--r--  nn/runtime/test/generated/vts_models/logistic_v1_2.model.cpp |  4
-rw-r--r--  nn/runtime/test/specs/V1_2/logistic_v1_2.mod.py            |  2
4 files changed, 37 insertions, 16 deletions
diff --git a/nn/common/operations/Activation.cpp b/nn/common/operations/Activation.cpp
index 491226e60..f85f6b4bf 100644
--- a/nn/common/operations/Activation.cpp
+++ b/nn/common/operations/Activation.cpp
@@ -226,11 +226,28 @@ bool validate(OperationType opType, const IOperationValidationContext* context)
return validateInputTypes(context, {inputType}) && validateOutputTypes(context, {inputType});
}
-bool prepare(IOperationExecutionContext* context) {
+bool prepare(OperationType opType, IOperationExecutionContext* context) {
Shape input = context->getInputShape(kInputTensor);
NN_RET_CHECK_LE(getNumberOfDimensions(input), 4);
- Shape output = context->getOutputShape(kOutputTensor);
- output.dimensions = input.dimensions;
+ Shape output = input;
+ if (input.type == OperandType::TENSOR_QUANT8_ASYMM) {
+ switch (opType) {
+ case OperationType::RELU:
+ case OperationType::RELU1:
+ case OperationType::RELU6:
+ break;
+ case OperationType::LOGISTIC:
+ output.scale = 1.f / 256;
+ output.offset = 0;
+ break;
+ case OperationType::TANH:
+ output.scale = 1.f / 128;
+ output.offset = 128;
+ break;
+ default:
+ NN_RET_CHECK_FAIL() << "Unsupported operation type";
+ }
+ }
return context->setOutputShape(kOutputTensor, output);
}
@@ -326,7 +343,7 @@ bool executeLogistic(IOperationExecutionContext* context) {
context->getOutputBuffer<uint8_t>(kOutputTensor),
context->getOutputShape(kOutputTensor));
default:
- NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation TANH";
+ NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation LOGISTIC";
}
}
@@ -350,7 +367,7 @@ bool executeTanh(IOperationExecutionContext* context) {
context->getOutputBuffer<uint8_t>(kOutputTensor),
context->getOutputShape(kOutputTensor));
default:
- NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation LOGISTIC";
+ NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation TANH";
}
}
@@ -358,17 +375,21 @@ bool executeTanh(IOperationExecutionContext* context) {
using std::placeholders::_1;
NN_REGISTER_OPERATION(RELU, "RELU", std::bind(activation::validate, OperationType::RELU, _1),
- activation::prepare, activation::executeRelu, .allowZeroSizedInput = true);
+ std::bind(activation::prepare, OperationType::RELU, _1),
+ activation::executeRelu, .allowZeroSizedInput = true);
NN_REGISTER_OPERATION(RELU1, "RELU1", std::bind(activation::validate, OperationType::RELU1, _1),
- activation::prepare, activation::executeRelu1, .allowZeroSizedInput = true);
+ std::bind(activation::prepare, OperationType::RELU1, _1),
+ activation::executeRelu1, .allowZeroSizedInput = true);
NN_REGISTER_OPERATION(RELU6, "RELU6", std::bind(activation::validate, OperationType::RELU6, _1),
- activation::prepare, activation::executeRelu6, .allowZeroSizedInput = true);
+ std::bind(activation::prepare, OperationType::RELU6, _1),
+ activation::executeRelu6, .allowZeroSizedInput = true);
NN_REGISTER_OPERATION(LOGISTIC, "LOGISTIC",
std::bind(activation::validate, OperationType::LOGISTIC, _1),
- activation::prepare, activation::executeLogistic,
- .allowZeroSizedInput = true);
+ std::bind(activation::prepare, OperationType::LOGISTIC, _1),
+ activation::executeLogistic, .allowZeroSizedInput = true);
NN_REGISTER_OPERATION(TANH, "TANH", std::bind(activation::validate, OperationType::TANH, _1),
- activation::prepare, activation::executeTanh, .allowZeroSizedInput = true);
+ std::bind(activation::prepare, OperationType::TANH, _1),
+ activation::executeTanh, .allowZeroSizedInput = true);
} // namespace nn
} // namespace android
diff --git a/nn/runtime/test/generated/models/logistic_v1_2.model.cpp b/nn/runtime/test/generated/models/logistic_v1_2.model.cpp
index 3c5187861..170d881eb 100644
--- a/nn/runtime/test/generated/models/logistic_v1_2.model.cpp
+++ b/nn/runtime/test/generated/models/logistic_v1_2.model.cpp
@@ -252,7 +252,7 @@ void CreateModel_zero_sized_quant8(Model *model) {
OperandType type10(Type::BOOL, {});
OperandType type14(Type::TENSOR_QUANT8_ASYMM, {0, 2, 2, 1}, 0.1f, 128);
OperandType type15(Type::TENSOR_QUANT8_ASYMM, {1, 1, 1, 1}, 0.1f, 128);
- OperandType type16(Type::TENSOR_QUANT8_ASYMM, {0, 2, 2, 1}, 0.00390625f, 128);
+ OperandType type16(Type::TENSOR_QUANT8_ASYMM, {0, 2, 2, 1}, 0.00390625f, 0);
OperandType type17(Type::TENSOR_QUANT16_ASYMM, {1, 8}, 0.125f, 0);
OperandType type18(Type::TENSOR_QUANT16_ASYMM, {0, 4}, 0.125f, 0);
OperandType type19(Type::TENSOR_QUANT8_ASYMM, {1, 2}, 0.1f, 128);
@@ -597,7 +597,7 @@ void CreateModel_zero_sized_dynamic_output_shape_quant8(Model *model) {
OperandType type18(Type::TENSOR_QUANT16_ASYMM, {0, 4}, 0.125f, 0);
OperandType type19(Type::TENSOR_QUANT8_ASYMM, {1, 2}, 0.1f, 128);
OperandType type20(Type::TENSOR_QUANT8_ASYMM, {0}, 0.1f, 128);
- OperandType type29(Type::TENSOR_QUANT8_ASYMM, {0, 0, 0, 0}, 0.00390625f, 128);
+ OperandType type29(Type::TENSOR_QUANT8_ASYMM, {0, 0, 0, 0}, 0.00390625f, 0);
OperandType type5(Type::TENSOR_INT32, {0});
OperandType type7(Type::TENSOR_INT32, {1});
OperandType type8(Type::FLOAT32, {});
diff --git a/nn/runtime/test/generated/vts_models/logistic_v1_2.model.cpp b/nn/runtime/test/generated/vts_models/logistic_v1_2.model.cpp
index cd0f47a49..fc9419281 100644
--- a/nn/runtime/test/generated/vts_models/logistic_v1_2.model.cpp
+++ b/nn/runtime/test/generated/vts_models/logistic_v1_2.model.cpp
@@ -915,7 +915,7 @@ Model createTestModel_zero_sized_quant8() {
.dimensions = {0, 2, 2, 1},
.numberOfConsumers = 0,
.scale = 0.00390625f,
- .zeroPoint = 128,
+ .zeroPoint = 0,
.lifetime = OperandLifeTime::MODEL_OUTPUT,
.location = {.poolIndex = 0, .offset = 0, .length = 0},
}
@@ -1924,7 +1924,7 @@ Model createTestModel_zero_sized_dynamic_output_shape_quant8() {
.dimensions = {0, 0, 0, 0},
.numberOfConsumers = 0,
.scale = 0.00390625f,
- .zeroPoint = 128,
+ .zeroPoint = 0,
.lifetime = OperandLifeTime::MODEL_OUTPUT,
.location = {.poolIndex = 0, .offset = 0, .length = 0},
}
diff --git a/nn/runtime/test/specs/V1_2/logistic_v1_2.mod.py b/nn/runtime/test/specs/V1_2/logistic_v1_2.mod.py
index f8037b1c5..fe91a814d 100644
--- a/nn/runtime/test/specs/V1_2/logistic_v1_2.mod.py
+++ b/nn/runtime/test/specs/V1_2/logistic_v1_2.mod.py
@@ -82,7 +82,7 @@ quant8 = DataTypeConverter().Identify({
tmp1: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
i1: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
zero_sized: ("TENSOR_QUANT8_ASYMM", 0.1, 128),
- o3: ("TENSOR_QUANT8_ASYMM", 1.0 / 256, 128)
+ o3: ("TENSOR_QUANT8_ASYMM", 1.0 / 256, 0)
})
# Create test case with dummy values.