diff options
author | Slava Shklyaev <slavash@google.com> | 2019-10-30 12:36:21 +0000 |
---|---|---|
committer | Slava Shklyaev <slavash@google.com> | 2019-11-07 10:49:22 +0000 |
commit | b9869c6834fde4173f4b3f5c74210ce53c529200 (patch) | |
tree | 780d2da912c6e9c1e71ddc630af0a3342920af8b | |
parent | 54c383493c8a3417179d9903314f0ec38a5deacb (diff) | |
download | ml-b9869c6834fde4173f4b3f5c74210ce53c529200.tar.gz |
Add new helper method to TestNeuralNetworksWrapper
addConstantOperand is a shortcut for writing more readable tests.
Before:
const int32_t kValue = 1;
uint32_t operand = model.addOperand(&type);
model.setOperandValue(operand, &kValue, sizeof(kValue));
After:
uint32_t operand = model.addConstantOperand(&type, int32_t{1});
Bug: 139181916
Test: NNT_static
Change-Id: I660e1949aac4334199e66352b0e16167b08566e3
Merged-In: I660e1949aac4334199e66352b0e16167b08566e3
(cherry picked from commit 586e4b9f3e2946b50c0773205c18ace1c56430d2)
-rw-r--r-- | nn/runtime/test/TestNeuralNetworksWrapper.h | 22 | ||||
-rw-r--r-- | nn/runtime/test/TestTrivialModel.cpp | 20 |
2 files changed, 24 insertions(+), 18 deletions(-)
diff --git a/nn/runtime/test/TestNeuralNetworksWrapper.h b/nn/runtime/test/TestNeuralNetworksWrapper.h index 8bc8a9e7c..6f362e7da 100644 --- a/nn/runtime/test/TestNeuralNetworksWrapper.h +++ b/nn/runtime/test/TestNeuralNetworksWrapper.h @@ -20,15 +20,17 @@ #ifndef ANDROID_FRAMEWORKS_ML_NN_RUNTIME_TEST_TEST_NEURAL_NETWORKS_WRAPPER_H #define ANDROID_FRAMEWORKS_ML_NN_RUNTIME_TEST_TEST_NEURAL_NETWORKS_WRAPPER_H -#include "NeuralNetworks.h" -#include "NeuralNetworksWrapper.h" -#include "NeuralNetworksWrapperExtensions.h" - #include <math.h> + #include <optional> #include <string> +#include <utility> #include <vector> +#include "NeuralNetworks.h" +#include "NeuralNetworksWrapper.h" +#include "NeuralNetworksWrapperExtensions.h" + namespace android { namespace nn { namespace test_wrapper { @@ -147,6 +149,18 @@ class Model { return mNextOperandId++; } + template <typename T> + uint32_t addConstantOperand(const OperandType* type, const T& value) { + static_assert(!std::is_pointer_v<T>, + "Pointer value type not supported because sizeof(T) is wrong"); + static_assert(sizeof(T) <= ANEURALNETWORKS_MAX_SIZE_OF_IMMEDIATELY_COPIED_VALUES, + "Values larger than ANEURALNETWORKS_MAX_SIZE_OF_IMMEDIATELY_COPIED_VALUES " + "not supported"); + uint32_t index = addOperand(type); + setOperandValue(index, &value, sizeof(T)); + return index; + } + void setOperandValue(uint32_t index, const void* buffer, size_t length) { if (ANeuralNetworksModel_setOperandValue(mModel, index, buffer, length) != ANEURALNETWORKS_NO_ERROR) { diff --git a/nn/runtime/test/TestTrivialModel.cpp b/nn/runtime/test/TestTrivialModel.cpp index 7280e6ae0..85da1d1f0 100644 --- a/nn/runtime/test/TestTrivialModel.cpp +++ b/nn/runtime/test/TestTrivialModel.cpp @@ -26,6 +26,8 @@ namespace { typedef float Matrix3x4[3][4]; typedef float Matrix4[4]; +const int32_t kNoActivation = ANEURALNETWORKS_FUSED_NONE; + class TrivialTest : public ::testing::Test { protected: virtual void SetUp() {} @@ -58,12 +60,10 @@ class TrivialTest : 
public ::testing::Test { void CreateAddTwoTensorModel(Model* model) { OperandType matrixType(Type::TENSOR_FLOAT32, {3, 4}); OperandType scalarType(Type::INT32, {}); - int32_t activation(ANEURALNETWORKS_FUSED_NONE); auto a = model->addOperand(&matrixType); auto b = model->addOperand(&matrixType); auto c = model->addOperand(&matrixType); - auto d = model->addOperand(&scalarType); - model->setOperandValue(d, &activation, sizeof(activation)); + auto d = model->addConstantOperand(&scalarType, kNoActivation); model->addOperation(ANEURALNETWORKS_ADD, {a, b, d}, {c}); model->identifyInputsAndOutputs({a, b}, {c}); ASSERT_TRUE(model->isValid()); @@ -75,15 +75,13 @@ void CreateAddTwoTensorModel(Model* model) { void CreateAddThreeTensorModel(Model* model, const Matrix3x4 bias) { OperandType matrixType(Type::TENSOR_FLOAT32, {3, 4}); OperandType scalarType(Type::INT32, {}); - int32_t activation(ANEURALNETWORKS_FUSED_NONE); auto a = model->addOperand(&matrixType); auto b = model->addOperand(&matrixType); auto c = model->addOperand(&matrixType); auto d = model->addOperand(&matrixType); auto e = model->addOperand(&matrixType); - auto f = model->addOperand(&scalarType); + auto f = model->addConstantOperand(&scalarType, kNoActivation); model->setOperandValue(e, bias, sizeof(Matrix3x4)); - model->setOperandValue(f, &activation, sizeof(activation)); model->addOperation(ANEURALNETWORKS_ADD, {a, c, f}, {b}); model->addOperation(ANEURALNETWORKS_ADD, {b, e, f}, {d}); model->identifyInputsAndOutputs({c, a}, {d}); @@ -154,11 +152,8 @@ TEST_F(TrivialTest, AddThree) { TEST_F(TrivialTest, BroadcastAddTwo) { Model modelBroadcastAdd2; - // activation: NONE. 
- int32_t activation_init[] = {ANEURALNETWORKS_FUSED_NONE}; OperandType scalarType(Type::INT32, {}); - auto activation = modelBroadcastAdd2.addOperand(&scalarType); - modelBroadcastAdd2.setOperandValue(activation, activation_init, sizeof(int32_t) * 1); + auto activation = modelBroadcastAdd2.addConstantOperand(&scalarType, kNoActivation); OperandType matrixType(Type::TENSOR_FLOAT32, {1, 1, 3, 4}); OperandType matrixType2(Type::TENSOR_FLOAT32, {4}); @@ -186,11 +181,8 @@ TEST_F(TrivialTest, BroadcastAddTwo) { TEST_F(TrivialTest, BroadcastMulTwo) { Model modelBroadcastMul2; - // activation: NONE. - int32_t activation_init[] = {ANEURALNETWORKS_FUSED_NONE}; OperandType scalarType(Type::INT32, {}); - auto activation = modelBroadcastMul2.addOperand(&scalarType); - modelBroadcastMul2.setOperandValue(activation, activation_init, sizeof(int32_t) * 1); + auto activation = modelBroadcastMul2.addConstantOperand(&scalarType, kNoActivation); OperandType matrixType(Type::TENSOR_FLOAT32, {1, 1, 3, 4}); OperandType matrixType2(Type::TENSOR_FLOAT32, {4}); |