From d721f0edd0971c00c5b8c083fbc08e018b73832b Mon Sep 17 00:00:00 2001 From: Slava Shklyaev Date: Wed, 11 Nov 2020 14:02:25 +0000 Subject: Use sensible Capabilities in test drivers Although zero PerformanceInfo::execTime or powerUsage is not explicitly prohibited by the spec, it makes little sense and the new HAL device wrappers (change Iec6ae739) do not accept zero PerformanceInfo. Bug: 170289677 Test: NNT_static Change-Id: I6cc0af5f30ad980e4866badacd9ae8d2c9f87022 Merged-In: I6cc0af5f30ad980e4866badacd9ae8d2c9f87022 (cherry picked from commit e72232d73cd95c8a0383430d5b491744367b2303) --- nn/runtime/test/HalUtils.h | 37 ++++++++++++++++++++++++++ nn/runtime/test/TestExtensions.cpp | 3 ++- nn/runtime/test/TestFailingDriver.cpp | 6 ++--- nn/runtime/test/TestPartitioning.cpp | 15 +++-------- nn/runtime/test/TestRemoveDefaultArguments.cpp | 3 ++- 5 files changed, 46 insertions(+), 18 deletions(-) create mode 100644 nn/runtime/test/HalUtils.h diff --git a/nn/runtime/test/HalUtils.h b/nn/runtime/test/HalUtils.h new file mode 100644 index 000000000..a1cb5b13d --- /dev/null +++ b/nn/runtime/test/HalUtils.h @@ -0,0 +1,37 @@ +/* + * Copyright (C) 2020 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ANDROID_FRAMEWORKS_ML_NN_RUNTIME_TEST_HAL_UTILS_H +#define ANDROID_FRAMEWORKS_ML_NN_RUNTIME_TEST_HAL_UTILS_H + +#include "HalInterfaces.h" +#include "Utils.h" + +namespace android::nn { + +// Creates valid V1_3::Capabilities. +inline V1_3::Capabilities makeCapabilities(float perf) { + const V1_0::PerformanceInfo perfInfo = {.execTime = perf, .powerUsage = perf}; + return {.relaxedFloat32toFloat16PerformanceScalar = perfInfo, + .relaxedFloat32toFloat16PerformanceTensor = perfInfo, + .operandPerformance = nonExtensionOperandPerformance(perfInfo), + .ifPerformance = perfInfo, + .whilePerformance = perfInfo}; +}; + +} // namespace android::nn + +#endif // ANDROID_FRAMEWORKS_ML_NN_RUNTIME_TEST_HAL_UTILS_H diff --git a/nn/runtime/test/TestExtensions.cpp b/nn/runtime/test/TestExtensions.cpp index da13073e2..d9fa96d7c 100644 --- a/nn/runtime/test/TestExtensions.cpp +++ b/nn/runtime/test/TestExtensions.cpp @@ -20,6 +20,7 @@ #include #include "HalInterfaces.h" +#include "HalUtils.h" #include "Manager.h" #include "NeuralNetworks.h" #include "NeuralNetworksExtensions.h" @@ -56,7 +57,7 @@ class TestDriver : public SampleDriver { } hardware::Return getCapabilities_1_3(getCapabilities_1_3_cb cb) override { - cb(V1_3::ErrorStatus::NONE, {/* Placeholder zero-filled capabilities. 
*/}); + cb(V1_3::ErrorStatus::NONE, ::android::nn::makeCapabilities(1.0)); return hardware::Void(); } diff --git a/nn/runtime/test/TestFailingDriver.cpp b/nn/runtime/test/TestFailingDriver.cpp index d2e30a656..a7a0aa5d9 100644 --- a/nn/runtime/test/TestFailingDriver.cpp +++ b/nn/runtime/test/TestFailingDriver.cpp @@ -22,6 +22,7 @@ #include "CompilationBuilder.h" #include "ExecutionPlan.h" +#include "HalUtils.h" #include "Manager.h" #include "SampleDriverPartial.h" #include "TestNeuralNetworksWrapper.h" @@ -51,10 +52,7 @@ class FailingTestDriver : public SampleDriverPartial { FailingTestDriver() : SampleDriverPartial(kTestDriverName, &mEmptyOperationResolver) {} hardware::Return getCapabilities_1_3(getCapabilities_1_3_cb cb) override { - cb(V1_3::ErrorStatus::NONE, - {.operandPerformance = {{.type = V1_3::OperandType::TENSOR_FLOAT32, - .info = {.execTime = 0.1, // Faster than CPU. - .powerUsage = 0.1}}}}); + cb(V1_3::ErrorStatus::NONE, makeCapabilities(0.1)); // Faster than CPU. return hardware::Void(); } diff --git a/nn/runtime/test/TestPartitioning.cpp b/nn/runtime/test/TestPartitioning.cpp index 939612a78..8e705afb9 100644 --- a/nn/runtime/test/TestPartitioning.cpp +++ b/nn/runtime/test/TestPartitioning.cpp @@ -33,6 +33,7 @@ #include "ControlFlow.h" #include "ExecutionPlan.h" #include "HalInterfaces.h" +#include "HalUtils.h" #include "Manager.h" #include "ModelBuilder.h" #include "NeuralNetworks.h" @@ -175,16 +176,6 @@ using WrapperSymmPerChannelQuantParams = ::android::nn::test_wrapper::SymmPerCha using WrapperType = ::android::nn::test_wrapper::Type; using android::sp; -V1_3::Capabilities makeCapabilities(float perf) { - V1_0::PerformanceInfo perfInfo = {.execTime = perf, .powerUsage = perf}; - return {.relaxedFloat32toFloat16PerformanceScalar = perfInfo, - .relaxedFloat32toFloat16PerformanceTensor = perfInfo, - .operandPerformance = - ::android::nn::nonExtensionOperandPerformance(perfInfo), - .ifPerformance = perfInfo, - .whilePerformance = perfInfo}; -}; - void update(V1_3::Capabilities* capabilities, V1_3::OperandType type, float perf) { V1_0::PerformanceInfo perfInfo = {.execTime = perf, .powerUsage = perf}; ::android::nn::update(&capabilities->operandPerformance, type, perfInfo); @@ -2056,7 +2047,7 @@ TEST_F(PartitioningTest, Perf) { model.finish(); ASSERT_TRUE(model.isValid()); - const V1_3::Capabilities baseCapabilities = makeCapabilities(0.5); + const V1_3::Capabilities baseCapabilities = ::android::nn::makeCapabilities(0.5); { // better than base @@ -2846,7 +2837,7 @@ TEST_F(PerfTest, Lookup) { // We'll use this to ensure that we can save and then recover a type's performance. 
auto typePerf = [](V1_3::OperandType type) { return float(static_cast<uint32_t>(type)); };
 
-    V1_3::Capabilities capabilities = makeCapabilities(-1.0f);
+    V1_3::Capabilities capabilities = ::android::nn::makeCapabilities(-1.0f);
 
     for (uint32_t type = static_cast<uint32_t>(V1_3::OperandTypeRange::FUNDAMENTAL_MIN);
          type <= static_cast<uint32_t>(V1_3::OperandTypeRange::FUNDAMENTAL_MAX); ++type) {
diff --git a/nn/runtime/test/TestRemoveDefaultArguments.cpp b/nn/runtime/test/TestRemoveDefaultArguments.cpp
index daef6bf60..6b7283f44 100644
--- a/nn/runtime/test/TestRemoveDefaultArguments.cpp
+++ b/nn/runtime/test/TestRemoveDefaultArguments.cpp
@@ -23,6 +23,7 @@
 #include 
 
 #include "GeneratedTestUtils.h"
+#include "HalUtils.h"
 #include "Manager.h"
 #include "SampleDriverPartial.h"
 #include "TestNeuralNetworksWrapper.h"
@@ -113,7 +114,7 @@ class TestDriver : public SampleDriverPartial {
    TestDriver() : SampleDriverPartial(kTestDriverName) {}
 
    hardware::Return<void> getCapabilities_1_3(getCapabilities_1_3_cb cb) override {
-        cb(V1_3::ErrorStatus::NONE, {/* Placeholder zero-filled capabilities. */});
+        cb(V1_3::ErrorStatus::NONE, makeCapabilities(1.0));
        return hardware::Void();
    }
-- 
cgit v1.2.3


From c2674a481270e6b35bddb8c2cf979a72acc70f0b Mon Sep 17 00:00:00 2001
From: Slava Shklyaev
Date: Fri, 13 Nov 2020 14:12:24 +0000
Subject: Copy Utils to LegacyUtils and LegacyHalUtils

This change was generated with the following commands:

cp common/include/Utils.h common/include/LegacyHalUtils.h
mv common/include/Utils.h common/include/LegacyUtils.h
cp common/Utils.cpp common/LegacyHalUtils.cpp
mv common/Utils.cpp common/LegacyUtils.cpp
sed -i 's/"Utils.cpp"/"LegacyUtils.cpp"/' common/Android.bp
bpfmt -w -s common/Android.bp

This is a separate change in order to help git preserve
the history. See change I7ffc5824.
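A quick way to confirm that the history survives the move (a sketch; it
assumes the commands above were run and committed at the repository root):

git log --follow common/LegacyUtils.cpp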
Bug: 170289677 Test: N/A Change-Id: I2b54bc131ff4a2f05b3a1e0f180a7a4bbd7bf614 Merged-In: I2b54bc131ff4a2f05b3a1e0f180a7a4bbd7bf614 (cherry picked from commit 1e9f4d242c1159fa3a33020f0abfbfa95ca4be04) --- nn/common/Android.bp | 4 +- nn/common/LegacyHalUtils.cpp | 3565 ++++++++++++++++++++++++++++++++++++ nn/common/LegacyUtils.cpp | 3565 ++++++++++++++++++++++++++++++++++++ nn/common/Utils.cpp | 3565 ------------------------------------ nn/common/include/LegacyHalUtils.h | 611 ++++++ nn/common/include/LegacyUtils.h | 611 ++++++ nn/common/include/Utils.h | 611 ------ 7 files changed, 8354 insertions(+), 4178 deletions(-) create mode 100644 nn/common/LegacyHalUtils.cpp create mode 100644 nn/common/LegacyUtils.cpp delete mode 100644 nn/common/Utils.cpp create mode 100644 nn/common/include/LegacyHalUtils.h create mode 100644 nn/common/include/LegacyUtils.h delete mode 100644 nn/common/include/Utils.h diff --git a/nn/common/Android.bp b/nn/common/Android.bp index c6000438f..366470243 100644 --- a/nn/common/Android.bp +++ b/nn/common/Android.bp @@ -86,8 +86,8 @@ cc_library_static { srcs: [ "ExecutionBurstController.cpp", "ExecutionBurstServer.cpp", + "LegacyUtils.cpp", "MemoryUtils.cpp", - "Utils.cpp", ], header_libs: [ "gemmlowp_headers", @@ -156,12 +156,12 @@ cc_library_static { "ExecutionBurstServer.cpp", "GraphDump.cpp", "IndexedShapeWrapper.cpp", + "LegacyUtils.cpp", "MemoryUtils.cpp", "MetaModel.cpp", "OperationsUtils.cpp", "QuantUtils.cpp", "TokenHasher.cpp", - "Utils.cpp", "ValidateHal.cpp", "operations/ArgMinMax.cpp", "operations/BidirectionalSequenceLSTM.cpp", diff --git a/nn/common/LegacyHalUtils.cpp b/nn/common/LegacyHalUtils.cpp new file mode 100644 index 000000000..7417ed8bf --- /dev/null +++ b/nn/common/LegacyHalUtils.cpp @@ -0,0 +1,3565 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#define LOG_TAG "Utils" + +#include "Utils.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ControlFlow.h" +#include "NeuralNetworks.h" +#include "NeuralNetworksOEM.h" +#include "OperationResolver.h" +#include "ValidateHal.h" +#include "nnapi/TypeUtils.h" + +namespace android { +namespace nn { + +constexpr V1_0::PerformanceInfo kNoPerformanceInfo = {.execTime = FLT_MAX, .powerUsage = FLT_MAX}; + +const char kVLogPropKey[] = "debug.nn.vlog"; +int vLogMask = ~0; + +// Split the space separated list of tags from verbose log setting and build the +// logging mask from it. note that '1' and 'all' are special cases to enable all +// verbose logging. +// +// NN API verbose logging setting comes from system property debug.nn.vlog. +// Example: +// setprop debug.nn.vlog 1 : enable all logging tags. +// setprop debug.nn.vlog "model compilation" : only enable logging for MODEL and +// COMPILATION tags. 
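+// Tags may be separated by spaces, commas, or colons (the Split() call below
+// uses " ,:" as its separator set).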
+void initVLogMask() { + vLogMask = 0; + const std::string vLogSetting = android::base::GetProperty(kVLogPropKey, ""); + if (vLogSetting.empty()) { + return; + } + + std::unordered_map vLogFlags = {{"1", -1}, + {"all", -1}, + {"model", MODEL}, + {"compilation", COMPILATION}, + {"execution", EXECUTION}, + {"cpuexe", CPUEXE}, + {"manager", MANAGER}, + {"driver", DRIVER}, + {"memory", MEMORY}}; + + std::vector elements = android::base::Split(vLogSetting, " ,:"); + for (const auto& elem : elements) { + const auto& flag = vLogFlags.find(elem); + if (flag == vLogFlags.end()) { + LOG(ERROR) << "Unknown trace flag: " << elem; + continue; + } + + if (flag->second == -1) { + // -1 is used for the special values "1" and "all" that enable all + // tracing. + vLogMask = ~0; + return; + } else { + vLogMask |= 1 << flag->second; + } + } +} + +TimeoutDuration makeTimeoutDuration(uint64_t nanoseconds) { + // According to the standard, std::chrono::nanoseconds::rep is a signed + // integer type of at least 64 bits. This check prevents an overflow when + // rep is exactly 64 bits. + if constexpr (sizeof(std::chrono::nanoseconds::rep) == sizeof(int64_t)) { + nanoseconds = std::min(nanoseconds, + static_cast(std::chrono::nanoseconds::max().count())); + } + return std::chrono::nanoseconds{nanoseconds}; +} + +Deadline makeDeadline(TimeoutDuration duration) { + const auto maxTime = Deadline::max(); + const auto currentTime = std::chrono::steady_clock::now(); + + // If there would be an overflow, use the max value. + if (duration > maxTime - currentTime) { + return maxTime; + } + return currentTime + duration; +} + +static uint64_t getMaxNanosecondsSinceEpoch() { + const auto maxTime = + std::chrono::time_point::max(); + return maxTime.time_since_epoch().count(); +} + +std::optional makeDeadline(const V1_3::OptionalTimePoint& timePoint) { + using Discriminator = V1_3::OptionalTimePoint::hidl_discriminator; + if (timePoint.getDiscriminator() == Discriminator::none) { + return std::nullopt; + } + const uint64_t nanosecondsSinceEpoch = timePoint.nanosecondsSinceEpoch(); + const uint64_t maxNanosecondsSinceEpoch = getMaxNanosecondsSinceEpoch(); + + // Clamp time point to max. + if (nanosecondsSinceEpoch >= maxNanosecondsSinceEpoch) { + return Deadline::max(); + } + + // Return provided time point. + return Deadline{std::chrono::nanoseconds{nanosecondsSinceEpoch}}; +} + +bool hasDeadlinePassed(const std::optional& deadline) { + if (!deadline.has_value()) { + return false; + } + return std::chrono::steady_clock::now() >= *deadline; +} + +static OptionalTimePoint makeTimePoint(const Deadline& deadline) { + return deadline; +} + +OptionalTimePoint makeTimePoint(const std::optional& deadline) { + return deadline.has_value() ? 
makeTimePoint(*deadline) : OptionalTimePoint{}; +} + +static bool isExtensionOperandType(int32_t type) { + return (static_cast(type) >> kExtensionTypeBits) != 0; +} + +static bool isExtensionOperationType(ANeuralNetworksOperationType type) { + return (static_cast(type) >> kExtensionTypeBits) != 0; +} + +bool isExtensionOperandType(V1_3::OperandType type) { + return isExtensionOperandType(static_cast(type)); +} + +bool isExtensionOperationType(V1_3::OperationType type) { + return isExtensionOperationType(static_cast(type)); +} + +namespace { + +template +EntryType tableLookup(const EntryType (&table)[entryCount], + const EntryType (&tableOEM)[entryCountOEM], uint32_t code) { + if (code < entryCount) { + return table[code]; + } else if (code >= kOEMCodeBase && (code - kOEMCodeBase) < entryCountOEM) { + return tableOEM[code - kOEMCodeBase]; + } else { + nnAssert(!"tableLookup: bad code"); + return EntryType(); + } +} + +static Version convert(HalVersion halVersion) { + switch (halVersion) { + case HalVersion::UNKNOWN: + break; + case HalVersion::V1_0: + return Version::ANDROID_OC_MR1; + case HalVersion::V1_1: + return Version::ANDROID_P; + case HalVersion::V1_2: + return Version::ANDROID_Q; + case HalVersion::V1_3: + return Version::ANDROID_R; + } + LOG(FATAL) << "Cannot convert " << halVersion; + return {}; +} + +class OperationValidationContext : public IOperationValidationContext { + DISALLOW_IMPLICIT_CONSTRUCTORS(OperationValidationContext); + + public: + OperationValidationContext(const char* operationName, uint32_t inputCount, + const uint32_t* inputIndexes, uint32_t outputCount, + const uint32_t* outputIndexes, const Operand* operands) + : operationName(operationName), + inputCount(inputCount), + inputIndexes(inputIndexes), + outputCount(outputCount), + outputIndexes(outputIndexes), + operands(operands) {} + + const char* getOperationName() const override; + + uint32_t getNumInputs() const override; + OperandType getInputType(uint32_t index) const override; + Shape getInputShape(uint32_t index) const override; + const Operand::ExtraParams& getInputExtraParams(uint32_t index) const override; + + uint32_t getNumOutputs() const override; + OperandType getOutputType(uint32_t index) const override; + Shape getOutputShape(uint32_t index) const override; + + private: + const Operand* getInputOperand(uint32_t index) const; + const Operand* getOutputOperand(uint32_t index) const; + + const char* operationName; + uint32_t inputCount; + const uint32_t* inputIndexes; + uint32_t outputCount; + const uint32_t* outputIndexes; + const Operand* operands; + Version version; +}; + +const char* OperationValidationContext::getOperationName() const { + return operationName; +} + +const Operand* OperationValidationContext::getInputOperand(uint32_t index) const { + CHECK(index < static_cast(inputCount)); + return &operands[inputIndexes[index]]; +} + +const Operand* OperationValidationContext::getOutputOperand(uint32_t index) const { + CHECK(index < static_cast(outputCount)); + return &operands[outputIndexes[index]]; +} + +uint32_t OperationValidationContext::getNumInputs() const { + return inputCount; +} + +uint32_t OperationValidationContext::getNumOutputs() const { + return outputCount; +} + +OperandType OperationValidationContext::getInputType(uint32_t index) const { + return getInputOperand(index)->type; +} + +Shape OperationValidationContext::getInputShape(uint32_t index) const { + const Operand* operand = getInputOperand(index); + return {operand->type, operand->dimensions, operand->scale, 
operand->zeroPoint, + operand->extraParams}; +} + +const Operand::ExtraParams& OperationValidationContext::getInputExtraParams(uint32_t index) const { + return getInputOperand(index)->extraParams; +} + +OperandType OperationValidationContext::getOutputType(uint32_t index) const { + return getOutputOperand(index)->type; +} + +Shape OperationValidationContext::getOutputShape(uint32_t index) const { + const Operand* operand = getOutputOperand(index); + return {operand->type, operand->dimensions, operand->scale, operand->zeroPoint, + operand->extraParams}; +} + +}; // anonymous namespace + +#define COUNT(X) (sizeof(X) / sizeof(X[0])) + +std::string getOperandTypeName(V1_3::OperandType type) { + return toString(type); +} + +std::string getOperationName(V1_3::OperationType type) { + return toString(type); +} + +const uint32_t kSizeOfDataType[]{ + 4, // ANEURALNETWORKS_FLOAT32 + 4, // ANEURALNETWORKS_INT32 + 4, // ANEURALNETWORKS_UINT32 + 4, // ANEURALNETWORKS_TENSOR_FLOAT32 + 4, // ANEURALNETWORKS_TENSOR_INT32 + 1, // ANEURALNETWORKS_TENSOR_QUANT8_ASYMM + 1, // ANEURALNETWORKS_BOOL + 2, // ANEURALNETWORKS_TENSOR_QUANT16_SYMM + 2, // ANEURALNETWORKS_TENSOR_FLOAT16 + 1, // ANEURALNETWORKS_TENSOR_BOOL8 + 2, // ANEURALNETWORKS_FLOAT16 + 1, // ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL + 2, // ANEURALNETWORKS_TENSOR_QUANT16_ASYMM + 1, // ANEURALNETWORKS_TENSOR_QUANT8_SYMM + 1, // ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED + 0, // ANEURALNETWORKS_MODEL +}; + +static_assert(COUNT(kSizeOfDataType) == kNumberOfDataTypes, "kSizeOfDataType is incorrect"); + +const bool kScalarDataType[]{ + true, // ANEURALNETWORKS_FLOAT32 + true, // ANEURALNETWORKS_INT32 + true, // ANEURALNETWORKS_UINT32 + false, // ANEURALNETWORKS_TENSOR_FLOAT32 + false, // ANEURALNETWORKS_TENSOR_INT32 + false, // ANEURALNETWORKS_TENSOR_QUANT8_ASYMM + true, // ANEURALNETWORKS_BOOL + false, // ANEURALNETWORKS_TENSOR_QUANT16_SYMM + false, // ANEURALNETWORKS_TENSOR_FLOAT16 + false, // ANEURALNETWORKS_TENSOR_BOOL8 + true, // ANEURALNETWORKS_FLOAT16 + false, // ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL + false, // ANEURALNETWORKS_TENSOR_QUANT16_ASYMM + false, // ANEURALNETWORKS_TENSOR_QUANT8_SYMM + false, // ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED + true, // ANEURALNETWORKS_MODEL +}; + +static_assert(COUNT(kScalarDataType) == kNumberOfDataTypes, "kScalarDataType is incorrect"); + +const uint32_t kSizeOfDataTypeOEM[]{ + 0, // ANEURALNETWORKS_OEM + 1, // ANEURALNETWORKS_TENSOR_OEM_BYTE +}; + +static_assert(COUNT(kSizeOfDataTypeOEM) == kNumberOfDataTypesOEM, + "kSizeOfDataTypeOEM is incorrect"); + +const bool kScalarDataTypeOEM[]{ + true, // ANEURALNETWORKS_OEM + false, // ANEURALNETWORKS_TENSOR_OEM_BYTE +}; + +static_assert(COUNT(kScalarDataTypeOEM) == kNumberOfDataTypesOEM, + "kScalarDataTypeOEM is incorrect"); + +bool nonExtensionOperandTypeIsScalar(int type) { + CHECK(!isExtensionOperandType(type)) << "Extension operand types are not supported"; + return tableLookup(kScalarDataType, kScalarDataTypeOEM, type); +} + +uint32_t nonExtensionOperandSizeOfData(OperandType type, const std::vector& dimensions) { + const size_t size = getNonExtensionSize(type, dimensions).value(); + CHECK_LE(size, std::numeric_limits::max()); + return size; +} + +uint32_t nonExtensionOperandSizeOfData(V1_3::OperandType type, + const std::vector& dimensions) { + return nonExtensionOperandSizeOfData(uncheckedConvert(type), dimensions); +} + +// Returns a pair of {false, size} on success, {true, 0} if size overflows uint32_t. 
+static std::pair sizeOfTensorDataHelper(uint32_t sizeOfElement, + const std::vector& dimensions) { + if (dimensions.empty()) { + return {false, 0}; + } + uint64_t size = static_cast(sizeOfElement); + constexpr uint64_t kMaxSize = static_cast(std::numeric_limits::max()); + for (uint32_t d : dimensions) { + size *= d; + if (size > kMaxSize) return {true, 0}; + } + return {false, static_cast(size)}; +} + +uint32_t sizeOfTensorData(uint32_t sizeOfElement, const std::vector& dimensions) { + const auto [overflow, size] = sizeOfTensorDataHelper(sizeOfElement, dimensions); + CHECK(!overflow); + return size; +} + +bool nonExtensionOperandSizeOfDataOverflowsUInt32(OperandType type, + const std::vector& dimensions) { + CHECK(!isExtension(type)) << "Size of extension operand data is unknown"; + int n = static_cast(type); + uint32_t sizeOfElement = tableLookup(kSizeOfDataType, kSizeOfDataTypeOEM, n); + return tableLookup(kScalarDataType, kScalarDataTypeOEM, n) + ? false + : sizeOfTensorDataOverflowsUInt32(sizeOfElement, dimensions); +} + +bool nonExtensionOperandSizeOfDataOverflowsUInt32(V1_3::OperandType type, + const std::vector& dimensions) { + return nonExtensionOperandSizeOfDataOverflowsUInt32(uncheckedConvert(type), dimensions); +} + +bool sizeOfTensorDataOverflowsUInt32(uint32_t sizeOfElement, + const std::vector& dimensions) { + return sizeOfTensorDataHelper(sizeOfElement, dimensions).first; +} + +bool tensorHasUnspecifiedDimensions(int type, const uint32_t* dim, uint32_t dimCount) { + if (!isExtensionOperandType(type)) { + CHECK(!nonExtensionOperandTypeIsScalar(type)) + << "A scalar type can never have unspecified dimensions"; + } + return dimCount == 0 || std::find(dim, dim + dimCount, 0) != (dim + dimCount); +} + +bool tensorHasUnspecifiedDimensions(OperandType type, const std::vector& dimensions) { + return tensorHasUnspecifiedDimensions(static_cast(type), dimensions.data(), + dimensions.size()); +} + +bool tensorHasUnspecifiedDimensions(V1_3::OperandType type, + const std::vector& dimensions) { + return tensorHasUnspecifiedDimensions(static_cast(type), dimensions.data(), + dimensions.size()); +} + +bool tensorHasUnspecifiedDimensions(const ANeuralNetworksOperandType* type) { + return tensorHasUnspecifiedDimensions(type->type, type->dimensions, type->dimensionCount); +} + +bool tensorHasUnspecifiedDimensions(const Operand& operand) { + return tensorHasUnspecifiedDimensions(operand.type, operand.dimensions); +} + +bool tensorHasUnspecifiedDimensions(const V1_3::Operand& operand) { + return tensorHasUnspecifiedDimensions(static_cast(operand.type), operand.dimensions.data(), + operand.dimensions.size()); +} + +uint32_t alignBytesNeeded(uint32_t index, size_t length) { + uint32_t pattern; + if (length < 2) { + pattern = 0; // No alignment necessary + } else if (length < 4) { + pattern = 1; // Align on 2-byte boundary + } else { + pattern = 3; // Align on 4-byte boundary + } + uint32_t extra = (~(index - 1)) & pattern; + return extra; +} + +void logModelToInfo(const V1_0::Model& model) { + LOG(INFO) << "V1_0::Model start"; + LOG(INFO) << "operands" << toString(model.operands); + LOG(INFO) << "operations" << toString(model.operations); + LOG(INFO) << "inputIndexes" << toString(model.inputIndexes); + LOG(INFO) << "outputIndexes" << toString(model.outputIndexes); + LOG(INFO) << "operandValues size" << model.operandValues.size(); + LOG(INFO) << "pools" << SHOW_IF_DEBUG(toString(model.pools)); +} + +void logModelToInfo(const V1_1::Model& model) { + LOG(INFO) << "V1_1::Model start"; + LOG(INFO) << 
"operands" << toString(model.operands); + LOG(INFO) << "operations" << toString(model.operations); + LOG(INFO) << "inputIndexes" << toString(model.inputIndexes); + LOG(INFO) << "outputIndexes" << toString(model.outputIndexes); + LOG(INFO) << "operandValues size " << model.operandValues.size(); + LOG(INFO) << "pools" << SHOW_IF_DEBUG(toString(model.pools)); +} + +void logModelToInfo(const V1_2::Model& model) { + LOG(INFO) << "V1_2::Model start"; + LOG(INFO) << "operands" << toString(model.operands); + LOG(INFO) << "operations" << toString(model.operations); + LOG(INFO) << "inputIndexes" << toString(model.inputIndexes); + LOG(INFO) << "outputIndexes" << toString(model.outputIndexes); + LOG(INFO) << "operandValues size" << model.operandValues.size(); + LOG(INFO) << "pools" << SHOW_IF_DEBUG(toString(model.pools)); + LOG(INFO) << "relaxComputationFloat32toFloat16" << model.relaxComputationFloat32toFloat16; + LOG(INFO) << "extensionNameToPrefix" << toString(model.extensionNameToPrefix); +} + +static void logSubgraphToInfo(std::string label, const V1_3::Subgraph& subgraph) { + LOG(INFO) << label << ".operands" << toString(subgraph.operands); + LOG(INFO) << label << ".operations" << toString(subgraph.operations); + LOG(INFO) << label << ".inputIndexes" << toString(subgraph.inputIndexes); + LOG(INFO) << label << ".outputIndexes" << toString(subgraph.outputIndexes); +} + +void logModelToInfo(const V1_3::Model& model) { + LOG(INFO) << "V1_3::Model start"; + logSubgraphToInfo("main", model.main); + for (uint32_t i = 0, n = model.referenced.size(); i < n; ++i) { + logSubgraphToInfo("referenced[" + std::to_string(i) + "]", model.referenced[i]); + } + LOG(INFO) << "operandValues size " << model.operandValues.size(); + LOG(INFO) << "pools" << SHOW_IF_DEBUG(toString(model.pools)); + LOG(INFO) << "relaxComputationFloat32toFloat16 " << model.relaxComputationFloat32toFloat16; + LOG(INFO) << "extensionNameToPrefix" << toString(model.extensionNameToPrefix); +} + +void logModelToInfo(const Model& model) { + LOG(INFO) << "Model start"; + logModelToInfo(convertToV1_3(model)); +} + +bool validateOperandSymmPerChannelQuantParams( + const V1_3::Operand& halOperand, + const ANeuralNetworksSymmPerChannelQuantParams& channelQuant, const char* tag) { + if (halOperand.type != V1_3::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL) { + return false; + } + + NN_RET_CHECK_LT(channelQuant.channelDim, halOperand.dimensions.size()) << tag; + NN_RET_CHECK(channelQuant.scales != nullptr) << tag; + NN_RET_CHECK_EQ(channelQuant.scaleCount, halOperand.dimensions[channelQuant.channelDim]) << tag; + NN_RET_CHECK_NE(halOperand.dimensions[channelQuant.channelDim], 0u) + << tag << " channel dimension " << channelQuant.channelDim << " is underspecified"; + for (uint32_t i = 0; i < halOperand.dimensions[channelQuant.channelDim]; i++) { + NN_RET_CHECK_GT(channelQuant.scales[i], 0.0f) << tag << " invalid scaleArray[" << i << "]"; + } + return true; +} + +static bool validateScalarDimensions(const ANeuralNetworksOperandType& type, const char* tag) { + NN_RET_CHECK_EQ(type.dimensionCount, 0u) << tag << " invalid dimensions for scalar type"; + NN_RET_CHECK(type.dimensions == nullptr) << tag << " invalid dimensions for scalar type"; + return true; +} + +static bool validateQuant8AsymmParams(const ANeuralNetworksOperandType& type, const char* tag) { + NN_RET_CHECK(0 <= type.zeroPoint && type.zeroPoint <= 255) + << tag << " invalid zeroPoint: " << type.zeroPoint; + NN_RET_CHECK_GT(type.scale, 0.f) << tag << " invalid scale"; + return true; +} + +static 
bool validateQuant8AsymmSignedParams(const ANeuralNetworksOperandType& type, + const char* tag) { + NN_RET_CHECK(-128 <= type.zeroPoint && type.zeroPoint <= 127) + << tag << " invalid zeroPoint: " << type.zeroPoint; + NN_RET_CHECK_GT(type.scale, 0.f) << tag << " invalid scale"; + return true; +} + +static bool validateQuant8SymmParams(const ANeuralNetworksOperandType& type, const char* tag) { + NN_RET_CHECK_EQ(type.zeroPoint, 0) << tag << " invalid zeroPoint: " << type.zeroPoint; + NN_RET_CHECK_GT(type.scale, 0.f) << tag << " invalid scale"; + return true; +} + +static bool validateQuant16AsymmParams(const ANeuralNetworksOperandType& type, const char* tag) { + NN_RET_CHECK(0 <= type.zeroPoint && type.zeroPoint <= 65535) + << tag << " invalid zeroPoint: " << type.zeroPoint; + NN_RET_CHECK_GT(type.scale, 0.f) << tag << " invalid scale"; + return true; +} + +static bool validateQuantSymmParams(const ANeuralNetworksOperandType& type, const char* tag) { + NN_RET_CHECK_EQ(type.zeroPoint, 0) << tag << " zeroPoint is not zero"; + NN_RET_CHECK_GT(type.scale, 0.f) << tag << " invalid scale"; + return true; +} + +static bool validateNoQuantParams(const ANeuralNetworksOperandType& type, const char* tag) { + NN_RET_CHECK_EQ(type.zeroPoint, 0) << tag << " zeroPoint is not zero"; + NN_RET_CHECK_EQ(type.scale, 0.f) << tag << " scale is not zero"; + return true; +} + +static bool validateTensorDimensions( + const ANeuralNetworksOperandType& type, + const Extension::OperandTypeInformation* const extensionOperandTypeInfo, const char* tag, + bool allowPartial) { + if (!allowPartial) { + NN_RET_CHECK_GT(type.dimensionCount, 0u) << tag << " invalid operand dimensions"; + } + uint64_t size = + isExtensionOperandType(type.type) + ? extensionOperandTypeInfo->byteSize + : tableLookup(kSizeOfDataType, kSizeOfDataTypeOEM, static_cast(type.type)); + constexpr uint64_t kMaxSize = std::numeric_limits::max(); + for (uint32_t i = 0; i < type.dimensionCount; i++) { + if (!allowPartial) { + NN_RET_CHECK_NE(type.dimensions[i], 0u) << tag << " invalid operand dimensions"; + } + if (type.dimensions[i] != 0) { + size *= type.dimensions[i]; + NN_RET_CHECK_LE(size, kMaxSize) << tag << " operand byte size exceeds " << kMaxSize; + } + } + return true; +} + +static bool validateOperandTypeHelper( + const ANeuralNetworksOperandType& type, + const Extension::OperandTypeInformation* const extensionOperandTypeInfo, const char* tag, + bool allowPartial) { + NN_RET_CHECK_EQ(type.dimensionCount == 0, type.dimensions == nullptr); + if (isExtensionOperandType(type.type)) { + NN_RET_CHECK(extensionOperandTypeInfo != nullptr); + if (extensionOperandTypeInfo->isTensor) { + NN_RET_CHECK( + validateTensorDimensions(type, extensionOperandTypeInfo, tag, allowPartial)); + } else { + NN_RET_CHECK(validateScalarDimensions(type, tag)); + } + return validateNoQuantParams(type, tag); + } + + NN_RET_CHECK(extensionOperandTypeInfo == nullptr); + NN_RET_CHECK(validCode(kNumberOfDataTypes, kNumberOfDataTypesOEM, type.type)) + << tag << " invalid OperandType: " << type.type; + + bool isScalar = tableLookup(kScalarDataType, kScalarDataTypeOEM, type.type); + if (isScalar) { + NN_RET_CHECK(validateScalarDimensions(type, tag)); + if (type.type != ANEURALNETWORKS_OEM_SCALAR) { // Historically, we have allowed OEM types + // to use quantization parameters. 
+ NN_RET_CHECK(validateNoQuantParams(type, tag)); + } + } else { + NN_RET_CHECK(validateTensorDimensions(type, extensionOperandTypeInfo, tag, allowPartial)); + if (type.type == ANEURALNETWORKS_TENSOR_QUANT8_ASYMM) { + NN_RET_CHECK(validateQuant8AsymmParams(type, tag)); + } else if (type.type == ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED) { + NN_RET_CHECK(validateQuant8AsymmSignedParams(type, tag)); + } else if (type.type == ANEURALNETWORKS_TENSOR_QUANT8_SYMM) { + NN_RET_CHECK(validateQuant8SymmParams(type, tag)); + } else if (type.type == ANEURALNETWORKS_TENSOR_QUANT16_ASYMM) { + NN_RET_CHECK(validateQuant16AsymmParams(type, tag)); + } else if (type.type == ANEURALNETWORKS_TENSOR_QUANT16_SYMM) { + NN_RET_CHECK(validateQuantSymmParams(type, tag)); + } else if (type.type == ANEURALNETWORKS_TENSOR_INT32) { + // TODO(b/119869082): TENSOR_INT32 should not use quantization parameters. + } else if (type.type == ANEURALNETWORKS_TENSOR_OEM_BYTE) { + // Historically, we have allowed OEM types to use quantization parameters. + } else { + NN_RET_CHECK(validateNoQuantParams(type, tag)); + } + } + + return true; +} + +int validateOperandType(const ANeuralNetworksOperandType& type, + const Extension::OperandTypeInformation* const extensionOperandTypeInfo, + const char* tag, bool allowPartial) { + return validateOperandTypeHelper(type, extensionOperandTypeInfo, tag, allowPartial) + ? ANEURALNETWORKS_NO_ERROR + : ANEURALNETWORKS_BAD_DATA; +} + +int validateOperandList(uint32_t count, const uint32_t* list, uint32_t operandCount, + const char* tag) { + for (uint32_t i = 0; i < count; i++) { + if (list[i] >= operandCount) { + LOG(ERROR) << tag << " invalid operand index at " << i << " = " << list[i] + << ", operandCount " << operandCount; + return ANEURALNETWORKS_BAD_DATA; + } + } + return ANEURALNETWORKS_NO_ERROR; +} + +int validateOperationOperandTypes(const std::vector& operands, uint32_t inOperandCount, + const uint32_t* inOperandIndexes, + const std::vector& inExpectedTypes, + uint32_t outOperandCount, const uint32_t* outOperandIndexes, + const std::vector& outExpectedInTypes) { + if (inOperandCount != static_cast(inExpectedTypes.size()) || + outOperandCount != static_cast(outExpectedInTypes.size())) { + LOG(ERROR) << "Wrong operand count: expected " << inExpectedTypes.size() << " inputs and " + << outExpectedInTypes.size() << " outputs," + << "got " << inOperandCount << " inputs and " << outOperandCount << " outputs"; + return ANEURALNETWORKS_BAD_DATA; + } + for (uint32_t i = 0; i < inOperandCount; i++) { + if (operands[inOperandIndexes[i]].type != inExpectedTypes[i]) { + LOG(ERROR) << "Invalid input tensor type " << operands[inOperandIndexes[i]].type + << " for input " << i << ", expected " << inExpectedTypes[i]; + return ANEURALNETWORKS_BAD_DATA; + } + } + for (uint32_t i = 0; i < outOperandCount; i++) { + if (operands[outOperandIndexes[i]].type != outExpectedInTypes[i]) { + LOG(ERROR) << "Invalid output tensor type " << operands[outOperandIndexes[i]].type + << " for input " << i << ", expected " << outExpectedInTypes[i]; + return ANEURALNETWORKS_BAD_DATA; + } + } + + return ANEURALNETWORKS_NO_ERROR; +} + +static int validateHalVersion(ANeuralNetworksOperationType opType, HalVersion halVersion, + HalVersion minSupportedHalVersion) { + if (halVersion < minSupportedHalVersion) { + LOG(ERROR) << "The given inputs and outputs for operation " << opType + << " are only supported in " << minSupportedHalVersion + << " and later (validating using " << halVersion << ")"; + return ANEURALNETWORKS_BAD_DATA; + } + 
return ANEURALNETWORKS_NO_ERROR; +} + +// Checks if two operands have the same types, ranks (if specified), dimensions +// (if specified), scales, zeroPoints, and extraParams. +static bool compatible(const Operand& a, const Operand& b) { + NN_RET_CHECK(a.type == b.type) << a.type << " != " << b.type; + if (a.dimensions.size() != 0 && b.dimensions.size() != 0) { + NN_RET_CHECK_EQ(a.dimensions.size(), b.dimensions.size()) << "Incompatible dimensions"; + for (uint32_t i = 0, n = a.dimensions.size(); i < n; ++i) { + if (a.dimensions[i] != 0 && b.dimensions[i] != 0) { + NN_RET_CHECK_EQ(a.dimensions[i], b.dimensions[i]) << "Incompatible dimensions"; + } + } + } + NN_RET_CHECK_EQ(a.scale, b.scale); + NN_RET_CHECK_EQ(a.zeroPoint, b.zeroPoint); + NN_RET_CHECK(a.extraParams == b.extraParams) << a.extraParams << " != " << b.extraParams; + return true; +} + +static bool validateConditionOperand(const Operand& operand) { + NN_RET_CHECK(operand.type == OperandType::TENSOR_BOOL8) + << "Unexpected condition operand type: " << operand.type; + NN_RET_CHECK_EQ(operand.dimensions.size(), 1u) << "Condition operand must be a singleton"; + NN_RET_CHECK_EQ(operand.dimensions[0], 1u) << "Condition operand must be a singleton"; + return true; +} + +static void checkSubgraphValidationHelper(const SubgraphValidationHelper& helper) { + CHECK(helper.isValidSubgraphReference != nullptr); + CHECK(helper.getSubgraphInputCount != nullptr); + CHECK(helper.getSubgraphOutputCount != nullptr); + CHECK(helper.getSubgraphInputOperand != nullptr); + CHECK(helper.getSubgraphOutputOperand != nullptr); +} + +static bool validateIfOperation(uint32_t inputCount, const uint32_t* inputs, uint32_t outputCount, + const uint32_t* outputs, const std::vector& operands, + const SubgraphValidationHelper& helper) { + namespace op = operation_if; + checkSubgraphValidationHelper(helper); + NN_RET_CHECK_GE(inputCount, 3u) << "ANEURALNETWORKS_IF must have at least 3 inputs"; + NN_RET_CHECK_GE(outputCount, 1u) << "ANEURALNETWORKS_IF must have at least 1 output"; + auto validateBranchOperand = [&](const Operand& branchModelOperand) -> bool { + NN_RET_CHECK(helper.isValidSubgraphReference(branchModelOperand)) + << "Operand is not a valid subgraph reference"; + const uint32_t branchModelInputCount = helper.getSubgraphInputCount(branchModelOperand); + const uint32_t branchModelOutputCount = helper.getSubgraphOutputCount(branchModelOperand); + NN_RET_CHECK_EQ(inputCount, op::kFirstInput + branchModelInputCount); + NN_RET_CHECK_EQ(outputCount, branchModelOutputCount); + for (uint32_t i = 0; i < branchModelInputCount; ++i) { + const Operand& innerOperand = *helper.getSubgraphInputOperand(branchModelOperand, i); + const Operand& outerOperand = operands[inputs[op::kFirstInput + i]]; + NN_RET_CHECK(compatible(innerOperand, outerOperand)); + } + for (uint32_t i = 0; i < branchModelOutputCount; ++i) { + const Operand& innerOperand = *helper.getSubgraphOutputOperand(branchModelOperand, i); + const Operand& outerOperand = operands[outputs[i]]; + NN_RET_CHECK(compatible(innerOperand, outerOperand)); + } + return true; + }; + NN_RET_CHECK(validateConditionOperand(operands[inputs[op::kCondBoolOperand]])) + << "Validation failed for IF condition operand"; + NN_RET_CHECK(validateBranchOperand(operands[inputs[op::kThenModelOperand]])) + << "Validation failed for IF then model"; + NN_RET_CHECK(validateBranchOperand(operands[inputs[op::kElseModelOperand]])) + << "Validation failed for IF else model"; + return true; +} + +static bool 
validateControlFlowOperandUnknownSize(const SubgraphValidationHelper& helper, + const Operand& operand) { + if (!helper.allowControlFlowOperationWithOperandOfUnknownSize && !isExtension(operand.type)) { + NN_RET_CHECK_NE(nonExtensionOperandSizeOfData(operand.type, operand.dimensions), 0u); + } + return true; +} + +static bool validateWhileOperation(uint32_t inputCount, const uint32_t* inputs, + uint32_t outputCount, const uint32_t* outputs, + const std::vector& operands, + const SubgraphValidationHelper& helper) { + // Let the loop have + // - m >= 1 input-output operands, + // - k >= 0 state-only operands, and + // - n >= 0 input-only operands. + // Then + // - the WHILE loop operation has (2 + m + k + n) inputs and m outputs. + // - the condition model has (m + k + n) inputs and 1 output. + // - the body model has (m + k + n) inputs and (m + k) outputs. + namespace op = operation_while; + checkSubgraphValidationHelper(helper); + NN_RET_CHECK_GE(inputCount, 3u) << "ANEURALNETWORKS_WHILE must have at least 3 inputs"; + NN_RET_CHECK_GE(outputCount, 1u) << "ANEURALNETWORKS_WHILE must have at least 1 output"; + auto validateCondOperand = [&](const Operand& condModelOperand) -> bool { + NN_RET_CHECK(helper.isValidSubgraphReference(condModelOperand)) + << "Operand is not a valid subgraph reference"; + const uint32_t condModelInputCount = helper.getSubgraphInputCount(condModelOperand); + const uint32_t condModelOutputCount = helper.getSubgraphOutputCount(condModelOperand); + NN_RET_CHECK_EQ(inputCount, op::kFirstInput + condModelInputCount); + NN_RET_CHECK_EQ(condModelOutputCount, 1u); + for (uint32_t i = 0; i < condModelInputCount; ++i) { + const Operand& innerOperand = *helper.getSubgraphInputOperand(condModelOperand, i); + const Operand& outerOperand = operands[inputs[op::kFirstInput + i]]; + NN_RET_CHECK(compatible(innerOperand, outerOperand)); + NN_RET_CHECK(validateControlFlowOperandUnknownSize(helper, innerOperand)); + NN_RET_CHECK(validateControlFlowOperandUnknownSize(helper, outerOperand)); + } + NN_RET_CHECK( + validateConditionOperand(*helper.getSubgraphOutputOperand(condModelOperand, 0))); + return true; + }; + auto validateBodyOperand = [&](const Operand& bodyModelOperand) -> bool { + NN_RET_CHECK(helper.isValidSubgraphReference(bodyModelOperand)) + << "Operand is not a valid subgraph reference"; + const uint32_t bodyModelInputCount = helper.getSubgraphInputCount(bodyModelOperand); + const uint32_t bodyModelOutputCount = helper.getSubgraphOutputCount(bodyModelOperand); + NN_RET_CHECK_EQ(inputCount, op::kFirstInput + bodyModelInputCount); + NN_RET_CHECK_GE(bodyModelOutputCount, outputCount); + NN_RET_CHECK_GE(bodyModelInputCount, bodyModelOutputCount); + const uint32_t inputOutputCount = outputCount; + const uint32_t stateOnlyCount = bodyModelOutputCount - inputOutputCount; + const uint32_t inputOnlyCount = bodyModelInputCount - bodyModelOutputCount; + for (uint32_t i = 0, n = inputOutputCount + stateOnlyCount + inputOnlyCount; i < n; ++i) { + const Operand& innerOperand = *helper.getSubgraphInputOperand(bodyModelOperand, i); + const Operand& outerOperand = operands[inputs[op::kFirstInput + i]]; + NN_RET_CHECK(compatible(innerOperand, outerOperand)); + NN_RET_CHECK(validateControlFlowOperandUnknownSize(helper, innerOperand)); + NN_RET_CHECK(validateControlFlowOperandUnknownSize(helper, outerOperand)); + } + for (uint32_t i = 0; i < inputOutputCount; ++i) { + const Operand& innerOperand = *helper.getSubgraphOutputOperand(bodyModelOperand, i); + const Operand& outerOperand = 
operands[outputs[i]]; + NN_RET_CHECK(compatible(innerOperand, outerOperand)); + NN_RET_CHECK(validateControlFlowOperandUnknownSize(helper, outerOperand)); + } + for (uint32_t i = 0, n = inputOutputCount + stateOnlyCount; i < n; ++i) { + const Operand& inputOperand = *helper.getSubgraphInputOperand(bodyModelOperand, i); + const Operand& outputOperand = *helper.getSubgraphOutputOperand(bodyModelOperand, i); + NN_RET_CHECK(compatible(inputOperand, outputOperand)); + NN_RET_CHECK(validateControlFlowOperandUnknownSize(helper, outputOperand)); + } + return true; + }; + NN_RET_CHECK(validateCondOperand(operands[inputs[op::kCondModelOperand]])) + << "Validation failed for WHILE condition model"; + NN_RET_CHECK(validateBodyOperand(operands[inputs[op::kBodyModelOperand]])) + << "Validation failed for WHILE body model"; + return true; +} + +static inline int validateOperation(ANeuralNetworksOperationType opType, uint32_t inputCount, + const uint32_t* inputIndexes, uint32_t outputCount, + const uint32_t* outputIndexes, + const std::vector& operands, HalVersion halVersion) { + if (opType == ANEURALNETWORKS_IF || opType == ANEURALNETWORKS_WHILE) { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3)); + LOG(ERROR) << "This validateOperation() overload does not support control flow"; + return ANEURALNETWORKS_BAD_DATA; + } + return validateOperation(opType, inputCount, inputIndexes, outputCount, outputIndexes, operands, + halVersion, {}); +} + +int validateOperation(ANeuralNetworksOperationType opType, uint32_t inputCount, + const uint32_t* inputIndexes, uint32_t outputCount, + const uint32_t* outputIndexes, const std::vector& operands, + HalVersion halVersion, const SubgraphValidationHelper& helper) { + NN_RETURN_IF_ERROR(validateOperandList(inputCount, inputIndexes, + static_cast(operands.size()), + "ANeuralNetworksModel_addOperation inputs")); + NN_RETURN_IF_ERROR(validateOperandList(outputCount, outputIndexes, + static_cast(operands.size()), + "ANeuralNetworksModel_addOperation outputs")); + + if (isExtensionOperationType(opType)) { + if (halVersion < HalVersion::V1_2) { + LOG(ERROR) + << "Extension operations are supported since HAL version 1.2, validating using " + << halVersion; + return ANEURALNETWORKS_BAD_DATA; + } + // There is no other validation we can do for an extension operation. 
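+        // (The runtime does not know an extension operation's operand
+        // signature, so it cannot check operand counts or types here.)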
+ return ANEURALNETWORKS_NO_ERROR; + } + + auto logInvalidInOutNumber = [opType, inputCount, outputCount](int expIn, int expOut) { + LOG(ERROR) << "Invalid number of input operands (" << inputCount << ", expected " << expIn + << ") or output operands (" << outputCount << ", expected " << expOut + << ") for operation " << opType; + }; + + switch (opType) { + case ANEURALNETWORKS_OEM_OPERATION: { + return ANEURALNETWORKS_NO_ERROR; + } + case ANEURALNETWORKS_RESHAPE: { + if (inputCount != 2 || outputCount != 1) { + logInvalidInOutNumber(2, 1); + return ANEURALNETWORKS_BAD_DATA; + } + auto inputType = operands[inputIndexes[0]].type; + std::vector inExpectedTypes; + std::vector outExpectedTypes; + if (inputType == OperandType::TENSOR_FLOAT32) { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0)); + inExpectedTypes = {OperandType::TENSOR_FLOAT32, OperandType::TENSOR_INT32}; + outExpectedTypes = {OperandType::TENSOR_FLOAT32}; + } else if (inputType == OperandType::TENSOR_FLOAT16) { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); + inExpectedTypes = {OperandType::TENSOR_FLOAT16, OperandType::TENSOR_INT32}; + outExpectedTypes = {OperandType::TENSOR_FLOAT16}; + } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0)); + inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM, OperandType::TENSOR_INT32}; + outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM}; + } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3)); + inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED, + OperandType::TENSOR_INT32}; + outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED}; + } else { + LOG(ERROR) << "Unsupported input tensor type for operation " << opType; + return ANEURALNETWORKS_BAD_DATA; + } + const auto inputRank = operands[inputIndexes[0]].dimensions.size(); + if (inputRank > 4) { + LOG(ERROR) << "Unsupported input tensor rank for operation " << opType; + return ANEURALNETWORKS_BAD_DATA; + } + return validateOperationOperandTypes(operands, inputCount, inputIndexes, + inExpectedTypes, outputCount, outputIndexes, + outExpectedTypes); + } + case ANEURALNETWORKS_DEPTH_TO_SPACE: { + if ((inputCount != 3 && inputCount != 2) || outputCount != 1) { + LOG(ERROR) << "Invalid number of input operands (" << inputCount + << ", expected 3 or 2) or output operands (" << outputCount + << ", expected 1) for operation " << opType; + return ANEURALNETWORKS_BAD_DATA; + } + auto inputType = operands[inputIndexes[0]].type; + std::vector inExpectedTypes; + std::vector outExpectedTypes; + if (inputType == OperandType::TENSOR_FLOAT32) { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0)); + inExpectedTypes = {OperandType::TENSOR_FLOAT32, OperandType::INT32}; + outExpectedTypes = {OperandType::TENSOR_FLOAT32}; + } else if (inputType == OperandType::TENSOR_FLOAT16) { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); + inExpectedTypes = {OperandType::TENSOR_FLOAT16, OperandType::INT32}; + outExpectedTypes = {OperandType::TENSOR_FLOAT16}; + } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0)); + inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM, OperandType::INT32}; + outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM}; + } else if (inputType == 
OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3)); + inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED, OperandType::INT32}; + outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED}; + } else { + LOG(ERROR) << "Unsupported input tensor type for operation " << opType; + return ANEURALNETWORKS_BAD_DATA; + } + if (inputCount == 3) { + inExpectedTypes.push_back(OperandType::BOOL); + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); + } else { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0)); + } + return validateOperationOperandTypes(operands, inputCount, inputIndexes, + inExpectedTypes, outputCount, outputIndexes, + outExpectedTypes); + } + case ANEURALNETWORKS_SPACE_TO_DEPTH: { + if ((inputCount != 3 && inputCount != 2) || outputCount != 1) { + LOG(ERROR) << "Invalid number of input operands (" << inputCount + << ", expected 3 or 2) or output operands (" << outputCount + << ", expected 1) for operation " << opType; + return ANEURALNETWORKS_BAD_DATA; + } + auto inputType = operands[inputIndexes[0]].type; + std::vector inExpectedTypes; + std::vector outExpectedTypes; + if (inputType == OperandType::TENSOR_FLOAT32) { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0)); + inExpectedTypes = {OperandType::TENSOR_FLOAT32, OperandType::INT32}; + outExpectedTypes = {OperandType::TENSOR_FLOAT32}; + } else if (inputType == OperandType::TENSOR_FLOAT16) { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); + inExpectedTypes = {OperandType::TENSOR_FLOAT16, OperandType::INT32}; + outExpectedTypes = {OperandType::TENSOR_FLOAT16}; + } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0)); + inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM, OperandType::INT32}; + outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM}; + } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3)); + inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED, OperandType::INT32}; + outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED}; + } else { + LOG(ERROR) << "Unsupported input tensor type for operation " << opType; + return ANEURALNETWORKS_BAD_DATA; + } + if (inputCount == 3) { + inExpectedTypes.push_back(OperandType::BOOL); + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); + } else { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0)); + } + return validateOperationOperandTypes(operands, inputCount, inputIndexes, + inExpectedTypes, outputCount, outputIndexes, + outExpectedTypes); + } + case ANEURALNETWORKS_EMBEDDING_LOOKUP: { + if (inputCount != 2 || outputCount != 1) { + logInvalidInOutNumber(2, 1); + return ANEURALNETWORKS_BAD_DATA; + } + auto inputType = operands[inputIndexes[1]].type; + if (inputType != OperandType::TENSOR_FLOAT16 && + inputType != OperandType::TENSOR_FLOAT32 && + inputType != OperandType::TENSOR_INT32 && + inputType != OperandType::TENSOR_QUANT8_ASYMM && + inputType != OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { + LOG(ERROR) << "Unsupported input tensor type for operation " << opType; + return ANEURALNETWORKS_BAD_DATA; + } + std::vector inExpectedTypes = {OperandType::TENSOR_INT32, inputType}; + std::vector outExpectedTypes = {inputType}; + if (inputType == 
OperandType::TENSOR_FLOAT16 || + inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3)); + } else if (inputType == OperandType::TENSOR_INT32 || + inputType == OperandType::TENSOR_QUANT8_ASYMM) { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); + } else { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0)); + } + return validateOperationOperandTypes(operands, inputCount, inputIndexes, + inExpectedTypes, outputCount, outputIndexes, + outExpectedTypes); + } + case ANEURALNETWORKS_HASHTABLE_LOOKUP: { + if (inputCount != 3 || outputCount != 2) { + logInvalidInOutNumber(3, 2); + return ANEURALNETWORKS_BAD_DATA; + } + auto inputType = operands[inputIndexes[2]].type; + if (inputType != OperandType::TENSOR_FLOAT32 && + inputType != OperandType::TENSOR_INT32 && + inputType != OperandType::TENSOR_QUANT8_ASYMM) { + LOG(ERROR) << "Unsupported input tensor type for operation " << opType; + return ANEURALNETWORKS_BAD_DATA; + } + std::vector inExpectedTypes = {OperandType::TENSOR_INT32, + OperandType::TENSOR_INT32, inputType}; + std::vector outExpectedTypes = {inputType, + OperandType::TENSOR_QUANT8_ASYMM}; + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0)); + return validateOperationOperandTypes(operands, inputCount, inputIndexes, + inExpectedTypes, outputCount, outputIndexes, + outExpectedTypes); + } + case ANEURALNETWORKS_LSH_PROJECTION: { + if (inputCount != 4 || outputCount != 1) { + logInvalidInOutNumber(4, 1); + return ANEURALNETWORKS_BAD_DATA; + } + auto inputType = operands[inputIndexes[1]].type; + if (inputType != OperandType::TENSOR_FLOAT16 && + inputType != OperandType::TENSOR_FLOAT32 && + inputType != OperandType::TENSOR_INT32 && + inputType != OperandType::TENSOR_QUANT8_ASYMM) { + LOG(ERROR) << "Unsupported input tensor type for operation " << opType; + return ANEURALNETWORKS_BAD_DATA; + } + auto hashType = operands[inputIndexes[0]].type; + std::vector inExpectedTypes; + if (hashType == OperandType::TENSOR_FLOAT16) { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); + inExpectedTypes = { + OperandType::TENSOR_FLOAT16, + inputType, + OperandType::TENSOR_FLOAT16, + OperandType::INT32, + }; + } else if (hashType == OperandType::TENSOR_FLOAT32) { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0)); + inExpectedTypes = { + OperandType::TENSOR_FLOAT32, + inputType, + OperandType::TENSOR_FLOAT32, + OperandType::INT32, + }; + } else { + LOG(ERROR) << "Unsupported hash tensor type for operation " << opType; + return ANEURALNETWORKS_BAD_DATA; + } + std::vector outExpectedTypes = {OperandType::TENSOR_INT32}; + return validateOperationOperandTypes(operands, inputCount, inputIndexes, + inExpectedTypes, outputCount, outputIndexes, + outExpectedTypes); + } + case ANEURALNETWORKS_BIDIRECTIONAL_SEQUENCE_LSTM: { + const uint32_t kNumOutputs = 2; + const uint32_t kNumOutputsMerged = 1; + const uint32_t kNumOutputsWithState = 6; + const uint32_t kNumOutputsMergedWithState = 5; + if (inputCount != 61 || + (outputCount != kNumOutputs && outputCount != kNumOutputsMerged && + outputCount != kNumOutputsWithState && + outputCount != kNumOutputsMergedWithState)) { + LOG(ERROR) << "Invalid number of input operands (" << inputCount + << ", expected 61) or output operands (" << outputCount + << ", expected 1, 2, 5 or 6) for operation " << opType; + return ANEURALNETWORKS_BAD_DATA; + } + + std::vector 
inExpectedTypes; + auto inputType = operands[inputIndexes[0]].type; + if (inputType != OperandType::TENSOR_FLOAT32 && + inputType != OperandType::TENSOR_FLOAT16) { + LOG(ERROR) << "Unsupported input tensor type for operation " << opType; + return ANEURALNETWORKS_BAD_DATA; + } + + inExpectedTypes = {}; + for (int i = 0; i < 48; ++i) { + inExpectedTypes.push_back(inputType); + } + inExpectedTypes.push_back(OperandType::INT32); + inExpectedTypes.push_back(inputType == OperandType::TENSOR_FLOAT32 + ? OperandType::FLOAT32 + : OperandType::FLOAT16); + inExpectedTypes.push_back(inputType == OperandType::TENSOR_FLOAT32 + ? OperandType::FLOAT32 + : OperandType::FLOAT16); + inExpectedTypes.push_back(OperandType::BOOL); + inExpectedTypes.push_back(OperandType::BOOL); + for (int i = 0; i < 8; ++i) { + inExpectedTypes.push_back(inputType); + } + + HalVersion minSupportedHalVersion = HalVersion::V1_2; + if (outputCount == kNumOutputsWithState || outputCount == kNumOutputsMergedWithState) { + minSupportedHalVersion = HalVersion::V1_3; + } + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, minSupportedHalVersion)); + std::vector outExpectedTypes(outputCount, inputType); + auto status = validateOperationOperandTypes(operands, inputCount, inputIndexes, + inExpectedTypes, outputCount, outputIndexes, + outExpectedTypes); + return status; + } + case ANEURALNETWORKS_LSTM: { + if ((inputCount != 23 && inputCount != 27) || outputCount != 4) { + LOG(ERROR) << "Invalid number of input operands (" << inputCount + << ", expected 23 or 27) or output operands (" << outputCount + << ", expected 4) for operation " << opType; + return ANEURALNETWORKS_BAD_DATA; + } + std::vector inExpectedTypes; + std::vector outExpectedTypes; + auto inputType = operands[inputIndexes[0]].type; + if (inputType != OperandType::TENSOR_FLOAT32 && + inputType != OperandType::TENSOR_FLOAT16) { + LOG(ERROR) << "Unsupported input tensor type for operation " << opType; + return ANEURALNETWORKS_BAD_DATA; + } + + inExpectedTypes = {inputType, inputType, inputType, inputType, inputType, + inputType, inputType, inputType, inputType, inputType, + inputType, inputType, inputType, inputType, inputType, + inputType, inputType, inputType, inputType, inputType, + OperandType::INT32}; + if (inputType == OperandType::TENSOR_FLOAT32) { + inExpectedTypes.push_back(OperandType::FLOAT32); + inExpectedTypes.push_back(OperandType::FLOAT32); + } else { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); + inExpectedTypes.push_back(OperandType::FLOAT16); + inExpectedTypes.push_back(OperandType::FLOAT16); + } + + outExpectedTypes = {inputType, inputType, inputType, inputType}; + if (inputCount == 23) { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0)); + } else { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); + for (int i = 0; i < 4; ++i) { + inExpectedTypes.push_back(inputType); + } + } + return validateOperationOperandTypes(operands, inputCount, inputIndexes, + inExpectedTypes, outputCount, outputIndexes, + outExpectedTypes); + } + case ANEURALNETWORKS_QUANTIZED_16BIT_LSTM: { + if (inputCount != 15 || outputCount != 2) { + logInvalidInOutNumber(15, 2); + return ANEURALNETWORKS_BAD_DATA; + } + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); + std::vector inExpectedTypes = { + OperandType::TENSOR_QUANT8_ASYMM, OperandType::TENSOR_QUANT8_ASYMM, + OperandType::TENSOR_QUANT8_ASYMM, OperandType::TENSOR_QUANT8_ASYMM, + 
OperandType::TENSOR_QUANT8_ASYMM, OperandType::TENSOR_QUANT8_ASYMM, + OperandType::TENSOR_QUANT8_ASYMM, OperandType::TENSOR_QUANT8_ASYMM, + OperandType::TENSOR_QUANT8_ASYMM, OperandType::TENSOR_INT32, + OperandType::TENSOR_INT32, OperandType::TENSOR_INT32, + OperandType::TENSOR_INT32, OperandType::TENSOR_QUANT16_SYMM, + OperandType::TENSOR_QUANT8_ASYMM}; + std::vector outExpectedTypes = {OperandType::TENSOR_QUANT16_SYMM, + OperandType::TENSOR_QUANT8_ASYMM}; + return validateOperationOperandTypes(operands, inputCount, inputIndexes, + inExpectedTypes, outputCount, outputIndexes, + outExpectedTypes); + } + case ANEURALNETWORKS_RANDOM_MULTINOMIAL: { + if (inputCount != 3 || outputCount != 1) { + logInvalidInOutNumber(3, 1); + return ANEURALNETWORKS_BAD_DATA; + } + OperandType inputType = operands[inputIndexes[0]].type; + std::vector inExpectedTypes; + if (inputType == OperandType::TENSOR_FLOAT32 || + inputType == OperandType::TENSOR_FLOAT16) { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); + inExpectedTypes = { + inputType, + OperandType::INT32, + OperandType::TENSOR_INT32, + }; + } else { + LOG(ERROR) << "Unsupported input tensor type for operation " << opType; + return ANEURALNETWORKS_BAD_DATA; + } + std::vector outExpectedTypes = {OperandType::TENSOR_INT32}; + return validateOperationOperandTypes(operands, inputCount, inputIndexes, + inExpectedTypes, outputCount, outputIndexes, + outExpectedTypes); + } + case ANEURALNETWORKS_RNN: { + if (inputCount != 6 || outputCount != 2) { + logInvalidInOutNumber(6, 2); + return ANEURALNETWORKS_BAD_DATA; + } + OperandType inputType = operands[inputIndexes[0]].type; + std::vector inExpectedTypes; + std::vector outExpectedTypes; + if (inputType == OperandType::TENSOR_FLOAT32) { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0)); + inExpectedTypes = { + OperandType::TENSOR_FLOAT32, OperandType::TENSOR_FLOAT32, + OperandType::TENSOR_FLOAT32, OperandType::TENSOR_FLOAT32, + OperandType::TENSOR_FLOAT32, OperandType::INT32, + }; + outExpectedTypes = { + OperandType::TENSOR_FLOAT32, + OperandType::TENSOR_FLOAT32, + }; + } else if (inputType == OperandType::TENSOR_FLOAT16) { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); + inExpectedTypes = { + OperandType::TENSOR_FLOAT16, OperandType::TENSOR_FLOAT16, + OperandType::TENSOR_FLOAT16, OperandType::TENSOR_FLOAT16, + OperandType::TENSOR_FLOAT16, OperandType::INT32, + }; + outExpectedTypes = { + OperandType::TENSOR_FLOAT16, + OperandType::TENSOR_FLOAT16, + }; + } else { + LOG(ERROR) << "Unsupported input tensor type for operation " << opType; + return ANEURALNETWORKS_BAD_DATA; + } + return validateOperationOperandTypes(operands, inputCount, inputIndexes, + inExpectedTypes, outputCount, outputIndexes, + outExpectedTypes); + } + case ANEURALNETWORKS_SVDF: { + if (inputCount != 7 || outputCount != 2) { + logInvalidInOutNumber(7, 2); + return ANEURALNETWORKS_BAD_DATA; + } + OperandType inputType = operands[inputIndexes[0]].type; + if (inputType == OperandType::TENSOR_FLOAT32) { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0)); + + } else if (inputType == OperandType::TENSOR_FLOAT16) { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); + } else { + LOG(ERROR) << "Unsupported input tensor type for operation " << opType; + return ANEURALNETWORKS_BAD_DATA; + } + std::vector inExpectedTypes = { + inputType, inputType, inputType, inputType, + inputType, OperandType::INT32, 
OperandType::INT32, + }; + std::vector outExpectedTypes = {inputType, inputType}; + return validateOperationOperandTypes(operands, inputCount, inputIndexes, + inExpectedTypes, outputCount, outputIndexes, + outExpectedTypes); + } + case ANEURALNETWORKS_BATCH_TO_SPACE_ND: { + if ((inputCount != 3 && inputCount != 2) || outputCount != 1) { + LOG(ERROR) << "Invalid number of input operands (" << inputCount + << ", expected 3 or 2) or output operands (" << outputCount + << ", expected 1) for operation " << opType; + return ANEURALNETWORKS_BAD_DATA; + } + auto inputType = operands[inputIndexes[0]].type; + std::vector inExpectedTypes; + std::vector outExpectedTypes; + if (inputType == OperandType::TENSOR_FLOAT32) { + inExpectedTypes = { + OperandType::TENSOR_FLOAT32, + OperandType::TENSOR_INT32, + }; + outExpectedTypes = {OperandType::TENSOR_FLOAT32}; + } else if (inputType == OperandType::TENSOR_FLOAT16) { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); + inExpectedTypes = { + OperandType::TENSOR_FLOAT16, + OperandType::TENSOR_INT32, + }; + outExpectedTypes = {OperandType::TENSOR_FLOAT16}; + } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) { + inExpectedTypes = { + OperandType::TENSOR_QUANT8_ASYMM, + OperandType::TENSOR_INT32, + }; + outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM}; + } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3)); + inExpectedTypes = { + OperandType::TENSOR_QUANT8_ASYMM_SIGNED, + OperandType::TENSOR_INT32, + }; + outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED}; + } else { + LOG(ERROR) << "Unsupported input tensor type for operation " << opType; + return ANEURALNETWORKS_BAD_DATA; + } + if (inputCount == 3) { + inExpectedTypes.push_back(OperandType::BOOL); + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); + } else { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_1)); + } + return validateOperationOperandTypes(operands, inputCount, inputIndexes, + inExpectedTypes, outputCount, outputIndexes, + outExpectedTypes); + } + case ANEURALNETWORKS_SPACE_TO_BATCH_ND: { + if ((inputCount != 4 && inputCount != 3) || outputCount != 1) { + LOG(ERROR) << "Invalid number of input operands (" << inputCount + << ", expected 4 or 3) or output operands (" << outputCount + << ", expected 1) for operation " << opType; + return ANEURALNETWORKS_BAD_DATA; + } + auto inputType = operands[inputIndexes[0]].type; + std::vector inExpectedTypes; + std::vector outExpectedTypes; + if (inputType == OperandType::TENSOR_FLOAT32) { + inExpectedTypes = { + OperandType::TENSOR_FLOAT32, + OperandType::TENSOR_INT32, + OperandType::TENSOR_INT32, + }; + outExpectedTypes = {OperandType::TENSOR_FLOAT32}; + } else if (inputType == OperandType::TENSOR_FLOAT16) { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); + inExpectedTypes = { + OperandType::TENSOR_FLOAT16, + OperandType::TENSOR_INT32, + OperandType::TENSOR_INT32, + }; + outExpectedTypes = {OperandType::TENSOR_FLOAT16}; + } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) { + if (operands[inputIndexes[0]].zeroPoint != 0) { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); + } + inExpectedTypes = { + OperandType::TENSOR_QUANT8_ASYMM, + OperandType::TENSOR_INT32, + OperandType::TENSOR_INT32, + }; + outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM}; + } else if (inputType == 
OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3)); + inExpectedTypes = { + OperandType::TENSOR_QUANT8_ASYMM_SIGNED, + OperandType::TENSOR_INT32, + OperandType::TENSOR_INT32, + }; + outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED}; + } else { + LOG(ERROR) << "Unsupported input tensor type for operation " << opType; + return ANEURALNETWORKS_BAD_DATA; + } + if (inputCount == 4) { + inExpectedTypes.push_back(OperandType::BOOL); + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); + } else { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_1)); + } + return validateOperationOperandTypes(operands, inputCount, inputIndexes, + inExpectedTypes, outputCount, outputIndexes, + outExpectedTypes); + } + case ANEURALNETWORKS_PAD: { + if (inputCount != 2 || outputCount != 1) { + logInvalidInOutNumber(2, 1); + return ANEURALNETWORKS_BAD_DATA; + } + auto inputType = operands[inputIndexes[0]].type; + std::vector inExpectedTypes; + std::vector outExpectedTypes; + if (inputType == OperandType::TENSOR_FLOAT32) { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_1)); + inExpectedTypes = { + OperandType::TENSOR_FLOAT32, + OperandType::TENSOR_INT32, + }; + outExpectedTypes = {OperandType::TENSOR_FLOAT32}; + } else if (inputType == OperandType::TENSOR_FLOAT16) { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); + inExpectedTypes = { + OperandType::TENSOR_FLOAT16, + OperandType::TENSOR_INT32, + }; + outExpectedTypes = {OperandType::TENSOR_FLOAT16}; + } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM || + inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { + if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3)); + } else { + if (operands[inputIndexes[0]].zeroPoint == 0) { + NN_RETURN_IF_ERROR( + validateHalVersion(opType, halVersion, HalVersion::V1_1)); + } else { + NN_RETURN_IF_ERROR( + validateHalVersion(opType, halVersion, HalVersion::V1_2)); + } + } + inExpectedTypes = { + inputType, + OperandType::TENSOR_INT32, + }; + outExpectedTypes = {inputType}; + } else { + LOG(ERROR) << "Unsupported input tensor type for operation " << opType; + return ANEURALNETWORKS_BAD_DATA; + } + const auto inputRank = operands[inputIndexes[0]].dimensions.size(); + if (inputRank > 4) { + LOG(ERROR) << "Unsupported input tensor rank for operation " << opType; + return ANEURALNETWORKS_BAD_DATA; + } + return validateOperationOperandTypes(operands, inputCount, inputIndexes, + inExpectedTypes, outputCount, outputIndexes, + outExpectedTypes); + } + case ANEURALNETWORKS_PAD_V2: { + if (inputCount != 3 || outputCount != 1) { + logInvalidInOutNumber(3, 1); + return ANEURALNETWORKS_BAD_DATA; + } + auto inputType = operands[inputIndexes[0]].type; + std::vector inExpectedTypes; + std::vector outExpectedTypes; + if (inputType == OperandType::TENSOR_FLOAT32) { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); + inExpectedTypes = { + OperandType::TENSOR_FLOAT32, + OperandType::TENSOR_INT32, + OperandType::FLOAT32, + }; + outExpectedTypes = {OperandType::TENSOR_FLOAT32}; + } else if (inputType == OperandType::TENSOR_FLOAT16) { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); + inExpectedTypes = { + OperandType::TENSOR_FLOAT16, + OperandType::TENSOR_INT32, + OperandType::FLOAT16, + }; + outExpectedTypes = 
{OperandType::TENSOR_FLOAT16}; + } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM || + inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { + if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3)); + } else { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); + } + inExpectedTypes = { + inputType, + OperandType::TENSOR_INT32, + OperandType::INT32, + }; // TODO(b/116699425): Make it UINT8. + outExpectedTypes = {inputType}; + } else { + LOG(ERROR) << "Unsupported input tensor type for operation " << opType; + return ANEURALNETWORKS_BAD_DATA; + } + const auto inputRank = operands[inputIndexes[0]].dimensions.size(); + if (inputRank > 4) { + LOG(ERROR) << "Unsupported input tensor rank for operation " << opType; + return ANEURALNETWORKS_BAD_DATA; + } + return validateOperationOperandTypes(operands, inputCount, inputIndexes, + inExpectedTypes, outputCount, outputIndexes, + outExpectedTypes); + } + case ANEURALNETWORKS_CAST: { + if (inputCount != 1 || outputCount != 1) { + logInvalidInOutNumber(1, 1); + return ANEURALNETWORKS_BAD_DATA; + } + auto inputOperand = operands[inputIndexes[0]]; + auto outputOperand = operands[outputIndexes[0]]; + auto inputType = inputOperand.type; + auto outputType = outputOperand.type; + std::vector inExpectedTypes; + std::vector outExpectedTypes; + if ((inputType == OperandType::TENSOR_FLOAT16 || + inputType == OperandType::TENSOR_FLOAT32 || + inputType == OperandType::TENSOR_INT32 || + inputType == OperandType::TENSOR_QUANT8_ASYMM) && + (outputType == OperandType::TENSOR_FLOAT16 || + outputType == OperandType::TENSOR_FLOAT32 || + outputType == OperandType::TENSOR_INT32 || + outputType == OperandType::TENSOR_QUANT8_ASYMM)) { + inExpectedTypes = {inputType}; + outExpectedTypes = {outputType}; + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); + } else if (inputType == OperandType::TENSOR_BOOL8 || + inputType == OperandType::TENSOR_QUANT16_ASYMM || + inputType == OperandType::TENSOR_QUANT16_SYMM || + inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED || + inputType == OperandType::TENSOR_QUANT8_SYMM) { + inExpectedTypes = {inputType}; + outExpectedTypes = {inputType}; // Only identity CAST is supported. + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3)); + } else { + LOG(ERROR) << "Unsupported data type for operation " << opType; + return ANEURALNETWORKS_BAD_DATA; + } + // Validate that output shape is equal to input shape if dimensions + // are already known. 
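+            // (Clarifying note, not part of the original change: a dimension of value 0
+            // means the extent is unknown, so any output containing a zero dimension is
+            // exempt from the comparison below.)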
+ auto getNumberOfElements = [](const hardware::hidl_vec& dims) { + if (dims.size() == 0) { + return 0; + } + return std::accumulate(dims.begin(), dims.end(), 1, std::multiplies<>()); + }; + if (inputOperand.dimensions.size() != 0 && outputOperand.dimensions.size() != 0 && + getNumberOfElements(outputOperand.dimensions) != 0 && + inputOperand.dimensions != outputOperand.dimensions) { + return ANEURALNETWORKS_BAD_DATA; + } + return validateOperationOperandTypes(operands, inputCount, inputIndexes, + inExpectedTypes, outputCount, outputIndexes, + outExpectedTypes); + } + case ANEURALNETWORKS_MEAN: { + if (inputCount != 3 || outputCount != 1) { + logInvalidInOutNumber(3, 1); + return ANEURALNETWORKS_BAD_DATA; + } + const auto inputRank = operands[inputIndexes[0]].dimensions.size(); + if (inputRank > 4) { + LOG(ERROR) << "Unsupported input tensor rank for operation " << opType; + return ANEURALNETWORKS_BAD_DATA; + } + auto inputType = operands[inputIndexes[0]].type; + if (inputType == OperandType::TENSOR_FLOAT32) { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_1)); + } else if (inputType == OperandType::TENSOR_FLOAT16) { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); + } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_1)); + } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3)); + } else { + LOG(ERROR) << "Unsupported input tensor type for operation " << opType; + return ANEURALNETWORKS_BAD_DATA; + } + std::vector inExpectedTypes = {inputType, OperandType::TENSOR_INT32, + OperandType::INT32}; + std::vector outExpectedTypes = {inputType}; + return validateOperationOperandTypes(operands, inputCount, inputIndexes, + inExpectedTypes, outputCount, outputIndexes, + outExpectedTypes); + } + case ANEURALNETWORKS_ARGMAX: + case ANEURALNETWORKS_ARGMIN: { + if (inputCount != 2 || outputCount != 1) { + logInvalidInOutNumber(2, 1); + return ANEURALNETWORKS_BAD_DATA; + } + auto inputType = operands[inputIndexes[0]].type; + std::vector inExpectedTypes; + std::vector outExpectedTypes; + if (inputType == OperandType::TENSOR_FLOAT16 || + inputType == OperandType::TENSOR_FLOAT32 || + inputType == OperandType::TENSOR_INT32 || + inputType == OperandType::TENSOR_QUANT8_ASYMM || + inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { + inExpectedTypes = {inputType, OperandType::INT32}; + outExpectedTypes = {OperandType::TENSOR_INT32}; + } else { + LOG(ERROR) << "Unsupported input tensor type for operation " << opType; + return ANEURALNETWORKS_BAD_DATA; + } + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); + return validateOperationOperandTypes(operands, inputCount, inputIndexes, + inExpectedTypes, outputCount, outputIndexes, + outExpectedTypes); + } + case ANEURALNETWORKS_EXPAND_DIMS: { + if (inputCount != 2 || outputCount != 1) { + logInvalidInOutNumber(2, 1); + return ANEURALNETWORKS_BAD_DATA; + } + auto inputType = operands[inputIndexes[0]].type; + std::vector inExpectedTypes; + std::vector outExpectedTypes; + if (inputType == OperandType::TENSOR_FLOAT16 || + inputType == OperandType::TENSOR_FLOAT32 || + inputType == OperandType::TENSOR_INT32 || + inputType == OperandType::TENSOR_QUANT8_ASYMM || + inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { + inExpectedTypes = {inputType, OperandType::INT32}; + outExpectedTypes = {inputType}; + } else { 
+ LOG(ERROR) << "Unsupported input tensor type for operation " << opType; + return ANEURALNETWORKS_BAD_DATA; + } + if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3)); + } else { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); + } + return validateOperationOperandTypes(operands, inputCount, inputIndexes, + inExpectedTypes, outputCount, outputIndexes, + outExpectedTypes); + } + case ANEURALNETWORKS_SPLIT: { + if (inputCount != 3) { + LOG(ERROR) << "Invalid number of input operands (" << inputCount << ", expected 3)" + << opType; + return ANEURALNETWORKS_BAD_DATA; + } + auto inputType = operands[inputIndexes[0]].type; + if (inputType != OperandType::TENSOR_FLOAT16 && + inputType != OperandType::TENSOR_FLOAT32 && + inputType != OperandType::TENSOR_INT32 && + inputType != OperandType::TENSOR_QUANT8_ASYMM && + inputType != OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { + LOG(ERROR) << "Unsupported input tensor type for operation " << opType; + return ANEURALNETWORKS_BAD_DATA; + } + if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3)); + } else { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); + } + std::vector inExpectedTypes = {inputType, OperandType::INT32, + OperandType::INT32}; + std::vector outExpectedTypes(outputCount, inputType); + return validateOperationOperandTypes(operands, inputCount, inputIndexes, + inExpectedTypes, outputCount, outputIndexes, + outExpectedTypes); + } + case ANEURALNETWORKS_MAXIMUM: + case ANEURALNETWORKS_MINIMUM: { + if (inputCount != 2 || outputCount != 1) { + logInvalidInOutNumber(2, 1); + return ANEURALNETWORKS_BAD_DATA; + } + std::vector inExpectedTypes; + std::vector outExpectedTypes; + OperandType inputType = operands[inputIndexes[0]].type; + if (inputType == OperandType::TENSOR_FLOAT16 || + inputType == OperandType::TENSOR_FLOAT32 || + inputType == OperandType::TENSOR_INT32 || + inputType == OperandType::TENSOR_QUANT8_ASYMM || + inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { + inExpectedTypes = {inputType, inputType}; + outExpectedTypes = {inputType}; + } else { + LOG(ERROR) << "Unsupported input tensor type for operation " << opType; + return ANEURALNETWORKS_BAD_DATA; + } + if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3)); + } else { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); + } + return validateOperationOperandTypes(operands, inputCount, inputIndexes, + inExpectedTypes, outputCount, outputIndexes, + outExpectedTypes); + } + case ANEURALNETWORKS_GROUPED_CONV_2D: { + if ((inputCount != 12 && inputCount != 9) || outputCount != 1) { + LOG(ERROR) << "Invalid number of input operands (" << inputCount + << ", expected 12 or 9) or output operands (" << outputCount + << ", expected 1) for operation " << opType; + return ANEURALNETWORKS_BAD_DATA; + } + auto inputType = operands[inputIndexes[0]].type; + auto filterType = operands[inputIndexes[1]].type; + std::vector inExpectedTypes; + std::vector outExpectedTypes; + if (inputType == OperandType::TENSOR_FLOAT32) { + inExpectedTypes = {OperandType::TENSOR_FLOAT32, OperandType::TENSOR_FLOAT32, + OperandType::TENSOR_FLOAT32, OperandType::INT32, + OperandType::INT32, OperandType::INT32, + OperandType::INT32, OperandType::INT32}; + outExpectedTypes = 
{OperandType::TENSOR_FLOAT32}; + } else if (inputType == OperandType::TENSOR_FLOAT16) { + inExpectedTypes = {OperandType::TENSOR_FLOAT16, OperandType::TENSOR_FLOAT16, + OperandType::TENSOR_FLOAT16, OperandType::INT32, + OperandType::INT32, OperandType::INT32, + OperandType::INT32, OperandType::INT32}; + outExpectedTypes = {OperandType::TENSOR_FLOAT16}; + } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM || + inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { + if (filterType != inputType && + filterType != OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL) { + LOG(ERROR) << "Unsupported filter tensor type for operation " << opType; + return ANEURALNETWORKS_BAD_DATA; + } + + if (filterType == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL && + std::get( + operands[inputIndexes[1]].extraParams) + .channelDim != 0) { + LOG(ERROR) << "Unsupported filter tensor channel dimension for operation " + << opType; + return ANEURALNETWORKS_BAD_DATA; + } + + inExpectedTypes = { + inputType, filterType, OperandType::TENSOR_INT32, + OperandType::INT32, OperandType::INT32, OperandType::INT32, + OperandType::INT32, OperandType::INT32}; + outExpectedTypes = {inputType}; + } else { + LOG(ERROR) << "Unsupported input tensor type for operation " << opType; + return ANEURALNETWORKS_BAD_DATA; + } + + if (inputCount == 12) { + std::vector explicitScalarTypes(3, OperandType::INT32); + inExpectedTypes.insert(inExpectedTypes.end(), explicitScalarTypes.begin(), + explicitScalarTypes.end()); + } + inExpectedTypes.push_back(OperandType::BOOL); + if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3)); + } else { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); + } + return validateOperationOperandTypes(operands, inputCount, inputIndexes, + inExpectedTypes, outputCount, outputIndexes, + outExpectedTypes); + } + case ANEURALNETWORKS_TILE: { + if (inputCount != 2 || outputCount != 1) { + logInvalidInOutNumber(2, 1); + return ANEURALNETWORKS_BAD_DATA; + } + auto inputType = operands[inputIndexes[0]].type; + std::vector inExpectedTypes; + std::vector outExpectedTypes; + if (inputType == OperandType::TENSOR_FLOAT16 || + inputType == OperandType::TENSOR_FLOAT32 || + inputType == OperandType::TENSOR_INT32 || + inputType == OperandType::TENSOR_QUANT8_ASYMM || + inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { + inExpectedTypes = {inputType, OperandType::TENSOR_INT32}; + outExpectedTypes = {inputType}; + } else { + LOG(ERROR) << "Unsupported input tensor type for operation " << opType; + return ANEURALNETWORKS_BAD_DATA; + } + if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3)); + } else { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); + } + return validateOperationOperandTypes(operands, inputCount, inputIndexes, + inExpectedTypes, outputCount, outputIndexes, + outExpectedTypes); + } + case ANEURALNETWORKS_POW: { + if (inputCount != 2 || outputCount != 1) { + logInvalidInOutNumber(2, 1); + return ANEURALNETWORKS_BAD_DATA; + } + auto inputType = operands[inputIndexes[0]].type; + std::vector inExpectedTypes; + std::vector outExpectedTypes; + if (inputType == OperandType::TENSOR_FLOAT16 || + inputType == OperandType::TENSOR_FLOAT32) { + inExpectedTypes = {inputType, inputType}; + outExpectedTypes = {inputType}; + } else { + LOG(ERROR) << "Unsupported input tensor type for operation " << opType; 
+ return ANEURALNETWORKS_BAD_DATA; + } + if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3)); + } else { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); + } + return validateOperationOperandTypes(operands, inputCount, inputIndexes, + inExpectedTypes, outputCount, outputIndexes, + outExpectedTypes); + } + case ANEURALNETWORKS_IF: { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3)); + return validateIfOperation(inputCount, inputIndexes, outputCount, outputIndexes, + operands, helper) + ? ANEURALNETWORKS_NO_ERROR + : ANEURALNETWORKS_BAD_DATA; + } + case ANEURALNETWORKS_WHILE: { + NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3)); + return validateWhileOperation(inputCount, inputIndexes, outputCount, outputIndexes, + operands, helper) + ? ANEURALNETWORKS_NO_ERROR + : ANEURALNETWORKS_BAD_DATA; + } + default: { + const OperationRegistration* operationRegistration = + BuiltinOperationResolver::get()->findOperation( + static_cast(opType)); + if (operationRegistration == nullptr) { + if (0 <= opType && opType < kNumberOfOperationTypes) { + LOG(ERROR) << opType << " not registered"; + } else { + LOG(ERROR) << "Operation type " << opType << " out of the range [0, " + << kNumberOfOperationTypes << ")"; + } + return ANEURALNETWORKS_UNEXPECTED_NULL; + } + if (operationRegistration->validate == nullptr) { + LOG(ERROR) << "Incomplete operation registration: " << opType; + return ANEURALNETWORKS_UNEXPECTED_NULL; + } + OperationValidationContext context(operationRegistration->name, inputCount, + inputIndexes, outputCount, outputIndexes, + operands.data()); + const auto maybeVersion = operationRegistration->validate(&context); + if (!maybeVersion.has_value()) { + LOG(ERROR) << "Validation failed for operation " << opType << ": " + << maybeVersion.error(); + return ANEURALNETWORKS_BAD_DATA; + } + if (!validateVersion(&context, convert(halVersion), maybeVersion.value())) { + LOG(ERROR) << "Validation failed for operation " << opType; + return ANEURALNETWORKS_BAD_DATA; + } + return ANEURALNETWORKS_NO_ERROR; + } + } +} + +ErrorStatus convertResultCodeToErrorStatus(int resultCode) { + switch (resultCode) { + case ANEURALNETWORKS_NO_ERROR: + return ErrorStatus::NONE; + + case ANEURALNETWORKS_BAD_DATA: + case ANEURALNETWORKS_UNEXPECTED_NULL: + return ErrorStatus::INVALID_ARGUMENT; + + case ANEURALNETWORKS_OUTPUT_INSUFFICIENT_SIZE: + return ErrorStatus::OUTPUT_INSUFFICIENT_SIZE; + + case ANEURALNETWORKS_UNAVAILABLE_DEVICE: + return ErrorStatus::DEVICE_UNAVAILABLE; + + case ANEURALNETWORKS_BAD_STATE: + case ANEURALNETWORKS_INCOMPLETE: + case ANEURALNETWORKS_OP_FAILED: + case ANEURALNETWORKS_OUT_OF_MEMORY: + case ANEURALNETWORKS_UNMAPPABLE: + case ANEURALNETWORKS_DEAD_OBJECT: + return ErrorStatus::GENERAL_FAILURE; + + case ANEURALNETWORKS_MISSED_DEADLINE_TRANSIENT: + return ErrorStatus::MISSED_DEADLINE_TRANSIENT; + case ANEURALNETWORKS_MISSED_DEADLINE_PERSISTENT: + return ErrorStatus::MISSED_DEADLINE_PERSISTENT; + case ANEURALNETWORKS_RESOURCE_EXHAUSTED_TRANSIENT: + return ErrorStatus::RESOURCE_EXHAUSTED_TRANSIENT; + case ANEURALNETWORKS_RESOURCE_EXHAUSTED_PERSISTENT: + return ErrorStatus::RESOURCE_EXHAUSTED_PERSISTENT; + } + LOG(ERROR) << "Unknown result code " << resultCode << " mapped to ErrorStatus::GENERAL_FAILURE"; + return ErrorStatus::GENERAL_FAILURE; +} + +int convertErrorStatusToResultCode(ErrorStatus status) { + switch (status) { + 
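+        // This switch mirrors convertResultCodeToErrorStatus above, but the round trip is
+        // lossy: every result code that collapsed to ErrorStatus::GENERAL_FAILURE comes
+        // back as ANEURALNETWORKS_OP_FAILED.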
        case ErrorStatus::NONE:
+            return ANEURALNETWORKS_NO_ERROR;
+        case ErrorStatus::DEVICE_UNAVAILABLE:
+            return ANEURALNETWORKS_UNAVAILABLE_DEVICE;
+        case ErrorStatus::GENERAL_FAILURE:
+            return ANEURALNETWORKS_OP_FAILED;
+        case ErrorStatus::OUTPUT_INSUFFICIENT_SIZE:
+            return ANEURALNETWORKS_OUTPUT_INSUFFICIENT_SIZE;
+        case ErrorStatus::INVALID_ARGUMENT:
+            return ANEURALNETWORKS_BAD_DATA;
+        case ErrorStatus::MISSED_DEADLINE_TRANSIENT:
+            return ANEURALNETWORKS_MISSED_DEADLINE_TRANSIENT;
+        case ErrorStatus::MISSED_DEADLINE_PERSISTENT:
+            return ANEURALNETWORKS_MISSED_DEADLINE_PERSISTENT;
+        case ErrorStatus::RESOURCE_EXHAUSTED_TRANSIENT:
+            return ANEURALNETWORKS_RESOURCE_EXHAUSTED_TRANSIENT;
+        case ErrorStatus::RESOURCE_EXHAUSTED_PERSISTENT:
+            return ANEURALNETWORKS_RESOURCE_EXHAUSTED_PERSISTENT;
+        case ErrorStatus::DEAD_OBJECT:
+            return ANEURALNETWORKS_DEAD_OBJECT;
+    }
+    LOG(ERROR) << "Unknown ErrorStatus " << status << " mapped to ANEURALNETWORKS_OP_FAILED";
+    return ANEURALNETWORKS_OP_FAILED;
+}
+
+V1_3::ErrorStatus convertResultCodeToHalErrorStatus(int resultCode) {
+    return convertToV1_3(convertResultCodeToErrorStatus(resultCode));
+}
+
+int convertErrorStatusToResultCode(V1_3::ErrorStatus status) {
+    return convertErrorStatusToResultCode(uncheckedConvert(status));
+}
+
+std::tuple<int, std::vector<OutputShape>, Timing> getExecutionResult(
+        V1_3::ErrorStatus status, const hardware::hidl_vec<V1_2::OutputShape>& outputShapes,
+        const V1_2::Timing& timing) {
+    return getExecutionResult(uncheckedConvert(status), uncheckedConvert(outputShapes),
+                              uncheckedConvert(timing));
+}
+
+std::tuple<int, std::vector<OutputShape>, Timing> getExecutionResult(
+        ErrorStatus status, std::vector<OutputShape> outputShapes, Timing timing) {
+    constexpr Timing kNoTiming = {std::numeric_limits<uint64_t>::max(),
+                                  std::numeric_limits<uint64_t>::max()};
+    const int n = convertErrorStatusToResultCode(status);
+    if (status != ErrorStatus::NONE && status != ErrorStatus::OUTPUT_INSUFFICIENT_SIZE &&
+        !outputShapes.empty()) {
+        LOG(ERROR) << "The driver returned OutputShapes when it shouldn't.";
+        outputShapes.clear();
+    }
+    if (status != ErrorStatus::NONE && timing != kNoTiming) {
+        LOG(ERROR) << "The driver returned Timing when it shouldn't.";
+        timing = kNoTiming;
+    }
+    return {n, std::move(outputShapes), timing};
+}
+
+// Capabilities::operandPerformance utilities.
+// The field Capabilities::operandPerformance is a vector sorted by the field
+// Capabilities::OperandPerformance::type.
+
+template <HalVersion version>
+hardware::hidl_vec<VersionedOperandPerformance<version>> nonExtensionOperandPerformance(
+        V1_0::PerformanceInfo perf) {
+    using OpPerf = VersionedOperandPerformance<version>;
+
+    // Note: range presents enumerators in declaration order, not in numerical order.
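+    // Illustrative example (not part of the original change):
+    //   nonExtensionOperandPerformance<HalVersion::V1_3>({.execTime = 0.5f, .powerUsage = 0.5f})
+    // yields one entry per non-extension operand type except SUBGRAPH, sorted by type so
+    // that the std::lower_bound calls in update() and lookup() below remain valid.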
+    static constexpr hardware::hidl_enum_range<VersionedOperandType<version>> kOperandTypeRange;
+
+    std::vector<OpPerf> ret;
+    ret.reserve(kOperandTypeRange.end() - kOperandTypeRange.begin());
+    for (VersionedOperandType<version> type : kOperandTypeRange) {
+        if (static_cast<V1_3::OperandType>(type) != V1_3::OperandType::SUBGRAPH) {
+            ret.push_back(OpPerf{type, perf});
+        }
+    }
+    std::sort(ret.begin(), ret.end(),
+              [](const OpPerf& a, const OpPerf& b) { return a.type < b.type; });
+
+    return ret;
+}
+
+template hardware::hidl_vec<V1_2::Capabilities::OperandPerformance>
+nonExtensionOperandPerformance<HalVersion::V1_2>(V1_0::PerformanceInfo perf);
+template hardware::hidl_vec<V1_3::Capabilities::OperandPerformance>
+nonExtensionOperandPerformance<HalVersion::V1_3>(V1_0::PerformanceInfo perf);
+
+template <HalVersion version>
+void update(hardware::hidl_vec<VersionedOperandPerformance<version>>* operandPerformance,
+            VersionedOperandType<version> type, V1_0::PerformanceInfo perf) {
+    CHECK(operandPerformance != nullptr);
+    const auto it =
+            std::lower_bound(operandPerformance->begin(), operandPerformance->end(), type,
+                             [](const VersionedOperandPerformance<version>& perf,
+                                VersionedOperandType<version> type) { return perf.type < type; });
+    CHECK(it != operandPerformance->end())
+            << toString(type) << " not in " << toString(*operandPerformance);
+    it->info = perf;
+}
+
+void update(hardware::hidl_vec<V1_2::Capabilities::OperandPerformance>* operandPerformance,
+            V1_2::OperandType type, V1_0::PerformanceInfo perf) {
+    update<HalVersion::V1_2>(operandPerformance, type, perf);
+}
+void update(hardware::hidl_vec<V1_3::Capabilities::OperandPerformance>* operandPerformance,
+            V1_3::OperandType type, V1_0::PerformanceInfo perf) {
+    update<HalVersion::V1_3>(operandPerformance, type, perf);
+}
+
+template <HalVersion version>
+V1_0::PerformanceInfo lookup(
+        const hardware::hidl_vec<VersionedOperandPerformance<version>>& operandPerformance,
+        VersionedOperandType<version> type) {
+    const auto it = std::lower_bound(operandPerformance.begin(), operandPerformance.end(), type,
+                                     [](const VersionedOperandPerformance<version>& perf,
+                                        VersionedOperandType<version> type) {
+                                         return static_cast<V1_3::OperandType>(perf.type) <
+                                                static_cast<V1_3::OperandType>(type);
+                                     });
+    if (it == operandPerformance.end()) {
+        LOG(WARNING) << "No PerformanceInfo for " << toString(type);
+        return kNoPerformanceInfo;
+    } else {
+        return it->info;
+    }
+}
+
+V1_0::PerformanceInfo lookup(
+        const hardware::hidl_vec<V1_2::Capabilities::OperandPerformance>& operandPerformance,
+        V1_2::OperandType type) {
+    return lookup<HalVersion::V1_2>(operandPerformance, type);
+}
+V1_0::PerformanceInfo lookup(
+        const hardware::hidl_vec<V1_3::Capabilities::OperandPerformance>& operandPerformance,
+        V1_3::OperandType type) {
+    CHECK(type != V1_3::OperandType::SUBGRAPH)
+            << "Use Capabilities::ifPerformance or Capabilities::whilePerformance";
+    return lookup<HalVersion::V1_3>(operandPerformance, type);
+}
+
+// Versioning
+
+// In Android P, most data types are treated as having the same performance as
+// TENSOR_QUANT8_ASYMM. This array must be in sorted order.
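+// It is consumed by makeQuantized8PerformanceConsistentWithP() below, whose output becomes
+// part of Capabilities::operandPerformance and is therefore searched with std::lower_bound.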
+static const V1_3::OperandType kQuantized8PerformanceConsistentWithP[] = {
+        V1_3::OperandType::INT32, V1_3::OperandType::UINT32, V1_3::OperandType::TENSOR_INT32,
+        V1_3::OperandType::OEM, V1_3::OperandType::TENSOR_OEM_BYTE};
+
+static bool isQuantized8PerformanceConsistentWithP(const V1_2::Capabilities& capabilities) {
+    const V1_0::PerformanceInfo quantized8Performance =
+            lookup(capabilities.operandPerformance, V1_2::OperandType::TENSOR_QUANT8_ASYMM);
+    return std::all_of(std::begin(kQuantized8PerformanceConsistentWithP),
+                       std::end(kQuantized8PerformanceConsistentWithP),
+                       [quantized8Performance, &capabilities](V1_3::OperandType type) {
+                           return quantized8Performance ==
+                                  lookup(capabilities.operandPerformance,
+                                         static_cast<V1_2::OperandType>(type));
+                       });
+}
+
+static bool isQuantized8PerformanceConsistentWithP(const V1_3::Capabilities& capabilities) {
+    const V1_0::PerformanceInfo quantized8Performance =
+            lookup(capabilities.operandPerformance, V1_3::OperandType::TENSOR_QUANT8_ASYMM);
+    return std::all_of(std::begin(kQuantized8PerformanceConsistentWithP),
+                       std::end(kQuantized8PerformanceConsistentWithP),
+                       [quantized8Performance, &capabilities](V1_3::OperandType type) {
+                           return quantized8Performance ==
+                                  lookup(capabilities.operandPerformance, type);
+                       });
+}
+
+static hardware::hidl_vec<V1_2::Capabilities::OperandPerformance>
+makeQuantized8PerformanceConsistentWithP(V1_0::PerformanceInfo quantized8Performance) {
+    hardware::hidl_vec<V1_2::Capabilities::OperandPerformance> ret(
+            std::size(kQuantized8PerformanceConsistentWithP));
+    std::transform(std::begin(kQuantized8PerformanceConsistentWithP),
+                   std::end(kQuantized8PerformanceConsistentWithP), ret.begin(),
+                   [quantized8Performance](
+                           V1_3::OperandType type) -> V1_2::Capabilities::OperandPerformance {
+                       return {static_cast<V1_2::OperandType>(type), quantized8Performance};
+                   });
+    return ret;
+}
+
+bool compliantWithV1_0(const V1_0::Capabilities&) {
+    return true;
+}
+
+bool compliantWithV1_0(const V1_1::Capabilities& capabilities) {
+    return capabilities.relaxedFloat32toFloat16Performance == capabilities.float32Performance;
+}
+
+bool compliantWithV1_0(const V1_2::Capabilities& capabilities) {
+    const V1_0::PerformanceInfo perfTensorFloat32 =
+            lookup(capabilities.operandPerformance, V1_2::OperandType::TENSOR_FLOAT32);
+    const V1_0::PerformanceInfo perfFloat32 =
+            lookup(capabilities.operandPerformance, V1_2::OperandType::FLOAT32);
+    if (perfTensorFloat32 != perfFloat32 ||
+        perfTensorFloat32 != capabilities.relaxedFloat32toFloat16PerformanceTensor ||
+        perfFloat32 != capabilities.relaxedFloat32toFloat16PerformanceScalar) {
+        return false;
+    }
+
+    return isQuantized8PerformanceConsistentWithP(capabilities);
+}
+
+bool compliantWithV1_0(const V1_3::Capabilities& capabilities) {
+    const V1_0::PerformanceInfo perfTensorFloat32 =
+            lookup(capabilities.operandPerformance, V1_3::OperandType::TENSOR_FLOAT32);
+    const V1_0::PerformanceInfo perfFloat32 =
+            lookup(capabilities.operandPerformance, V1_3::OperandType::FLOAT32);
+    if (perfTensorFloat32 != perfFloat32 ||
+        perfTensorFloat32 != capabilities.relaxedFloat32toFloat16PerformanceTensor ||
+        perfFloat32 != capabilities.relaxedFloat32toFloat16PerformanceScalar) {
+        return false;
+    }
+
+    return isQuantized8PerformanceConsistentWithP(capabilities);
+}
+
+bool compliantWithV1_1(const V1_0::Capabilities&) {
+    return true;
+}
+
+bool compliantWithV1_1(const V1_1::Capabilities&) {
+    return true;
+}
+
+bool compliantWithV1_1(const V1_2::Capabilities& capabilities) {
+    if ((capabilities.relaxedFloat32toFloat16PerformanceTensor !=
+         capabilities.relaxedFloat32toFloat16PerformanceScalar) ||
+        (lookup(capabilities.operandPerformance, V1_2::OperandType::TENSOR_FLOAT32) !=
+         lookup(capabilities.operandPerformance, V1_2::OperandType::FLOAT32))) {
+        return false;
+    }
+
+    return isQuantized8PerformanceConsistentWithP(capabilities);
+}
+
+bool compliantWithV1_1(const V1_3::Capabilities& capabilities) {
+    if ((capabilities.relaxedFloat32toFloat16PerformanceTensor !=
+         capabilities.relaxedFloat32toFloat16PerformanceScalar) ||
+        (lookup(capabilities.operandPerformance, V1_3::OperandType::TENSOR_FLOAT32) !=
+         lookup(capabilities.operandPerformance, V1_3::OperandType::FLOAT32))) {
+        return false;
+    }
+
+    return isQuantized8PerformanceConsistentWithP(capabilities);
+}
+
+bool compliantWithV1_2(const V1_0::Capabilities&) {
+    return true;
+}
+
+bool compliantWithV1_2(const V1_1::Capabilities&) {
+    return true;
+}
+
+bool compliantWithV1_2(const V1_2::Capabilities&) {
+    return true;
+}
+
+bool compliantWithV1_2(const V1_3::Capabilities&) {
+    return true;
+}
+
+bool compliantWithV1_3(const V1_0::Capabilities&) {
+    return true;
+}
+
+bool compliantWithV1_3(const V1_1::Capabilities&) {
+    return true;
+}
+
+bool compliantWithV1_3(const V1_2::Capabilities&) {
+    return true;
+}
+
+bool compliantWithV1_3(const V1_3::Capabilities&) {
+    return true;
+}
+
+V1_0::ErrorStatus convertToV1_0(V1_0::ErrorStatus status) {
+    return status;
+}
+
+V1_0::ErrorStatus convertToV1_0(V1_3::ErrorStatus status) {
+    switch (status) {
+        case V1_3::ErrorStatus::NONE:
+            return V1_0::ErrorStatus::NONE;
+        case V1_3::ErrorStatus::DEVICE_UNAVAILABLE:
+            return V1_0::ErrorStatus::DEVICE_UNAVAILABLE;
+        case V1_3::ErrorStatus::GENERAL_FAILURE:
+            return V1_0::ErrorStatus::GENERAL_FAILURE;
+        case V1_3::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE:
+            return V1_0::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE;
+        case V1_3::ErrorStatus::INVALID_ARGUMENT:
+            return V1_0::ErrorStatus::INVALID_ARGUMENT;
+        case V1_3::ErrorStatus::MISSED_DEADLINE_TRANSIENT:
+            return V1_0::ErrorStatus::GENERAL_FAILURE;
+        case V1_3::ErrorStatus::MISSED_DEADLINE_PERSISTENT:
+            return V1_0::ErrorStatus::GENERAL_FAILURE;
+        case V1_3::ErrorStatus::RESOURCE_EXHAUSTED_TRANSIENT:
+            return V1_0::ErrorStatus::GENERAL_FAILURE;
+        case V1_3::ErrorStatus::RESOURCE_EXHAUSTED_PERSISTENT:
+            return V1_0::ErrorStatus::GENERAL_FAILURE;
+    }
+    LOG(ERROR) << "Unknown ErrorStatus: " << toString(status) << " mapped to GENERAL_FAILURE";
+    return V1_0::ErrorStatus::GENERAL_FAILURE;
+}
+
+V1_3::ErrorStatus convertToV1_3(V1_0::ErrorStatus status) {
+    return static_cast<V1_3::ErrorStatus>(status);
+}
+
+V1_3::ErrorStatus convertToV1_3(V1_3::ErrorStatus status) {
+    return status;
+}
+
+static V1_0::OperationType uncheckedConvertToV1_0(V1_1::OperationType type) {
+    return static_cast<V1_0::OperationType>(type);
+}
+
+static V1_0::OperationType uncheckedConvertToV1_0(V1_2::OperationType type) {
+    return static_cast<V1_0::OperationType>(type);
+}
+
+V1_0::OperationType uncheckedConvertToV1_0(V1_3::OperationType type) {
+    return static_cast<V1_0::OperationType>(type);
+}
+
+static V1_1::OperationType convertToV1_1(V1_0::OperationType type) {
+    return static_cast<V1_1::OperationType>(type);
+}
+
+static V1_1::OperationType uncheckedConvertToV1_1(V1_2::OperationType type) {
+    return static_cast<V1_1::OperationType>(type);
+}
+
+V1_1::OperationType uncheckedConvertToV1_1(V1_3::OperationType type) {
+    return static_cast<V1_1::OperationType>(type);
+}
+
+static V1_2::OperationType convertToV1_2(V1_0::OperationType type) {
+    return static_cast<V1_2::OperationType>(type);
+}
+
+static V1_2::OperationType convertToV1_2(V1_1::OperationType type) {
+    return static_cast<V1_2::OperationType>(type);
+}
+
+V1_2::OperationType uncheckedConvertToV1_2(V1_3::OperationType type) {
+    return static_cast<V1_2::OperationType>(type);
+}
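+
+// Illustrative note (not part of the original change): the unchecked casts above assume
+// the enum value also exists in the older version. The Model and Capabilities converters
+// below pair them with a compliantWithV1_* check, roughly:
+//
+//     if (!compliantWithV1_0(model)) {
+//         LOG(ERROR) << "Upcasting non-compliant model ...";
+//     }
+//     auto operations = uncheckedConvertToV1_0(model.operations);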
+ +static V1_3::OperationType convertToV1_3(V1_0::OperationType type) { + return static_cast(type); +} + +static V1_3::OperationType convertToV1_3(V1_1::OperationType type) { + return static_cast(type); +} + +static V1_3::OperationType convertToV1_3(V1_2::OperationType type) { + return static_cast(type); +} + +V1_0::Capabilities convertToV1_0(const V1_0::Capabilities& capabilities) { + return capabilities; +} + +V1_0::Capabilities convertToV1_0(const V1_1::Capabilities& capabilities) { + if (!compliantWithV1_0(capabilities)) { + LOG(ERROR) << "Upcasting non-compliant capabilities " << toString(capabilities) + << " from V1_1::Capabilities to V1_0::Capabilities"; + } + return {.float32Performance = capabilities.float32Performance, + .quantized8Performance = capabilities.quantized8Performance}; +} + +V1_0::Capabilities convertToV1_0(const V1_2::Capabilities& capabilities) { + if (!compliantWithV1_0(capabilities)) { + LOG(ERROR) << "Upcasting non-compliant capabilities " << toString(capabilities) + << " from V1_2::Capabilities to V1_0::Capabilities"; + } + return {.float32Performance = + lookup(capabilities.operandPerformance, V1_2::OperandType::TENSOR_FLOAT32), + .quantized8Performance = lookup(capabilities.operandPerformance, + V1_2::OperandType::TENSOR_QUANT8_ASYMM)}; +} + +V1_0::Capabilities convertToV1_0(const V1_3::Capabilities& capabilities) { + if (!compliantWithV1_0(capabilities)) { + LOG(ERROR) << "Upcasting non-compliant capabilities " << toString(capabilities) + << " from V1_3::Capabilities to V1_0::Capabilities"; + } + return {.float32Performance = + lookup(capabilities.operandPerformance, V1_3::OperandType::TENSOR_FLOAT32), + .quantized8Performance = lookup(capabilities.operandPerformance, + V1_3::OperandType::TENSOR_QUANT8_ASYMM)}; +} + +V1_1::Capabilities convertToV1_1(const V1_0::Capabilities& capabilities) { + return {.float32Performance = capabilities.float32Performance, + .quantized8Performance = capabilities.quantized8Performance, + .relaxedFloat32toFloat16Performance = capabilities.float32Performance}; +} + +V1_1::Capabilities convertToV1_1(const V1_1::Capabilities& capabilities) { + return capabilities; +} + +V1_1::Capabilities convertToV1_1(const V1_2::Capabilities& capabilities) { + if (!compliantWithV1_1(capabilities)) { + LOG(ERROR) << "Upcasting non-compliant capabilities " << toString(capabilities) + << " from V1_2::Capabilities to V1_1::Capabilities"; + } + return {.float32Performance = + lookup(capabilities.operandPerformance, V1_2::OperandType::TENSOR_FLOAT32), + .quantized8Performance = + lookup(capabilities.operandPerformance, V1_2::OperandType::TENSOR_QUANT8_ASYMM), + .relaxedFloat32toFloat16Performance = + capabilities.relaxedFloat32toFloat16PerformanceTensor}; +} + +V1_1::Capabilities convertToV1_1(const V1_3::Capabilities& capabilities) { + if (!compliantWithV1_1(capabilities)) { + LOG(ERROR) << "Upcasting non-compliant capabilities " << toString(capabilities) + << " from V1_3::Capabilities to V1_1::Capabilities"; + } + return {.float32Performance = + lookup(capabilities.operandPerformance, V1_3::OperandType::TENSOR_FLOAT32), + .quantized8Performance = + lookup(capabilities.operandPerformance, V1_3::OperandType::TENSOR_QUANT8_ASYMM), + .relaxedFloat32toFloat16Performance = + capabilities.relaxedFloat32toFloat16PerformanceTensor}; +} + +V1_2::Capabilities convertToV1_2(const V1_0::Capabilities& capabilities) { + V1_2::Capabilities ret = { + .relaxedFloat32toFloat16PerformanceScalar = capabilities.float32Performance, + 
.relaxedFloat32toFloat16PerformanceTensor = capabilities.float32Performance, + .operandPerformance = + makeQuantized8PerformanceConsistentWithP(capabilities.quantized8Performance)}; + auto& opPerf = ret.operandPerformance; + opPerf.resize(opPerf.size() + 2); + opPerf[opPerf.size() - 2] = {V1_2::OperandType::TENSOR_FLOAT32, + capabilities.float32Performance}; + opPerf[opPerf.size() - 1] = {V1_2::OperandType::FLOAT32, capabilities.float32Performance}; + using OperandPerformance = V1_2::Capabilities::OperandPerformance; + std::sort(opPerf.begin(), opPerf.end(), + [](const OperandPerformance& a, const OperandPerformance& b) { + return a.type < b.type; + }); + return ret; +} + +V1_2::Capabilities convertToV1_2(const V1_1::Capabilities& capabilities) { + V1_2::Capabilities ret = {.relaxedFloat32toFloat16PerformanceScalar = + capabilities.relaxedFloat32toFloat16Performance, + .relaxedFloat32toFloat16PerformanceTensor = + capabilities.relaxedFloat32toFloat16Performance, + .operandPerformance = makeQuantized8PerformanceConsistentWithP( + capabilities.quantized8Performance)}; + auto& opPerf = ret.operandPerformance; + opPerf.resize(opPerf.size() + 2); + opPerf[opPerf.size() - 2] = {V1_2::OperandType::TENSOR_FLOAT32, + capabilities.float32Performance}; + opPerf[opPerf.size() - 1] = {V1_2::OperandType::FLOAT32, capabilities.float32Performance}; + using OperandPerformance = V1_2::Capabilities::OperandPerformance; + std::sort(opPerf.begin(), opPerf.end(), + [](const OperandPerformance& a, const OperandPerformance& b) { + return a.type < b.type; + }); + return ret; +} + +V1_2::Capabilities convertToV1_2(const V1_2::Capabilities& capabilities) { + return capabilities; +} + +V1_2::Capabilities convertToV1_2(const V1_3::Capabilities& capabilities) { + V1_2::Capabilities ret = { + .relaxedFloat32toFloat16PerformanceScalar = + capabilities.relaxedFloat32toFloat16PerformanceScalar, + .relaxedFloat32toFloat16PerformanceTensor = + capabilities.relaxedFloat32toFloat16PerformanceTensor, + }; + const auto& inputOpPerf = capabilities.operandPerformance; + hardware::hidl_vec opPerfSupported; + opPerfSupported.resize(inputOpPerf.size()); + auto last = + std::copy_if(inputOpPerf.begin(), inputOpPerf.end(), opPerfSupported.begin(), + [](V1_3::Capabilities::OperandPerformance opPerf) { + return validOperandType(static_cast(opPerf.type)); + }); + opPerfSupported.resize(std::distance(opPerfSupported.begin(), last)); + + auto& convertedOpPerf = ret.operandPerformance; + convertedOpPerf.resize(opPerfSupported.size()); + std::transform(opPerfSupported.begin(), opPerfSupported.end(), convertedOpPerf.begin(), + [](V1_3::Capabilities::OperandPerformance opPerf) { + return V1_2::Capabilities::OperandPerformance{ + static_cast(opPerf.type), opPerf.info}; + }); + return ret; +} + +V1_3::Capabilities convertToV1_3(const V1_0::Capabilities& capabilities) { + return convertToV1_3(convertToV1_2(capabilities)); +} + +V1_3::Capabilities convertToV1_3(const V1_1::Capabilities& capabilities) { + return convertToV1_3(convertToV1_2(capabilities)); +} + +V1_3::Capabilities convertToV1_3(const V1_2::Capabilities& capabilities) { + V1_3::Capabilities ret = { + .relaxedFloat32toFloat16PerformanceScalar = + capabilities.relaxedFloat32toFloat16PerformanceScalar, + .relaxedFloat32toFloat16PerformanceTensor = + capabilities.relaxedFloat32toFloat16PerformanceTensor, + .ifPerformance = kNoPerformanceInfo, + .whilePerformance = kNoPerformanceInfo, + }; + auto& opPerf = ret.operandPerformance; + opPerf.resize(capabilities.operandPerformance.size()); + 
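+    // V1_3::OperandType extends V1_2::OperandType without renumbering, so the widening
+    // cast below preserves both the values and the sorted order of the vector.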
std::transform(capabilities.operandPerformance.begin(), capabilities.operandPerformance.end(), + opPerf.begin(), [](V1_2::Capabilities::OperandPerformance opPerf) { + return V1_3::Capabilities::OperandPerformance{ + static_cast(opPerf.type), opPerf.info}; + }); + return ret; +} + +V1_3::Capabilities convertToV1_3(const V1_3::Capabilities& capabilities) { + return capabilities; +} + +static V1_0::Operation uncheckedConvertToV1_0(const V1_1::Operation& operation) { + return {.type = uncheckedConvertToV1_0(operation.type), + .inputs = operation.inputs, + .outputs = operation.outputs}; +} + +static V1_1::Operation convertToV1_1(const V1_0::Operation& operation) { + return {.type = convertToV1_1(operation.type), + .inputs = operation.inputs, + .outputs = operation.outputs}; +} + +static hardware::hidl_vec uncheckedConvertToV1_0( + const hardware::hidl_vec& operations) { + hardware::hidl_vec result(operations.size()); + std::transform( + operations.begin(), operations.end(), result.begin(), + [](const V1_1::Operation& operation) { return uncheckedConvertToV1_0(operation); }); + return result; +} + +static hardware::hidl_vec convertToV1_1( + const hardware::hidl_vec& operations) { + hardware::hidl_vec result(operations.size()); + std::transform(operations.begin(), operations.end(), result.begin(), + [](const V1_0::Operation& operation) { return convertToV1_1(operation); }); + return result; +} + +bool compliantWithV1_0(const V1_3::Operand& operand) { + return validOperandType(static_cast(operand.type)) && + (nonExtensionOperandTypeIsScalar(static_cast(operand.type)) || + operand.dimensions.size() != 0) && + compliantWithV1_0(operand.lifetime); +} + +bool compliantWithV1_2(const V1_3::Operand& operand) { + return validOperandType(static_cast(operand.type)) && + compliantWithV1_0(operand.lifetime); +} + +bool compliantWithV1_3(const V1_3::Operand& operand) { + return true; +} + +static bool compliantWith(HalVersion version, const V1_3::Model& model, + std::set* noncompliantOperations) { + // A boolean vector indicating whether each pool is compliant with the target HAL version. + std::vector isPoolCompliant(model.pools.size(), false); + std::transform( + model.pools.begin(), model.pools.end(), isPoolCompliant.begin(), + [version](const hardware::hidl_memory& pool) { return validatePool(pool, version); }); + + // A boolean vector indicating whether each operand is compliant with the target HAL version. + std::vector isOperandCompliant(model.main.operands.size(), false); + std::transform(model.main.operands.begin(), model.main.operands.end(), + isOperandCompliant.begin(), + [&isPoolCompliant, version](const V1_3::Operand& op) { + bool is_operand_compliant = false; + switch (version) { + case HalVersion::UNKNOWN: + is_operand_compliant = false; + break; + case HalVersion::V1_0: + is_operand_compliant = compliantWithV1_0(op); + break; + case HalVersion::V1_1: + // There is no V1_1::Operand -- both V1_0::Model + // and V1_1::Model use V1_0::Operand. 
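+                              // (V1_1 added new operations but no new operand
+                              // types, so the V1_0 operand check applies.)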
+ is_operand_compliant = compliantWithV1_0(op); + break; + case HalVersion::V1_2: + is_operand_compliant = compliantWithV1_2(op); + break; + case HalVersion::V1_3: + is_operand_compliant = compliantWithV1_3(op); + break; + } + return is_operand_compliant && + !(op.lifetime == V1_3::OperandLifeTime::CONSTANT_REFERENCE && + !isPoolCompliant[op.location.poolIndex]); + }); + + auto allOperandsCompliant = [&isOperandCompliant](const hardware::hidl_vec& indices) { + return std::all_of( + indices.begin(), indices.end(), + [&isOperandCompliant](const uint32_t ind) { return isOperandCompliant[ind]; }); + }; + + auto localValidateOperation = [&model, version, + &allOperandsCompliant](const V1_3::Operation& op) { + if (!allOperandsCompliant(op.inputs) || !allOperandsCompliant(op.outputs)) return false; + int error = validateOperation(static_cast(op.type), op.inputs.size(), + op.inputs.size() > 0 ? op.inputs.data() : nullptr, + op.outputs.size(), + op.outputs.size() > 0 ? op.outputs.data() : nullptr, + uncheckedConvert(model.main.operands), version); + return error == ANEURALNETWORKS_NO_ERROR; + }; + + if (noncompliantOperations) { + CHECK(noncompliantOperations->empty()); + for (uint32_t idx = 0; idx < model.main.operations.size(); ++idx) { + if (!localValidateOperation(model.main.operations[idx])) { + noncompliantOperations->insert(idx); + } + } + return noncompliantOperations->empty(); + } else { + return std::all_of(model.main.operations.begin(), model.main.operations.end(), + localValidateOperation); + } +} + +bool compliantWithV1_0(const V1_0::Model& model) { + return true; +} + +bool compliantWithV1_0(const V1_1::Model& model) { + // In addition to new enumeration values being introduced in V1_1::Model, a + // new flag was introduced to indicate whether or not float32 data can be + // calculated using float16 units. This 'relaxComputationFloat32toFloat16' + // flag is not relevant in whether a V1_1::Model is compliant with a + // V1_0::Model because all 1.0 drivers require strict calculation by default + // in the P NN runtime. Even if fp16 calculations are allowed, they can + // still be computed by a strict fp32 driver. + auto operands = uncheckedConvert(convertToV1_3(model.operands)); + return std::all_of(model.operations.begin(), model.operations.end(), + [&operands](const V1_1::Operation& op) { + int error = validateOperation( + static_cast(op.type), op.inputs.size(), + op.inputs.size() > 0 ? op.inputs.data() : nullptr, + op.outputs.size(), + op.outputs.size() > 0 ? 
op.outputs.data() : nullptr, operands, + HalVersion::V1_0); + return error == ANEURALNETWORKS_NO_ERROR; + }); +} + +bool compliantWithV1_0(const V1_2::Model& model, std::set* noncompliantOperations) { + return compliantWith(HalVersion::V1_0, convertToV1_3(model), noncompliantOperations); +} + +bool compliantWithV1_0(const V1_3::Model& model, std::set* noncompliantOperations) { + return compliantWith(HalVersion::V1_0, model, noncompliantOperations); +} + +bool compliantWithV1_1(const V1_0::Model&) { + return true; +} + +bool compliantWithV1_1(const V1_1::Model&) { + return true; +} + +bool compliantWithV1_1(const V1_2::Model& model, std::set* noncompliantOperations) { + return compliantWith(HalVersion::V1_1, convertToV1_3(model), noncompliantOperations); +} + +bool compliantWithV1_1(const V1_3::Model& model, std::set* noncompliantOperations) { + return compliantWith(HalVersion::V1_1, model, noncompliantOperations); +} + +bool compliantWithV1_2(const V1_0::Model&) { + return true; +} + +bool compliantWithV1_2(const V1_1::Model&) { + return true; +} + +bool compliantWithV1_2(const V1_2::Model&, std::set* noncompliantOperations) { + return true; +} + +bool compliantWithV1_2(const V1_3::Model& model, std::set* noncompliantOperations) { + return compliantWith(HalVersion::V1_2, model, noncompliantOperations); +} + +static V1_0::Operation uncheckedConvertToV1_0(const V1_2::Operation& operation) { + return {.type = uncheckedConvertToV1_0(operation.type), + .inputs = operation.inputs, + .outputs = operation.outputs}; +} + +static V1_0::Operation uncheckedConvertToV1_0(const V1_3::Operation& operation) { + return {.type = uncheckedConvertToV1_0(operation.type), + .inputs = operation.inputs, + .outputs = operation.outputs}; +} + +static V1_1::Operation uncheckedConvertToV1_1(const V1_2::Operation& operation) { + return {.type = uncheckedConvertToV1_1(operation.type), + .inputs = operation.inputs, + .outputs = operation.outputs}; +} + +static V1_1::Operation uncheckedConvertToV1_1(const V1_3::Operation& operation) { + return {.type = uncheckedConvertToV1_1(operation.type), + .inputs = operation.inputs, + .outputs = operation.outputs}; +} + +static V1_2::Operation convertToV1_2(const V1_0::Operation& operation) { + return {.type = convertToV1_2(operation.type), + .inputs = operation.inputs, + .outputs = operation.outputs}; +} + +static V1_2::Operation convertToV1_2(const V1_1::Operation& operation) { + return {.type = convertToV1_2(operation.type), + .inputs = operation.inputs, + .outputs = operation.outputs}; +} + +static V1_2::Operation uncheckedConvertToV1_2(const V1_3::Operation& operation) { + return {.type = uncheckedConvertToV1_2(operation.type), + .inputs = operation.inputs, + .outputs = operation.outputs}; +} + +static V1_3::Operation convertToV1_3(const V1_0::Operation& operation) { + return {.type = convertToV1_3(operation.type), + .inputs = operation.inputs, + .outputs = operation.outputs}; +} + +static V1_3::Operation convertToV1_3(const V1_1::Operation& operation) { + return {.type = convertToV1_3(operation.type), + .inputs = operation.inputs, + .outputs = operation.outputs}; +} + +static V1_3::Operation convertToV1_3(const V1_2::Operation& operation) { + return {.type = convertToV1_3(operation.type), + .inputs = operation.inputs, + .outputs = operation.outputs}; +} + +static hardware::hidl_vec uncheckedConvertToV1_0( + const hardware::hidl_vec& operations) { + hardware::hidl_vec result(operations.size()); + std::transform( + operations.begin(), operations.end(), result.begin(), + [](const 
V1_3::Operation& operation) { return uncheckedConvertToV1_0(operation); }); + return result; +} + +static hardware::hidl_vec uncheckedConvertToV1_0( + const hardware::hidl_vec& operations) { + hardware::hidl_vec result(operations.size()); + std::transform( + operations.begin(), operations.end(), result.begin(), + [](const V1_2::Operation& operation) { return uncheckedConvertToV1_0(operation); }); + return result; +} + +static hardware::hidl_vec uncheckedConvertToV1_2( + const hardware::hidl_vec& operations) { + hardware::hidl_vec result(operations.size()); + std::transform( + operations.begin(), operations.end(), result.begin(), + [](const V1_3::Operation& operation) { return uncheckedConvertToV1_2(operation); }); + return result; +} + +static hardware::hidl_vec uncheckedConvertToV1_1( + const hardware::hidl_vec& operations) { + hardware::hidl_vec result(operations.size()); + std::transform( + operations.begin(), operations.end(), result.begin(), + [](const V1_2::Operation& operation) { return uncheckedConvertToV1_1(operation); }); + return result; +} + +static hardware::hidl_vec uncheckedConvertToV1_1( + const hardware::hidl_vec& operations) { + hardware::hidl_vec result(operations.size()); + std::transform( + operations.begin(), operations.end(), result.begin(), + [](const V1_3::Operation& operation) { return uncheckedConvertToV1_1(operation); }); + return result; +} + +static hardware::hidl_vec convertToV1_2( + const hardware::hidl_vec& operations) { + hardware::hidl_vec result(operations.size()); + std::transform(operations.begin(), operations.end(), result.begin(), + [](const V1_0::Operation& operation) { return convertToV1_2(operation); }); + return result; +} + +static hardware::hidl_vec convertToV1_2( + const hardware::hidl_vec& operations) { + hardware::hidl_vec result(operations.size()); + std::transform(operations.begin(), operations.end(), result.begin(), + [](const V1_1::Operation& operation) { return convertToV1_2(operation); }); + return result; +} + +static hardware::hidl_vec convertToV1_3( + const hardware::hidl_vec& operations) { + hardware::hidl_vec result(operations.size()); + std::transform(operations.begin(), operations.end(), result.begin(), + [](const V1_0::Operation& operation) { return convertToV1_3(operation); }); + return result; +} + +static hardware::hidl_vec convertToV1_3( + const hardware::hidl_vec& operations) { + hardware::hidl_vec result(operations.size()); + std::transform(operations.begin(), operations.end(), result.begin(), + [](const V1_1::Operation& operation) { return convertToV1_3(operation); }); + return result; +} + +static hardware::hidl_vec convertToV1_3( + const hardware::hidl_vec& operations) { + hardware::hidl_vec result(operations.size()); + std::transform(operations.begin(), operations.end(), result.begin(), + [](const V1_2::Operation& operation) { return convertToV1_3(operation); }); + return result; +} + +static bool compliantWithV1_0(const V1_2::OperandType& operandType) { + return validOperandType(static_cast(operandType)); +} + +static bool compliantWithV1_0(const V1_3::OperandType& operandType) { + return validOperandType(static_cast(operandType)); +} + +static bool compliantWithV1_2(const V1_3::OperandType& operandType) { + return validOperandType(static_cast(operandType)); +} + +V1_0::OperandType convertToV1_0(const V1_2::OperandType& operandType) { + if (!compliantWithV1_0(operandType)) { + LOG(ERROR) << "Upcasting non-compliant operand type " << toString(operandType) + << " from V1_2::OperandType to V1_0::OperandType"; + } + 
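+    // The cast still proceeds for non-compliant values; the log above is the only guard.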
+
+V1_0::OperandType convertToV1_0(const V1_2::OperandType& operandType) {
+    if (!compliantWithV1_0(operandType)) {
+        LOG(ERROR) << "Upcasting non-compliant operand type " << toString(operandType)
+                   << " from V1_2::OperandType to V1_0::OperandType";
+    }
+    return static_cast<V1_0::OperandType>(operandType);
+}
+
+V1_2::OperandType convertToV1_2(const V1_0::OperandType& operandType) {
+    return static_cast<V1_2::OperandType>(operandType);
+}
+
+V1_2::OperandType convertToV1_2(const V1_3::OperandType& operandType) {
+    if (!compliantWithV1_2(operandType)) {
+        LOG(ERROR) << "Upcasting non-compliant operand type " << toString(operandType)
+                   << " from V1_3::OperandType to V1_2::OperandType";
+    }
+    return static_cast<V1_2::OperandType>(operandType);
+}
+
+V1_0::OperandType convertToV1_0(const V1_3::OperandType& operandType) {
+    if (!compliantWithV1_0(operandType)) {
+        LOG(ERROR) << "Upcasting non-compliant operand type " << toString(operandType)
+                   << " from V1_3::OperandType to V1_0::OperandType";
+    }
+    return static_cast<V1_0::OperandType>(operandType);
+}
+
+bool compliantWithV1_0(V1_0::OperandLifeTime lifetime) {
+    return true;
+}
+
+bool compliantWithV1_0(V1_3::OperandLifeTime lifetime) {
+    return lifetime != V1_3::OperandLifeTime::SUBGRAPH;
+}
+
+bool compliantWithV1_3(V1_0::OperandLifeTime lifetime) {
+    return true;
+}
+
+bool compliantWithV1_3(V1_3::OperandLifeTime lifetime) {
+    return true;
+}
+
+V1_0::OperandLifeTime convertToV1_0(V1_0::OperandLifeTime lifetime) {
+    return lifetime;
+}
+
+V1_0::OperandLifeTime convertToV1_0(V1_3::OperandLifeTime lifetime) {
+    if (!compliantWithV1_0(lifetime)) {
+        LOG(ERROR) << "Upcasting non-compliant lifetime " << toString(lifetime)
+                   << " from V1_3 to V1_0";
+    }
+    return static_cast<V1_0::OperandLifeTime>(lifetime);
+}
+
+V1_3::OperandLifeTime convertToV1_3(V1_0::OperandLifeTime lifetime) {
+    return static_cast<V1_3::OperandLifeTime>(lifetime);
+}
+
+V1_3::OperandLifeTime convertToV1_3(V1_3::OperandLifeTime lifetime) {
+    return lifetime;
+}
+
+V1_0::Operand convertToV1_0(const V1_2::Operand& operand) {
+    return {.type = convertToV1_0(operand.type),
+            .dimensions = operand.dimensions,
+            .numberOfConsumers = operand.numberOfConsumers,
+            .scale = operand.scale,
+            .zeroPoint = operand.zeroPoint,
+            .lifetime = convertToV1_0(operand.lifetime),
+            .location = operand.location};
+}
+
+V1_0::Operand convertToV1_0(const V1_3::Operand& operand) {
+    return {.type = convertToV1_0(operand.type),
+            .dimensions = operand.dimensions,
+            .numberOfConsumers = operand.numberOfConsumers,
+            .scale = operand.scale,
+            .zeroPoint = operand.zeroPoint,
+            .lifetime = convertToV1_0(operand.lifetime),
+            .location = operand.location};
+}
+
+V1_2::Operand convertToV1_2(const V1_0::Operand& operand) {
+    return {.type = convertToV1_2(operand.type),
+            .dimensions = operand.dimensions,
+            .numberOfConsumers = operand.numberOfConsumers,
+            .scale = operand.scale,
+            .zeroPoint = operand.zeroPoint,
+            .lifetime = operand.lifetime,
+            .location = operand.location};
+}
+
+V1_2::Operand convertToV1_2(const V1_3::Operand& operand) {
+    return {.type = convertToV1_2(operand.type),
+            .dimensions = operand.dimensions,
+            .numberOfConsumers = operand.numberOfConsumers,
+            .scale = operand.scale,
+            .zeroPoint = operand.zeroPoint,
+            .lifetime = static_cast<V1_0::OperandLifeTime>(operand.lifetime),
+            .location = operand.location,
+            .extraParams = operand.extraParams};
+}
+
+V1_3::Operand convertToV1_3(const V1_0::Operand& operand) {
+    return {.type = static_cast<V1_3::OperandType>(operand.type),
+            .dimensions = operand.dimensions,
+            .numberOfConsumers = operand.numberOfConsumers,
+            .scale = operand.scale,
+            .zeroPoint = operand.zeroPoint,
+            .lifetime = convertToV1_3(operand.lifetime),
+            .location = operand.location};
+}
+
+V1_3::Operand convertToV1_3(const V1_2::Operand& operand) {
+    return {.type = static_cast<V1_3::OperandType>(operand.type),
+            .dimensions = operand.dimensions,
+            .numberOfConsumers = operand.numberOfConsumers,
+            .scale = operand.scale,
+            .zeroPoint = operand.zeroPoint,
+            .lifetime = convertToV1_3(operand.lifetime),
+            .location = operand.location,
+            .extraParams = operand.extraParams};
+}
+
+V1_3::Operand convertToV1_3(const V1_3::Operand& operand) {
+    return operand;
+}
+
+hardware::hidl_vec<V1_0::Operand> convertToV1_0(
+        const hardware::hidl_vec<V1_0::Operand>& operands) {
+    return operands;
+}
+
+hardware::hidl_vec<V1_0::Operand> convertToV1_0(
+        const hardware::hidl_vec<V1_2::Operand>& operands) {
+    hardware::hidl_vec<V1_0::Operand> result(operands.size());
+    std::transform(operands.begin(), operands.end(), result.begin(),
+                   [](const V1_2::Operand& operand) { return convertToV1_0(operand); });
+    return result;
+}
+
+hardware::hidl_vec<V1_0::Operand> convertToV1_0(
+        const hardware::hidl_vec<V1_3::Operand>& operands) {
+    hardware::hidl_vec<V1_0::Operand> result(operands.size());
+    std::transform(operands.begin(), operands.end(), result.begin(),
+                   [](const V1_3::Operand& operand) { return convertToV1_0(operand); });
+    return result;
+}
+
+hardware::hidl_vec<V1_2::Operand> convertToV1_2(
+        const hardware::hidl_vec<V1_0::Operand>& operands) {
+    hardware::hidl_vec<V1_2::Operand> result(operands.size());
+    std::transform(operands.begin(), operands.end(), result.begin(),
+                   [](const V1_0::Operand& operand) { return convertToV1_2(operand); });
+    return result;
+}
+
+hardware::hidl_vec<V1_2::Operand> convertToV1_2(
+        const hardware::hidl_vec<V1_2::Operand>& operands) {
+    return operands;
+}
+
+hardware::hidl_vec<V1_2::Operand> convertToV1_2(
+        const hardware::hidl_vec<V1_3::Operand>& operands) {
+    hardware::hidl_vec<V1_2::Operand> result(operands.size());
+    std::transform(operands.begin(), operands.end(), result.begin(),
+                   [](const V1_3::Operand& operand) { return convertToV1_2(operand); });
+    return result;
+}
+
+hardware::hidl_vec<V1_3::Operand> convertToV1_3(
+        const hardware::hidl_vec<V1_0::Operand>& operands) {
+    hardware::hidl_vec<V1_3::Operand> result(operands.size());
+    std::transform(operands.begin(), operands.end(), result.begin(),
+                   [](const V1_0::Operand& operand) { return convertToV1_3(operand); });
+    return result;
+}
+
+hardware::hidl_vec<V1_3::Operand> convertToV1_3(
+        const hardware::hidl_vec<V1_2::Operand>& operands) {
+    hardware::hidl_vec<V1_3::Operand> result(operands.size());
+    std::transform(operands.begin(), operands.end(), result.begin(),
+                   [](const V1_2::Operand& operand) { return convertToV1_3(operand); });
+    return result;
+}
+
+hardware::hidl_vec<V1_3::Operand> convertToV1_3(
+        const hardware::hidl_vec<V1_3::Operand>& operands) {
+    return operands;
+}
+
+V1_0::Model convertToV1_0(const V1_0::Model& model) {
+    return model;
+}
+
+V1_0::Model convertToV1_0(const V1_1::Model& model) {
+    if (!compliantWithV1_0(model)) {
+        LOG(ERROR) << "Upcasting non-compliant model " << SHOW_IF_DEBUG(toString(model))
+                   << " from V1_1::Model to V1_0::Model";
+    }
+    return {.operands = model.operands,
+            .operations = uncheckedConvertToV1_0(model.operations),
+            .inputIndexes = model.inputIndexes,
+            .outputIndexes = model.outputIndexes,
+            .operandValues = model.operandValues,
+            .pools = model.pools};
+}
+
+V1_0::Model convertToV1_0(const V1_2::Model& model) {
+    if (!compliantWithV1_0(model)) {
+        LOG(ERROR) << "Upcasting non-compliant model " << SHOW_IF_DEBUG(toString(model))
+                   << " from V1_2::Model to V1_0::Model";
+    }
+    return {.operands = convertToV1_0(model.operands),
+            .operations = uncheckedConvertToV1_0(model.operations),
+            .inputIndexes = model.inputIndexes,
+            .outputIndexes = model.outputIndexes,
+            .operandValues = model.operandValues,
+            .pools = model.pools};
+}
+
+V1_0::Model convertToV1_0(const V1_3::Model& model) {
+    if (!compliantWithV1_0(model)) {
+        LOG(ERROR) << "Upcasting non-compliant model " << SHOW_IF_DEBUG(toString(model))
+                   << " from V1_3::Model to V1_0::Model";
+    }
+    return {.operands = convertToV1_0(model.main.operands),
+            .operations = uncheckedConvertToV1_0(model.main.operations),
+            .inputIndexes = model.main.inputIndexes,
+            .outputIndexes = model.main.outputIndexes,
+            .operandValues = model.operandValues,
+            .pools = model.pools};
+}
+
+V1_1::Model convertToV1_1(const V1_0::Model& model) {
+    return {.operands = model.operands,
+            .operations = convertToV1_1(model.operations),
+            .inputIndexes = model.inputIndexes,
+            .outputIndexes = model.outputIndexes,
+            .operandValues = model.operandValues,
+            .pools = model.pools,
+            .relaxComputationFloat32toFloat16 = false};
+}
+
+V1_1::Model convertToV1_1(const V1_1::Model& model) {
+    return model;
+}
+
+V1_1::Model convertToV1_1(const V1_2::Model& model) {
+    if (!compliantWithV1_1(model)) {
+        LOG(ERROR) << "Upcasting non-compliant model " << SHOW_IF_DEBUG(toString(model))
+                   << " from V1_2::Model to V1_1::Model";
+    }
+    return {.operands = convertToV1_0(model.operands),  // Operands in 1.1 and 1.0 are identical.
+            .operations = uncheckedConvertToV1_1(model.operations),
+            .inputIndexes = model.inputIndexes,
+            .outputIndexes = model.outputIndexes,
+            .operandValues = model.operandValues,
+            .pools = model.pools,
+            .relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16};
+}
+
+V1_1::Model convertToV1_1(const V1_3::Model& model) {
+    if (!compliantWithV1_1(model)) {
+        LOG(ERROR) << "Upcasting non-compliant model " << SHOW_IF_DEBUG(toString(model))
+                   << " from V1_3::Model to V1_1::Model";
+    }
+    return {// Operands in 1.1 and 1.0 are identical.
+            .operands = convertToV1_0(model.main.operands),
+            .operations = uncheckedConvertToV1_1(model.main.operations),
+            .inputIndexes = model.main.inputIndexes,
+            .outputIndexes = model.main.outputIndexes,
+            .operandValues = model.operandValues,
+            .pools = model.pools,
+            .relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16};
+}
+
+V1_2::Model convertToV1_2(const V1_0::Model& model) {
+    return {.operands = convertToV1_2(model.operands),
+            .operations = convertToV1_2(model.operations),
+            .inputIndexes = model.inputIndexes,
+            .outputIndexes = model.outputIndexes,
+            .operandValues = model.operandValues,
+            .pools = model.pools,
+            .relaxComputationFloat32toFloat16 = false};
+}
+
+V1_2::Model convertToV1_2(const V1_1::Model& model) {
+    return {.operands = convertToV1_2(model.operands),
+            .operations = convertToV1_2(model.operations),
+            .inputIndexes = model.inputIndexes,
+            .outputIndexes = model.outputIndexes,
+            .operandValues = model.operandValues,
+            .pools = model.pools,
+            .relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16};
+}
+
+V1_2::Model convertToV1_2(const V1_2::Model& model) {
+    return model;
+}
+
+V1_2::Model convertToV1_2(const V1_3::Model& model) {
+    if (!compliantWithV1_2(model)) {
+        LOG(ERROR) << "Upcasting non-compliant model " << SHOW_IF_DEBUG(toString(model))
+                   << " from V1_3::Model to V1_2::Model";
+    }
+    return {.operands = convertToV1_2(model.main.operands),
+            .operations = uncheckedConvertToV1_2(model.main.operations),
+            .inputIndexes = model.main.inputIndexes,
+            .outputIndexes = model.main.outputIndexes,
+            .operandValues = model.operandValues,
+            .pools = model.pools,
+            .relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16,
+            .extensionNameToPrefix = model.extensionNameToPrefix};
+}
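+
+// Note that downgrading a V1_3::Model keeps only model.main: V1_2 and older
+// models cannot represent referenced subgraphs, which is why models that use
+// control flow fail the compliantWith checks above.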
+
+V1_3::Model convertToV1_3(const V1_0::Model& model) {
+    return {.main = {.operands = convertToV1_3(model.operands),
+                     .operations = convertToV1_3(model.operations),
+                     .inputIndexes = model.inputIndexes,
+                     .outputIndexes = model.outputIndexes},
+            .operandValues = model.operandValues,
+            .pools = model.pools,
+            .relaxComputationFloat32toFloat16 = false};
+}
+
+V1_3::Model convertToV1_3(const V1_1::Model& model) {
+    return {.main = {.operands = convertToV1_3(model.operands),
+                     .operations = convertToV1_3(model.operations),
+                     .inputIndexes = model.inputIndexes,
+                     .outputIndexes = model.outputIndexes},
+            .operandValues = model.operandValues,
+            .pools = model.pools,
+            .relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16};
+}
+
+V1_3::Model convertToV1_3(const V1_2::Model& model) {
+    return {.main = {.operands = convertToV1_3(model.operands),
+                     .operations = convertToV1_3(model.operations),
+                     .inputIndexes = model.inputIndexes,
+                     .outputIndexes = model.outputIndexes},
+            .operandValues = model.operandValues,
+            .pools = model.pools,
+            .relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16,
+            .extensionNameToPrefix = model.extensionNameToPrefix};
+}
+
+V1_3::Model convertToV1_3(const V1_3::Model& model) {
+    return model;
+}
+
+bool compliantWithV1_0(const V1_0::Request& request) {
+    return true;
+}
+
+bool compliantWithV1_0(const V1_3::Request& request) {
+    return std::all_of(request.pools.begin(), request.pools.end(), [](const auto& pool) {
+        if (pool.getDiscriminator() != V1_3::Request::MemoryPool::hidl_discriminator::hidlMemory) {
+            return false;
+        }
+        const auto& name = pool.hidlMemory().name();
+        return name == "ashmem" || name == "mmap_fd";
+    });
+}
+
+bool compliantWithV1_2(const V1_3::Request& request) {
+    return std::all_of(request.pools.begin(), request.pools.end(), [](const auto& pool) {
+        if (pool.getDiscriminator() != V1_3::Request::MemoryPool::hidl_discriminator::hidlMemory) {
+            return false;
+        }
+        const auto& name = pool.hidlMemory().name();
+        return name == "ashmem" || name == "mmap_fd" || name == "hardware_buffer_blob" ||
+               name == "hardware_buffer";
+    });
+}
+
+static hardware::hidl_memory convertToV1_0(const V1_3::Request::MemoryPool& pool) {
+    switch (pool.getDiscriminator()) {
+        case V1_3::Request::MemoryPool::hidl_discriminator::hidlMemory:
+            return pool.hidlMemory();
+        case V1_3::Request::MemoryPool::hidl_discriminator::token:
+            return hardware::hidl_memory{};
+    }
+}
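+
+// Note: a driver-managed buffer (MemoryPool discriminator `token`) has no HIDL
+// memory representation, so the switch above substitutes an empty
+// hardware::hidl_memory{}; the compliantWith checks reject such pools up front.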
+
+static V1_3::Request::MemoryPool convertToV1_3(const hardware::hidl_memory& pool) {
+    V1_3::Request::MemoryPool ret;
+    ret.hidlMemory(pool);
+    return ret;
+}
+
+V1_0::Request convertToV1_0(const V1_0::Request& request) {
+    return request;
+}
+
+static V1_0::Request uncheckedConvertToV1_0(const V1_3::Request& request) {
+    hardware::hidl_vec<hardware::hidl_memory> pools(request.pools.size());
+    std::transform(request.pools.begin(), request.pools.end(), pools.begin(),
+                   [](const auto& pool) { return convertToV1_0(pool); });
+    return {.inputs = request.inputs, .outputs = request.outputs, .pools = std::move(pools)};
+}
+
+V1_0::Request convertToV1_0(const V1_3::Request& request) {
+    if (!compliantWithV1_0(request)) {
+        LOG(ERROR) << "Upcasting non-compliant request " << SHOW_IF_DEBUG(toString(request))
+                   << " from V1_3::Request to V1_0::Request of version 1.0";
+    }
+    return uncheckedConvertToV1_0(request);
+}
+
+V1_0::Request convertToV1_2(const V1_3::Request& request) {
+    if (!compliantWithV1_2(request)) {
+        LOG(ERROR) << "Upcasting non-compliant request " << SHOW_IF_DEBUG(toString(request))
+                   << " from V1_3::Request to V1_0::Request of version 1.2";
+    }
+    return uncheckedConvertToV1_0(request);
+}
+
+V1_3::Request convertToV1_3(const V1_0::Request& request) {
+    hardware::hidl_vec<V1_3::Request::MemoryPool> pools(request.pools.size());
+    std::transform(request.pools.begin(), request.pools.end(), pools.begin(),
+                   [](const auto& pool) { return convertToV1_3(pool); });
+    return {.inputs = request.inputs, .outputs = request.outputs, .pools = std::move(pools)};
+}
+
+V1_3::Request convertToV1_3(const V1_3::Request& request) {
+    return request;
+}
+
+FenceState syncWait(int fd, int timeout) {
+    // This implementation is directly based on the ::sync_wait() implementation.
+
+    struct pollfd fds;
+    int ret;
+
+    if (fd < 0) {
+        errno = EINVAL;
+        return FenceState::UNKNOWN;
+    }
+
+    fds.fd = fd;
+    fds.events = POLLIN;
+
+    do {
+        ret = poll(&fds, 1, timeout);
+        if (ret > 0) {
+            if (fds.revents & POLLNVAL) {
+                errno = EINVAL;
+                return FenceState::UNKNOWN;
+            }
+            if (fds.revents & POLLERR) {
+                errno = EINVAL;
+                return FenceState::ERROR;
+            }
+            return FenceState::SIGNALED;
+        } else if (ret == 0) {
+            errno = ETIME;
+            return FenceState::ACTIVE;
+        }
+    } while (ret == -1 && (errno == EINTR || errno == EAGAIN));
+
+    return FenceState::UNKNOWN;
+}
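+
+// Illustrative use of syncWait() with a hypothetical fence fd: a zero timeout
+// polls without blocking and distinguishes "still active" from failure:
+//
+//     switch (syncWait(fenceFd, /*timeout=*/0)) {
+//         case FenceState::SIGNALED: /* outputs are ready */ break;
+//         case FenceState::ACTIVE:   /* not signaled yet; retry later */ break;
+//         case FenceState::ERROR:
+//         case FenceState::UNKNOWN:  /* treat the execution as failed */ break;
+//     }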
+
+#ifdef NN_DEBUGGABLE
+uint32_t getProp(const char* str, uint32_t defaultValue) {
+    const std::string propStr = android::base::GetProperty(str, "");
+    if (propStr.size() > 0) {
+        return std::stoi(propStr);
+    } else {
+        return defaultValue;
+    }
+}
+#endif  // NN_DEBUGGABLE
+
+ErrorStatus uncheckedConvert(V1_0::ErrorStatus status) {
+    return nnTryGetValue(convert(status));
+}
+
+ErrorStatus uncheckedConvert(V1_3::ErrorStatus status) {
+    return nnTryGetValue(convert(status));
+}
+
+OperandType uncheckedConvert(V1_3::OperandType operandType) {
+    return nnTryGetValue(convert(operandType));
+}
+
+OperationType uncheckedConvert(V1_3::OperationType operationType) {
+    return nnTryGetValue(convert(operationType));
+}
+
+Operand::LifeTime uncheckedConvert(V1_3::OperandLifeTime lifetime) {
+    return nnTryGetValue(convert(lifetime));
+}
+
+MeasureTiming uncheckedConvert(V1_2::MeasureTiming measure) {
+    return nnTryGetValue(convert(measure));
+}
+
+DataLocation uncheckedConvert(const V1_0::DataLocation& location) {
+    return nnTryGetValue(convert(location));
+}
+
+Operand uncheckedConvert(const V1_3::Operand& operand) {
+    return nnTryGetValue(convert(operand));
+}
+
+Operand::ExtraParams uncheckedConvert(const V1_2::Operand::ExtraParams& params) {
+    return nnTryGetValue(convert(params));
+}
+
+Operand::SymmPerChannelQuantParams uncheckedConvert(const V1_2::SymmPerChannelQuantParams& params) {
+    return nnTryGetValue(convert(params));
+}
+
+Operand::ExtensionParams uncheckedConvert(const hardware::hidl_vec<uint8_t>& params) {
+    return params;
+}
+
+Operation uncheckedConvert(const V1_3::Operation& operation) {
+    return nnTryGetValue(convert(operation));
+}
+
+template <typename HalType>
+static auto convertVec(const hardware::hidl_vec<HalType>& items) {
+    std::vector<decltype(uncheckedConvert(items[0]))> result(items.size());
+    std::transform(items.begin(), items.end(), result.begin(),
+                   [](const HalType& item) { return uncheckedConvert(item); });
+    return result;
+}
+
+Model uncheckedConvert(const V1_3::Model& model) {
+    return nnTryGetValue(convert(model));
+}
+
+Model::Subgraph uncheckedConvert(const V1_3::Subgraph& subgraph) {
+    return nnTryGetValue(convert(subgraph));
+}
+
+Model::ExtensionNameAndPrefix uncheckedConvert(const V1_2::Model::ExtensionNameAndPrefix& x) {
+    return nnTryGetValue(convert(x));
+}
+
+Request uncheckedConvert(const V1_3::Request& request) {
+    return nnTryGetValue(convert(request));
+}
+
+Request::Argument uncheckedConvert(const V1_0::RequestArgument& requestArgument) {
+    return nnTryGetValue(convert(requestArgument));
+}
+
+Request::MemoryPool uncheckedConvert(const V1_3::Request::MemoryPool& memoryPool) {
+    return nnTryGetValue(convert(memoryPool));
+}
+
+OutputShape uncheckedConvert(const V1_2::OutputShape& outputShape) {
+    return nnTryGetValue(convert(outputShape));
+}
+
+std::vector<OutputShape> uncheckedConvert(
+        const hardware::hidl_vec<V1_2::OutputShape>& outputShapes) {
+    return convertVec(outputShapes);
+}
+
+Capabilities uncheckedConvert(const V1_3::Capabilities& capabilities) {
+    return nnTryGetValue(convert(capabilities));
+}
+
+Capabilities::OperandPerformance uncheckedConvert(
+        const V1_3::Capabilities::OperandPerformance& operandPerformance) {
+    return nnTryGetValue(convert(operandPerformance));
+}
+
+Capabilities::PerformanceInfo uncheckedConvert(const V1_0::PerformanceInfo& performanceInfo) {
+    return nnTryGetValue(convert(performanceInfo));
+}
+
+Extension uncheckedConvert(const V1_2::Extension& extension) {
+    return nnTryGetValue(convert(extension));
+}
+
+std::vector<Extension> uncheckedConvert(const hardware::hidl_vec<V1_2::Extension>& extensions) {
+    return convertVec(extensions);
+}
+
+Extension::OperandTypeInformation uncheckedConvert(
+        const V1_2::Extension::OperandTypeInformation& info) {
+    return nnTryGetValue(convert(info));
+}
+
+OptionalTimeoutDuration uncheckedConvert(const V1_3::OptionalTimeoutDuration& timeoutDuration) {
+    return nnTryGetValue(convert(timeoutDuration));
+}
+
+Timing uncheckedConvert(const V1_2::Timing& timing) {
+    return nnTryGetValue(convert(timing));
+}
+
+V1_0::ErrorStatus convertToV1_0(ErrorStatus status) {
+    return static_cast<V1_0::ErrorStatus>(static_cast<int32_t>(status));
+}
+
+V1_3::ErrorStatus convertToV1_3(ErrorStatus status) {
+    return nnTryGetValue(V1_3::utils::convert(status));
+}
+
+V1_3::OperandType convertToV1_3(OperandType operandType) {
+    return nnTryGetValue(V1_3::utils::convert(operandType));
+}
+
+V1_3::OperationType convertToV1_3(OperationType operationType) {
+    return nnTryGetValue(V1_3::utils::convert(operationType));
+}
+
+V1_3::OperandLifeTime convertToV1_3(Operand::LifeTime lifetime) {
+    return nnTryGetValue(V1_3::utils::convert(lifetime));
+}
+
+V1_1::ExecutionPreference convertToV1_1(ExecutionPreference preference) {
+    return nnTryGetValue(V1_1::utils::convert(preference));
+}
+
+V1_3::Priority convertToV1_3(Priority priority) {
+    return nnTryGetValue(V1_3::utils::convert(priority));
+}
+
+V1_2::MeasureTiming convertToV1_2(MeasureTiming measure) {
+    return nnTryGetValue(V1_2::utils::convert(measure));
+}
+
+V1_0::DataLocation convertToV1_0(const DataLocation& location) {
+    return nnTryGetValue(V1_0::utils::convert(location));
+}
+
+V1_3::Operand convertToV1_3(const Operand& operand) {
+    return nnTryGetValue(V1_3::utils::convert(operand));
+}
+
+V1_2::Operand::ExtraParams convertToV1_2(const Operand::ExtraParams& params) {
+    return nnTryGetValue(V1_2::utils::convert(params));
+}
+
+V1_2::SymmPerChannelQuantParams convertToV1_2(const Operand::SymmPerChannelQuantParams& params) {
+    return nnTryGetValue(V1_2::utils::convert(params));
+}
+
+hardware::hidl_vec<uint8_t> uncheckedConvert(const Operand::ExtensionParams& params) {
+    return params;
+}
+
+V1_3::Operation convertToV1_3(const Operation& operation) {
+    return nnTryGetValue(V1_3::utils::convert(operation));
+}
+
+template <typename CanonicalType>
+static auto convertVecToV1_0(const std::vector<CanonicalType>& items) {
+    hardware::hidl_vec<decltype(convertToV1_0(items[0]))> result(items.size());
+    std::transform(items.begin(), items.end(), result.begin(),
+                   [](const CanonicalType& item) { return convertToV1_0(item); });
+    return result;
+}
+
+template <typename CanonicalType>
+static auto convertVecToV1_2(const std::vector<CanonicalType>& items) {
+    hardware::hidl_vec<decltype(convertToV1_2(items[0]))> result(items.size());
+    std::transform(items.begin(), items.end(), result.begin(),
+                   [](const CanonicalType& item) { return convertToV1_2(item); });
+    return result;
+}
+
+template <typename CanonicalType>
+static auto convertVecToV1_3(const std::vector<CanonicalType>& items) {
+    hardware::hidl_vec<decltype(convertToV1_3(items[0]))> result(items.size());
+    std::transform(items.begin(), items.end(), result.begin(),
+                   [](const CanonicalType& item) { return convertToV1_3(item); });
+    return result;
+}
+
+V1_2::OutputShape convertToV1_2(const OutputShape& outputShape) {
+    return nnTryGetValue(V1_2::utils::convert(outputShape));
+}
+
+hardware::hidl_vec<V1_2::OutputShape> convertToV1_2(const std::vector<OutputShape>& outputShapes) {
+    return convertVecToV1_2(outputShapes);
+}
+
+V1_3::Model convertToV1_3(const Model& model) {
+    return nnTryGetValue(V1_3::utils::convert(model));
+}
+
+V1_3::Subgraph convertToV1_3(const Model::Subgraph& subgraph) {
+    return nnTryGetValue(V1_3::utils::convert(subgraph));
+}
+
+V1_2::Model::ExtensionNameAndPrefix convertToV1_2(const Model::ExtensionNameAndPrefix& x) {
+    return nnTryGetValue(V1_2::utils::convert(x));
+}
+
+V1_3::Request convertToV1_3(const Request& request) {
+    return nnTryGetValue(V1_3::utils::convert(request));
+}
+
+V1_0::RequestArgument convertToV1_0(const Request::Argument& requestArgument) {
+    return nnTryGetValue(V1_0::utils::convert(requestArgument));
+}
+
+V1_3::Request::MemoryPool convertToV1_3(const Request::MemoryPool& memoryPool) {
+    return nnTryGetValue(V1_3::utils::convert(memoryPool));
+}
+
+std::vector<Request::MemoryPool> uncheckedConvert(
+        const hardware::hidl_vec<V1_3::Request::MemoryPool>& memoryPools) {
+    return convertVec(memoryPools);
+}
+
+V1_3::OptionalTimePoint convertToV1_3(const OptionalTimePoint& timePoint) {
+    return nnTryGetValue(V1_3::utils::convert(timePoint));
+}
+
+V1_3::OptionalTimeoutDuration convertToV1_3(const OptionalTimeoutDuration& timeoutDuration) {
+    return nnTryGetValue(V1_3::utils::convert(timeoutDuration));
+}
+
+V1_2::Timing convertToV1_2(const Timing& timing) {
+    return nnTryGetValue(V1_2::utils::convert(timing));
+}
+
+V1_3::BufferRole convertToV1_3(const BufferRole& bufferRole) {
+    return nnTryGetValue(V1_3::utils::convert(bufferRole));
+}
+
+hardware::hidl_vec<V1_3::BufferRole> convertToV1_3(const std::vector<BufferRole>& bufferRoles) {
+    return convertVecToV1_3(bufferRoles);
+}
+
+hardware::hidl_vec<uint8_t> convertToV1_0(const Model::OperandValues& operandValues) {
+    return nnTryGetValue(V1_0::utils::convert(operandValues));
+}
+
+hardware::hidl_memory convertToV1_0(const Memory& memory) {
+    return nnTryGetValue(V1_0::utils::convert(memory));
+}
+
+Memory uncheckedConvert(const hardware::hidl_memory& memory) {
+    return nnTryGetValue(convert(memory));
+}
+
+hardware::hidl_vec<hardware::hidl_memory> convertToV1_0(const std::vector<Memory>& memories) {
+    return convertVecToV1_0(memories);
+}
+
+std::vector<Memory> uncheckedConvert(const hardware::hidl_vec<hardware::hidl_memory>& memories) {
+    return convertVec(memories);
+}
+
+std::vector<Model::Subgraph> uncheckedConvert(
+        const hardware::hidl_vec<V1_3::Subgraph>& subgraphs) {
+    return convertVec(subgraphs);
+}
+
+std::vector<Operand> uncheckedConvert(const hardware::hidl_vec<V1_3::Operand>& operands) {
+    return convertVec(operands);
+}
+
+}  // namespace nn
+}  // namespace android
diff --git a/nn/common/LegacyUtils.cpp b/nn/common/LegacyUtils.cpp
new file mode 100644
index 000000000..7417ed8bf
--- /dev/null
+++ b/nn/common/LegacyUtils.cpp
@@ -0,0 +1,3565 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "Utils"
+
+#include "Utils.h"
+
+#include <android-base/logging.h>
+#include <android-base/properties.h>
+#include <android-base/strings.h>
+#include <errno.h>
+#include <poll.h>
+#include <stdint.h>
+#include <sys/system_properties.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <algorithm>
+#include <cfloat>
+#include <functional>
+#include <iostream>
+#include <limits>
+#include <numeric>
+#include <set>
+#include <string>
+#include <tuple>
+#include <unordered_map>
+#include <utility>
+#include <vector>
+
+#include "ControlFlow.h"
+#include "NeuralNetworks.h"
+#include "NeuralNetworksOEM.h"
+#include "OperationResolver.h"
+#include "ValidateHal.h"
+#include "nnapi/TypeUtils.h"
+
+namespace android {
+namespace nn {
+
+constexpr V1_0::PerformanceInfo kNoPerformanceInfo = {.execTime = FLT_MAX, .powerUsage = FLT_MAX};
+
+const char kVLogPropKey[] = "debug.nn.vlog";
+int vLogMask = ~0;
+
+// Split the space-separated list of tags from the verbose log setting and build the
+// logging mask from it. Note that '1' and 'all' are special cases to enable all
+// verbose logging.
+//
+// NN API verbose logging setting comes from the system property debug.nn.vlog.
+// Example:
+// setprop debug.nn.vlog 1 : enable all logging tags.
+// setprop debug.nn.vlog "model compilation" : only enable logging for MODEL and
+// COMPILATION tags.
+void initVLogMask() {
+    vLogMask = 0;
+    const std::string vLogSetting = android::base::GetProperty(kVLogPropKey, "");
+    if (vLogSetting.empty()) {
+        return;
+    }
+
+    std::unordered_map<std::string, int> vLogFlags = {{"1", -1},
+                                                      {"all", -1},
+                                                      {"model", MODEL},
+                                                      {"compilation", COMPILATION},
+                                                      {"execution", EXECUTION},
+                                                      {"cpuexe", CPUEXE},
+                                                      {"manager", MANAGER},
+                                                      {"driver", DRIVER},
+                                                      {"memory", MEMORY}};
+
+    std::vector<std::string> elements = android::base::Split(vLogSetting, " ,:");
+    for (const auto& elem : elements) {
+        const auto& flag = vLogFlags.find(elem);
+        if (flag == vLogFlags.end()) {
+            LOG(ERROR) << "Unknown trace flag: " << elem;
+            continue;
+        }
+
+        if (flag->second == -1) {
+            // -1 is used for the special values "1" and "all" that enable all
+            // tracing.
+            vLogMask = ~0;
+            return;
+        } else {
+            vLogMask |= 1 << flag->second;
+        }
+    }
+}
+
+TimeoutDuration makeTimeoutDuration(uint64_t nanoseconds) {
+    // According to the standard, std::chrono::nanoseconds::rep is a signed
+    // integer type of at least 64 bits. This check prevents an overflow when
+    // rep is exactly 64 bits.
+    if constexpr (sizeof(std::chrono::nanoseconds::rep) == sizeof(int64_t)) {
+        nanoseconds = std::min(nanoseconds,
+                               static_cast<uint64_t>(std::chrono::nanoseconds::max().count()));
+    }
+    return std::chrono::nanoseconds{nanoseconds};
+}
+
+Deadline makeDeadline(TimeoutDuration duration) {
+    const auto maxTime = Deadline::max();
+    const auto currentTime = std::chrono::steady_clock::now();
+
+    // If there would be an overflow, use the max value.
+    if (duration > maxTime - currentTime) {
+        return maxTime;
+    }
+    return currentTime + duration;
+}
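+
+// For example, makeTimeoutDuration(std::numeric_limits<uint64_t>::max()) clamps
+// to std::chrono::nanoseconds::max() instead of wrapping to a negative rep, and
+// makeDeadline(TimeoutDuration::max()) saturates to Deadline::max() rather than
+// overflowing steady_clock's time_point.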
+
+static uint64_t getMaxNanosecondsSinceEpoch() {
+    const auto maxTime =
+            std::chrono::time_point<std::chrono::steady_clock, std::chrono::nanoseconds>::max();
+    return maxTime.time_since_epoch().count();
+}
+
+std::optional<Deadline> makeDeadline(const V1_3::OptionalTimePoint& timePoint) {
+    using Discriminator = V1_3::OptionalTimePoint::hidl_discriminator;
+    if (timePoint.getDiscriminator() == Discriminator::none) {
+        return std::nullopt;
+    }
+    const uint64_t nanosecondsSinceEpoch = timePoint.nanosecondsSinceEpoch();
+    const uint64_t maxNanosecondsSinceEpoch = getMaxNanosecondsSinceEpoch();
+
+    // Clamp time point to max.
+    if (nanosecondsSinceEpoch >= maxNanosecondsSinceEpoch) {
+        return Deadline::max();
+    }
+
+    // Return provided time point.
+    return Deadline{std::chrono::nanoseconds{nanosecondsSinceEpoch}};
+}
+
+bool hasDeadlinePassed(const std::optional<Deadline>& deadline) {
+    if (!deadline.has_value()) {
+        return false;
+    }
+    return std::chrono::steady_clock::now() >= *deadline;
+}
+
+static OptionalTimePoint makeTimePoint(const Deadline& deadline) {
+    return deadline;
+}
+
+OptionalTimePoint makeTimePoint(const std::optional<Deadline>& deadline) {
+    return deadline.has_value() ? makeTimePoint(*deadline) : OptionalTimePoint{};
+}
+
+static bool isExtensionOperandType(int32_t type) {
+    return (static_cast<uint32_t>(type) >> kExtensionTypeBits) != 0;
+}
+
+static bool isExtensionOperationType(ANeuralNetworksOperationType type) {
+    return (static_cast<uint32_t>(type) >> kExtensionTypeBits) != 0;
+}
+
+bool isExtensionOperandType(V1_3::OperandType type) {
+    return isExtensionOperandType(static_cast<int32_t>(type));
+}
+
+bool isExtensionOperationType(V1_3::OperationType type) {
+    return isExtensionOperationType(static_cast<int32_t>(type));
+}
+
+namespace {
+
+template <typename EntryType, uint32_t entryCount, uint32_t entryCountOEM>
+EntryType tableLookup(const EntryType (&table)[entryCount],
+                      const EntryType (&tableOEM)[entryCountOEM], uint32_t code) {
+    if (code < entryCount) {
+        return table[code];
+    } else if (code >= kOEMCodeBase && (code - kOEMCodeBase) < entryCountOEM) {
+        return tableOEM[code - kOEMCodeBase];
+    } else {
+        nnAssert(!"tableLookup: bad code");
+        return EntryType();
+    }
+}
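+
+// tableLookup() resolves both the regular and the OEM code ranges; assuming
+// kOEMCodeBase == 10000, code 3 reads table[3], code 10001 reads tableOEM[1],
+// and any code between the two ranges trips the nnAssert in tableLookup().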
+
+static Version convert(HalVersion halVersion) {
+    switch (halVersion) {
+        case HalVersion::UNKNOWN:
+            break;
+        case HalVersion::V1_0:
+            return Version::ANDROID_OC_MR1;
+        case HalVersion::V1_1:
+            return Version::ANDROID_P;
+        case HalVersion::V1_2:
+            return Version::ANDROID_Q;
+        case HalVersion::V1_3:
+            return Version::ANDROID_R;
+    }
+    LOG(FATAL) << "Cannot convert " << halVersion;
+    return {};
+}
+
+class OperationValidationContext : public IOperationValidationContext {
+    DISALLOW_IMPLICIT_CONSTRUCTORS(OperationValidationContext);
+
+   public:
+    OperationValidationContext(const char* operationName, uint32_t inputCount,
+                               const uint32_t* inputIndexes, uint32_t outputCount,
+                               const uint32_t* outputIndexes, const Operand* operands)
+        : operationName(operationName),
+          inputCount(inputCount),
+          inputIndexes(inputIndexes),
+          outputCount(outputCount),
+          outputIndexes(outputIndexes),
+          operands(operands) {}
+
+    const char* getOperationName() const override;
+
+    uint32_t getNumInputs() const override;
+    OperandType getInputType(uint32_t index) const override;
+    Shape getInputShape(uint32_t index) const override;
+    const Operand::ExtraParams& getInputExtraParams(uint32_t index) const override;
+
+    uint32_t getNumOutputs() const override;
+    OperandType getOutputType(uint32_t index) const override;
+    Shape getOutputShape(uint32_t index) const override;
+
+   private:
+    const Operand* getInputOperand(uint32_t index) const;
+    const Operand* getOutputOperand(uint32_t index) const;
+
+    const char* operationName;
+    uint32_t inputCount;
+    const uint32_t* inputIndexes;
+    uint32_t outputCount;
+    const uint32_t* outputIndexes;
+    const Operand* operands;
+    Version version;
+};
+
+const char* OperationValidationContext::getOperationName() const {
+    return operationName;
+}
+
+const Operand* OperationValidationContext::getInputOperand(uint32_t index) const {
+    CHECK(index < static_cast<uint32_t>(inputCount));
+    return &operands[inputIndexes[index]];
+}
+
+const Operand* OperationValidationContext::getOutputOperand(uint32_t index) const {
+    CHECK(index < static_cast<uint32_t>(outputCount));
+    return &operands[outputIndexes[index]];
+}
+
+uint32_t OperationValidationContext::getNumInputs() const {
+    return inputCount;
+}
+
+uint32_t OperationValidationContext::getNumOutputs() const {
+    return outputCount;
+}
+
+OperandType OperationValidationContext::getInputType(uint32_t index) const {
+    return getInputOperand(index)->type;
+}
+
+Shape OperationValidationContext::getInputShape(uint32_t index) const {
+    const Operand* operand = getInputOperand(index);
+    return {operand->type, operand->dimensions, operand->scale, operand->zeroPoint,
+            operand->extraParams};
+}
+
+const Operand::ExtraParams& OperationValidationContext::getInputExtraParams(uint32_t index) const {
+    return getInputOperand(index)->extraParams;
+}
+
+OperandType OperationValidationContext::getOutputType(uint32_t index) const {
+    return getOutputOperand(index)->type;
+}
+
+Shape OperationValidationContext::getOutputShape(uint32_t index) const {
+    const Operand* operand = getOutputOperand(index);
+    return {operand->type, operand->dimensions, operand->scale, operand->zeroPoint,
+            operand->extraParams};
+}
+
+}  // anonymous namespace
+
+#define COUNT(X) (sizeof(X) / sizeof(X[0]))
+
+std::string getOperandTypeName(V1_3::OperandType type) {
+    return toString(type);
+}
+
+std::string getOperationName(V1_3::OperationType type) {
+    return toString(type);
+}
+
+const uint32_t kSizeOfDataType[]{
+        4,  // ANEURALNETWORKS_FLOAT32
+        4,  // ANEURALNETWORKS_INT32
+        4,  // ANEURALNETWORKS_UINT32
+        4,  // ANEURALNETWORKS_TENSOR_FLOAT32
+        4,  // ANEURALNETWORKS_TENSOR_INT32
+        1,  // ANEURALNETWORKS_TENSOR_QUANT8_ASYMM
+        1,  // ANEURALNETWORKS_BOOL
+        2,  // ANEURALNETWORKS_TENSOR_QUANT16_SYMM
+        2,  // ANEURALNETWORKS_TENSOR_FLOAT16
+        1,  // ANEURALNETWORKS_TENSOR_BOOL8
+        2,  // ANEURALNETWORKS_FLOAT16
+        1,  // ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL
+        2,  // ANEURALNETWORKS_TENSOR_QUANT16_ASYMM
+        1,  // ANEURALNETWORKS_TENSOR_QUANT8_SYMM
+        1,  // ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED
+        0,  // ANEURALNETWORKS_MODEL
+};
+
+static_assert(COUNT(kSizeOfDataType) == kNumberOfDataTypes, "kSizeOfDataType is incorrect");
+
+const bool kScalarDataType[]{
+        true,   // ANEURALNETWORKS_FLOAT32
+        true,   // ANEURALNETWORKS_INT32
+        true,   // ANEURALNETWORKS_UINT32
+        false,  // ANEURALNETWORKS_TENSOR_FLOAT32
+        false,  // ANEURALNETWORKS_TENSOR_INT32
+        false,  // ANEURALNETWORKS_TENSOR_QUANT8_ASYMM
+        true,   // ANEURALNETWORKS_BOOL
+        false,  // ANEURALNETWORKS_TENSOR_QUANT16_SYMM
+        false,  // ANEURALNETWORKS_TENSOR_FLOAT16
+        false,  // ANEURALNETWORKS_TENSOR_BOOL8
+        true,   // ANEURALNETWORKS_FLOAT16
+        false,  // ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL
+        false,  // ANEURALNETWORKS_TENSOR_QUANT16_ASYMM
+        false,  // ANEURALNETWORKS_TENSOR_QUANT8_SYMM
+        false,  // ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED
+        true,   // ANEURALNETWORKS_MODEL
+};
+
+static_assert(COUNT(kScalarDataType) == kNumberOfDataTypes, "kScalarDataType is incorrect");
kNumberOfDataTypes, "kScalarDataType is incorrect"); + +const uint32_t kSizeOfDataTypeOEM[]{ + 0, // ANEURALNETWORKS_OEM + 1, // ANEURALNETWORKS_TENSOR_OEM_BYTE +}; + +static_assert(COUNT(kSizeOfDataTypeOEM) == kNumberOfDataTypesOEM, + "kSizeOfDataTypeOEM is incorrect"); + +const bool kScalarDataTypeOEM[]{ + true, // ANEURALNETWORKS_OEM + false, // ANEURALNETWORKS_TENSOR_OEM_BYTE +}; + +static_assert(COUNT(kScalarDataTypeOEM) == kNumberOfDataTypesOEM, + "kScalarDataTypeOEM is incorrect"); + +bool nonExtensionOperandTypeIsScalar(int type) { + CHECK(!isExtensionOperandType(type)) << "Extension operand types are not supported"; + return tableLookup(kScalarDataType, kScalarDataTypeOEM, type); +} + +uint32_t nonExtensionOperandSizeOfData(OperandType type, const std::vector& dimensions) { + const size_t size = getNonExtensionSize(type, dimensions).value(); + CHECK_LE(size, std::numeric_limits::max()); + return size; +} + +uint32_t nonExtensionOperandSizeOfData(V1_3::OperandType type, + const std::vector& dimensions) { + return nonExtensionOperandSizeOfData(uncheckedConvert(type), dimensions); +} + +// Returns a pair of {false, size} on success, {true, 0} if size overflows uint32_t. +static std::pair sizeOfTensorDataHelper(uint32_t sizeOfElement, + const std::vector& dimensions) { + if (dimensions.empty()) { + return {false, 0}; + } + uint64_t size = static_cast(sizeOfElement); + constexpr uint64_t kMaxSize = static_cast(std::numeric_limits::max()); + for (uint32_t d : dimensions) { + size *= d; + if (size > kMaxSize) return {true, 0}; + } + return {false, static_cast(size)}; +} + +uint32_t sizeOfTensorData(uint32_t sizeOfElement, const std::vector& dimensions) { + const auto [overflow, size] = sizeOfTensorDataHelper(sizeOfElement, dimensions); + CHECK(!overflow); + return size; +} + +bool nonExtensionOperandSizeOfDataOverflowsUInt32(OperandType type, + const std::vector& dimensions) { + CHECK(!isExtension(type)) << "Size of extension operand data is unknown"; + int n = static_cast(type); + uint32_t sizeOfElement = tableLookup(kSizeOfDataType, kSizeOfDataTypeOEM, n); + return tableLookup(kScalarDataType, kScalarDataTypeOEM, n) + ? 
+
+bool nonExtensionOperandSizeOfDataOverflowsUInt32(OperandType type,
+                                                  const std::vector<uint32_t>& dimensions) {
+    CHECK(!isExtension(type)) << "Size of extension operand data is unknown";
+    int n = static_cast<int>(type);
+    uint32_t sizeOfElement = tableLookup(kSizeOfDataType, kSizeOfDataTypeOEM, n);
+    return tableLookup(kScalarDataType, kScalarDataTypeOEM, n)
+                   ? false
+                   : sizeOfTensorDataOverflowsUInt32(sizeOfElement, dimensions);
+}
+
+bool nonExtensionOperandSizeOfDataOverflowsUInt32(V1_3::OperandType type,
+                                                  const std::vector<uint32_t>& dimensions) {
+    return nonExtensionOperandSizeOfDataOverflowsUInt32(uncheckedConvert(type), dimensions);
+}
+
+bool sizeOfTensorDataOverflowsUInt32(uint32_t sizeOfElement,
+                                     const std::vector<uint32_t>& dimensions) {
+    return sizeOfTensorDataHelper(sizeOfElement, dimensions).first;
+}
+
+bool tensorHasUnspecifiedDimensions(int type, const uint32_t* dim, uint32_t dimCount) {
+    if (!isExtensionOperandType(type)) {
+        CHECK(!nonExtensionOperandTypeIsScalar(type))
+                << "A scalar type can never have unspecified dimensions";
+    }
+    return dimCount == 0 || std::find(dim, dim + dimCount, 0) != (dim + dimCount);
+}
+
+bool tensorHasUnspecifiedDimensions(OperandType type, const std::vector<uint32_t>& dimensions) {
+    return tensorHasUnspecifiedDimensions(static_cast<int>(type), dimensions.data(),
+                                          dimensions.size());
+}
+
+bool tensorHasUnspecifiedDimensions(V1_3::OperandType type,
+                                    const std::vector<uint32_t>& dimensions) {
+    return tensorHasUnspecifiedDimensions(static_cast<int>(type), dimensions.data(),
+                                          dimensions.size());
+}
+
+bool tensorHasUnspecifiedDimensions(const ANeuralNetworksOperandType* type) {
+    return tensorHasUnspecifiedDimensions(type->type, type->dimensions, type->dimensionCount);
+}
+
+bool tensorHasUnspecifiedDimensions(const Operand& operand) {
+    return tensorHasUnspecifiedDimensions(operand.type, operand.dimensions);
+}
+
+bool tensorHasUnspecifiedDimensions(const V1_3::Operand& operand) {
+    return tensorHasUnspecifiedDimensions(static_cast<int>(operand.type),
+                                          operand.dimensions.data(), operand.dimensions.size());
+}
+
+uint32_t alignBytesNeeded(uint32_t index, size_t length) {
+    uint32_t pattern;
+    if (length < 2) {
+        pattern = 0;  // No alignment necessary
+    } else if (length < 4) {
+        pattern = 1;  // Align on 2-byte boundary
+    } else {
+        pattern = 3;  // Align on 4-byte boundary
+    }
+    uint32_t extra = (~(index - 1)) & pattern;
+    return extra;
+}
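+
+// Worked example: alignBytesNeeded(3, 4) picks pattern 3 (4-byte boundary) and
+// returns (~(3 - 1)) & 3 == 1, so writing at index 3 + 1 == 4 is aligned;
+// alignBytesNeeded(6, 2) returns 0 because index 6 is already 2-byte aligned.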
+
+void logModelToInfo(const V1_0::Model& model) {
+    LOG(INFO) << "V1_0::Model start";
+    LOG(INFO) << "operands" << toString(model.operands);
+    LOG(INFO) << "operations" << toString(model.operations);
+    LOG(INFO) << "inputIndexes" << toString(model.inputIndexes);
+    LOG(INFO) << "outputIndexes" << toString(model.outputIndexes);
+    LOG(INFO) << "operandValues size " << model.operandValues.size();
+    LOG(INFO) << "pools" << SHOW_IF_DEBUG(toString(model.pools));
+}
+
+void logModelToInfo(const V1_1::Model& model) {
+    LOG(INFO) << "V1_1::Model start";
+    LOG(INFO) << "operands" << toString(model.operands);
+    LOG(INFO) << "operations" << toString(model.operations);
+    LOG(INFO) << "inputIndexes" << toString(model.inputIndexes);
+    LOG(INFO) << "outputIndexes" << toString(model.outputIndexes);
+    LOG(INFO) << "operandValues size " << model.operandValues.size();
+    LOG(INFO) << "pools" << SHOW_IF_DEBUG(toString(model.pools));
+}
+
+void logModelToInfo(const V1_2::Model& model) {
+    LOG(INFO) << "V1_2::Model start";
+    LOG(INFO) << "operands" << toString(model.operands);
+    LOG(INFO) << "operations" << toString(model.operations);
+    LOG(INFO) << "inputIndexes" << toString(model.inputIndexes);
+    LOG(INFO) << "outputIndexes" << toString(model.outputIndexes);
+    LOG(INFO) << "operandValues size " << model.operandValues.size();
+    LOG(INFO) << "pools" << SHOW_IF_DEBUG(toString(model.pools));
+    LOG(INFO) << "relaxComputationFloat32toFloat16 " << model.relaxComputationFloat32toFloat16;
+    LOG(INFO) << "extensionNameToPrefix" << toString(model.extensionNameToPrefix);
+}
+
+static void logSubgraphToInfo(std::string label, const V1_3::Subgraph& subgraph) {
+    LOG(INFO) << label << ".operands" << toString(subgraph.operands);
+    LOG(INFO) << label << ".operations" << toString(subgraph.operations);
+    LOG(INFO) << label << ".inputIndexes" << toString(subgraph.inputIndexes);
+    LOG(INFO) << label << ".outputIndexes" << toString(subgraph.outputIndexes);
+}
+
+void logModelToInfo(const V1_3::Model& model) {
+    LOG(INFO) << "V1_3::Model start";
+    logSubgraphToInfo("main", model.main);
+    for (uint32_t i = 0, n = model.referenced.size(); i < n; ++i) {
+        logSubgraphToInfo("referenced[" + std::to_string(i) + "]", model.referenced[i]);
+    }
+    LOG(INFO) << "operandValues size " << model.operandValues.size();
+    LOG(INFO) << "pools" << SHOW_IF_DEBUG(toString(model.pools));
+    LOG(INFO) << "relaxComputationFloat32toFloat16 " << model.relaxComputationFloat32toFloat16;
+    LOG(INFO) << "extensionNameToPrefix" << toString(model.extensionNameToPrefix);
+}
+
+void logModelToInfo(const Model& model) {
+    LOG(INFO) << "Model start";
+    logModelToInfo(convertToV1_3(model));
+}
+
+bool validateOperandSymmPerChannelQuantParams(
+        const V1_3::Operand& halOperand,
+        const ANeuralNetworksSymmPerChannelQuantParams& channelQuant, const char* tag) {
+    if (halOperand.type != V1_3::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL) {
+        return false;
+    }
+
+    NN_RET_CHECK_LT(channelQuant.channelDim, halOperand.dimensions.size()) << tag;
+    NN_RET_CHECK(channelQuant.scales != nullptr) << tag;
+    NN_RET_CHECK_EQ(channelQuant.scaleCount, halOperand.dimensions[channelQuant.channelDim])
+            << tag;
+    NN_RET_CHECK_NE(halOperand.dimensions[channelQuant.channelDim], 0u)
+            << tag << " channel dimension " << channelQuant.channelDim << " is underspecified";
+    for (uint32_t i = 0; i < halOperand.dimensions[channelQuant.channelDim]; i++) {
+        NN_RET_CHECK_GT(channelQuant.scales[i], 0.0f) << tag << " invalid scaleArray[" << i << "]";
+    }
+    return true;
+}
+
+static bool validateScalarDimensions(const ANeuralNetworksOperandType& type, const char* tag) {
+    NN_RET_CHECK_EQ(type.dimensionCount, 0u) << tag << " invalid dimensions for scalar type";
+    NN_RET_CHECK(type.dimensions == nullptr) << tag << " invalid dimensions for scalar type";
+    return true;
+}
+
+static bool validateQuant8AsymmParams(const ANeuralNetworksOperandType& type, const char* tag) {
+    NN_RET_CHECK(0 <= type.zeroPoint && type.zeroPoint <= 255)
+            << tag << " invalid zeroPoint: " << type.zeroPoint;
+    NN_RET_CHECK_GT(type.scale, 0.f) << tag << " invalid scale";
+    return true;
+}
+
+static bool validateQuant8AsymmSignedParams(const ANeuralNetworksOperandType& type,
+                                            const char* tag) {
+    NN_RET_CHECK(-128 <= type.zeroPoint && type.zeroPoint <= 127)
+            << tag << " invalid zeroPoint: " << type.zeroPoint;
+    NN_RET_CHECK_GT(type.scale, 0.f) << tag << " invalid scale";
+    return true;
+}
+
+static bool validateQuant8SymmParams(const ANeuralNetworksOperandType& type, const char* tag) {
+    NN_RET_CHECK_EQ(type.zeroPoint, 0) << tag << " invalid zeroPoint: " << type.zeroPoint;
+    NN_RET_CHECK_GT(type.scale, 0.f) << tag << " invalid scale";
+    return true;
+}
+
+static bool validateQuant16AsymmParams(const ANeuralNetworksOperandType& type, const char* tag) {
+    NN_RET_CHECK(0 <= type.zeroPoint && type.zeroPoint <= 65535)
+            << tag << " invalid zeroPoint: " << type.zeroPoint;
+    NN_RET_CHECK_GT(type.scale, 0.f) << tag << " invalid scale";
+    return true;
+}
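+
+// These checks mirror the NNAPI quantization formula
+// realValue = scale * (quantizedValue - zeroPoint): TENSOR_QUANT8_ASYMM stores
+// 8-bit values in [0, 255], so its zeroPoint must also lie in [0, 255], while
+// the signed variant uses [-128, 127] and the symmetric variants pin zeroPoint
+// to 0.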
+
+static bool validateQuantSymmParams(const ANeuralNetworksOperandType& type, const char* tag) {
+    NN_RET_CHECK_EQ(type.zeroPoint, 0) << tag << " zeroPoint is not zero";
+    NN_RET_CHECK_GT(type.scale, 0.f) << tag << " invalid scale";
+    return true;
+}
+
+static bool validateNoQuantParams(const ANeuralNetworksOperandType& type, const char* tag) {
+    NN_RET_CHECK_EQ(type.zeroPoint, 0) << tag << " zeroPoint is not zero";
+    NN_RET_CHECK_EQ(type.scale, 0.f) << tag << " scale is not zero";
+    return true;
+}
+
+static bool validateTensorDimensions(
+        const ANeuralNetworksOperandType& type,
+        const Extension::OperandTypeInformation* const extensionOperandTypeInfo, const char* tag,
+        bool allowPartial) {
+    if (!allowPartial) {
+        NN_RET_CHECK_GT(type.dimensionCount, 0u) << tag << " invalid operand dimensions";
+    }
+    uint64_t size =
+            isExtensionOperandType(type.type)
+                    ? extensionOperandTypeInfo->byteSize
+                    : tableLookup(kSizeOfDataType, kSizeOfDataTypeOEM,
+                                  static_cast<uint32_t>(type.type));
+    constexpr uint64_t kMaxSize = std::numeric_limits<uint32_t>::max();
+    for (uint32_t i = 0; i < type.dimensionCount; i++) {
+        if (!allowPartial) {
+            NN_RET_CHECK_NE(type.dimensions[i], 0u) << tag << " invalid operand dimensions";
+        }
+        if (type.dimensions[i] != 0) {
+            size *= type.dimensions[i];
+            NN_RET_CHECK_LE(size, kMaxSize) << tag << " operand byte size exceeds " << kMaxSize;
+        }
+    }
+    return true;
+}
+
+static bool validateOperandTypeHelper(
+        const ANeuralNetworksOperandType& type,
+        const Extension::OperandTypeInformation* const extensionOperandTypeInfo, const char* tag,
+        bool allowPartial) {
+    NN_RET_CHECK_EQ(type.dimensionCount == 0, type.dimensions == nullptr);
+    if (isExtensionOperandType(type.type)) {
+        NN_RET_CHECK(extensionOperandTypeInfo != nullptr);
+        if (extensionOperandTypeInfo->isTensor) {
+            NN_RET_CHECK(
+                    validateTensorDimensions(type, extensionOperandTypeInfo, tag, allowPartial));
+        } else {
+            NN_RET_CHECK(validateScalarDimensions(type, tag));
+        }
+        return validateNoQuantParams(type, tag);
+    }
+
+    NN_RET_CHECK(extensionOperandTypeInfo == nullptr);
+    NN_RET_CHECK(validCode(kNumberOfDataTypes, kNumberOfDataTypesOEM, type.type))
+            << tag << " invalid OperandType: " << type.type;
+
+    bool isScalar = tableLookup(kScalarDataType, kScalarDataTypeOEM, type.type);
+    if (isScalar) {
+        NN_RET_CHECK(validateScalarDimensions(type, tag));
+        if (type.type != ANEURALNETWORKS_OEM_SCALAR) {  // Historically, we have allowed OEM types
+                                                        // to use quantization parameters.
+            NN_RET_CHECK(validateNoQuantParams(type, tag));
+        }
+    } else {
+        NN_RET_CHECK(validateTensorDimensions(type, extensionOperandTypeInfo, tag, allowPartial));
+        if (type.type == ANEURALNETWORKS_TENSOR_QUANT8_ASYMM) {
+            NN_RET_CHECK(validateQuant8AsymmParams(type, tag));
+        } else if (type.type == ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED) {
+            NN_RET_CHECK(validateQuant8AsymmSignedParams(type, tag));
+        } else if (type.type == ANEURALNETWORKS_TENSOR_QUANT8_SYMM) {
+            NN_RET_CHECK(validateQuant8SymmParams(type, tag));
+        } else if (type.type == ANEURALNETWORKS_TENSOR_QUANT16_ASYMM) {
+            NN_RET_CHECK(validateQuant16AsymmParams(type, tag));
+        } else if (type.type == ANEURALNETWORKS_TENSOR_QUANT16_SYMM) {
+            NN_RET_CHECK(validateQuantSymmParams(type, tag));
+        } else if (type.type == ANEURALNETWORKS_TENSOR_INT32) {
+            // TODO(b/119869082): TENSOR_INT32 should not use quantization parameters.
+        } else if (type.type == ANEURALNETWORKS_TENSOR_OEM_BYTE) {
+            // Historically, we have allowed OEM types to use quantization parameters.
+        } else {
+            NN_RET_CHECK(validateNoQuantParams(type, tag));
+        }
+    }
+
+    return true;
+}
+
+int validateOperandType(const ANeuralNetworksOperandType& type,
+                        const Extension::OperandTypeInformation* const extensionOperandTypeInfo,
+                        const char* tag, bool allowPartial) {
+    return validateOperandTypeHelper(type, extensionOperandTypeInfo, tag, allowPartial)
+                   ? ANEURALNETWORKS_NO_ERROR
+                   : ANEURALNETWORKS_BAD_DATA;
+}
+
+int validateOperandList(uint32_t count, const uint32_t* list, uint32_t operandCount,
+                        const char* tag) {
+    for (uint32_t i = 0; i < count; i++) {
+        if (list[i] >= operandCount) {
+            LOG(ERROR) << tag << " invalid operand index at " << i << " = " << list[i]
+                       << ", operandCount " << operandCount;
+            return ANEURALNETWORKS_BAD_DATA;
+        }
+    }
+    return ANEURALNETWORKS_NO_ERROR;
+}
+
+int validateOperationOperandTypes(const std::vector<Operand>& operands, uint32_t inOperandCount,
+                                  const uint32_t* inOperandIndexes,
+                                  const std::vector<OperandType>& inExpectedTypes,
+                                  uint32_t outOperandCount, const uint32_t* outOperandIndexes,
+                                  const std::vector<OperandType>& outExpectedInTypes) {
+    if (inOperandCount != static_cast<uint32_t>(inExpectedTypes.size()) ||
+        outOperandCount != static_cast<uint32_t>(outExpectedInTypes.size())) {
+        LOG(ERROR) << "Wrong operand count: expected " << inExpectedTypes.size() << " inputs and "
+                   << outExpectedInTypes.size() << " outputs, got " << inOperandCount
+                   << " inputs and " << outOperandCount << " outputs";
+        return ANEURALNETWORKS_BAD_DATA;
+    }
+    for (uint32_t i = 0; i < inOperandCount; i++) {
+        if (operands[inOperandIndexes[i]].type != inExpectedTypes[i]) {
+            LOG(ERROR) << "Invalid input tensor type " << operands[inOperandIndexes[i]].type
+                       << " for input " << i << ", expected " << inExpectedTypes[i];
+            return ANEURALNETWORKS_BAD_DATA;
+        }
+    }
+    for (uint32_t i = 0; i < outOperandCount; i++) {
+        if (operands[outOperandIndexes[i]].type != outExpectedInTypes[i]) {
+            LOG(ERROR) << "Invalid output tensor type " << operands[outOperandIndexes[i]].type
+                       << " for output " << i << ", expected " << outExpectedInTypes[i];
+            return ANEURALNETWORKS_BAD_DATA;
+        }
+    }
+
+    return ANEURALNETWORKS_NO_ERROR;
+}
+
+static int validateHalVersion(ANeuralNetworksOperationType opType, HalVersion halVersion,
+                              HalVersion minSupportedHalVersion) {
+    if (halVersion < minSupportedHalVersion) {
+        LOG(ERROR) << "The given inputs and outputs for operation " << opType
+                   << " are only supported in " << minSupportedHalVersion
+                   << " and later (validating using " << halVersion << ")";
+        return ANEURALNETWORKS_BAD_DATA;
+    }
+    return ANEURALNETWORKS_NO_ERROR;
+}
+
+// Checks if two operands have the same types, ranks (if specified), dimensions
+// (if specified), scales, zeroPoints, and extraParams.
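+// For example, dimensions {2, 0} and {2, 3} are compatible (a zero dimension is
+// unspecified and matches anything), while {2, 2} and {2, 3} are not.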
+static bool compatible(const Operand& a, const Operand& b) {
+    NN_RET_CHECK(a.type == b.type) << a.type << " != " << b.type;
+    if (a.dimensions.size() != 0 && b.dimensions.size() != 0) {
+        NN_RET_CHECK_EQ(a.dimensions.size(), b.dimensions.size()) << "Incompatible dimensions";
+        for (uint32_t i = 0, n = a.dimensions.size(); i < n; ++i) {
+            if (a.dimensions[i] != 0 && b.dimensions[i] != 0) {
+                NN_RET_CHECK_EQ(a.dimensions[i], b.dimensions[i]) << "Incompatible dimensions";
+            }
+        }
+    }
+    NN_RET_CHECK_EQ(a.scale, b.scale);
+    NN_RET_CHECK_EQ(a.zeroPoint, b.zeroPoint);
+    NN_RET_CHECK(a.extraParams == b.extraParams) << a.extraParams << " != " << b.extraParams;
+    return true;
+}
+
+static bool validateConditionOperand(const Operand& operand) {
+    NN_RET_CHECK(operand.type == OperandType::TENSOR_BOOL8)
+            << "Unexpected condition operand type: " << operand.type;
+    NN_RET_CHECK_EQ(operand.dimensions.size(), 1u) << "Condition operand must be a singleton";
+    NN_RET_CHECK_EQ(operand.dimensions[0], 1u) << "Condition operand must be a singleton";
+    return true;
+}
+
+static void checkSubgraphValidationHelper(const SubgraphValidationHelper& helper) {
+    CHECK(helper.isValidSubgraphReference != nullptr);
+    CHECK(helper.getSubgraphInputCount != nullptr);
+    CHECK(helper.getSubgraphOutputCount != nullptr);
+    CHECK(helper.getSubgraphInputOperand != nullptr);
+    CHECK(helper.getSubgraphOutputOperand != nullptr);
+}
+
+static bool validateIfOperation(uint32_t inputCount, const uint32_t* inputs, uint32_t outputCount,
+                                const uint32_t* outputs, const std::vector<Operand>& operands,
+                                const SubgraphValidationHelper& helper) {
+    namespace op = operation_if;
+    checkSubgraphValidationHelper(helper);
+    NN_RET_CHECK_GE(inputCount, 3u) << "ANEURALNETWORKS_IF must have at least 3 inputs";
+    NN_RET_CHECK_GE(outputCount, 1u) << "ANEURALNETWORKS_IF must have at least 1 output";
+    auto validateBranchOperand = [&](const Operand& branchModelOperand) -> bool {
+        NN_RET_CHECK(helper.isValidSubgraphReference(branchModelOperand))
+                << "Operand is not a valid subgraph reference";
+        const uint32_t branchModelInputCount = helper.getSubgraphInputCount(branchModelOperand);
+        const uint32_t branchModelOutputCount = helper.getSubgraphOutputCount(branchModelOperand);
+        NN_RET_CHECK_EQ(inputCount, op::kFirstInput + branchModelInputCount);
+        NN_RET_CHECK_EQ(outputCount, branchModelOutputCount);
+        for (uint32_t i = 0; i < branchModelInputCount; ++i) {
+            const Operand& innerOperand = *helper.getSubgraphInputOperand(branchModelOperand, i);
+            const Operand& outerOperand = operands[inputs[op::kFirstInput + i]];
+            NN_RET_CHECK(compatible(innerOperand, outerOperand));
+        }
+        for (uint32_t i = 0; i < branchModelOutputCount; ++i) {
+            const Operand& innerOperand = *helper.getSubgraphOutputOperand(branchModelOperand, i);
+            const Operand& outerOperand = operands[outputs[i]];
+            NN_RET_CHECK(compatible(innerOperand, outerOperand));
+        }
+        return true;
+    };
+    NN_RET_CHECK(validateConditionOperand(operands[inputs[op::kCondBoolOperand]]))
+            << "Validation failed for IF condition operand";
+    NN_RET_CHECK(validateBranchOperand(operands[inputs[op::kThenModelOperand]]))
+            << "Validation failed for IF then model";
+    NN_RET_CHECK(validateBranchOperand(operands[inputs[op::kElseModelOperand]]))
+            << "Validation failed for IF else model";
+    return true;
+}
+
+static bool validateControlFlowOperandUnknownSize(const SubgraphValidationHelper& helper,
+                                                  const Operand& operand) {
+    if (!helper.allowControlFlowOperationWithOperandOfUnknownSize && !isExtension(operand.type)) {
+        NN_RET_CHECK_NE(nonExtensionOperandSizeOfData(operand.type, operand.dimensions), 0u);
+    }
+    return true;
+}
+
+static bool validateWhileOperation(uint32_t inputCount, const uint32_t* inputs,
+                                   uint32_t outputCount, const uint32_t* outputs,
+                                   const std::vector<Operand>& operands,
+                                   const SubgraphValidationHelper& helper) {
+    // Let the loop have
+    // - m >= 1 input-output operands,
+    // - k >= 0 state-only operands, and
+    // - n >= 0 input-only operands.
+    // Then
+    // - the WHILE loop operation has (2 + m + k + n) inputs and m outputs.
+    // - the condition model has (m + k + n) inputs and 1 output.
+    // - the body model has (m + k + n) inputs and (m + k) outputs.
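+    //
+    // Worked example: with m = 2, k = 1, and n = 1, the WHILE operation has
+    // 2 + 2 + 1 + 1 = 6 inputs and 2 outputs, the condition model has 4 inputs
+    // and 1 output, and the body model has 4 inputs and 3 outputs.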
+    namespace op = operation_while;
+    checkSubgraphValidationHelper(helper);
+    NN_RET_CHECK_GE(inputCount, 3u) << "ANEURALNETWORKS_WHILE must have at least 3 inputs";
+    NN_RET_CHECK_GE(outputCount, 1u) << "ANEURALNETWORKS_WHILE must have at least 1 output";
+    auto validateCondOperand = [&](const Operand& condModelOperand) -> bool {
+        NN_RET_CHECK(helper.isValidSubgraphReference(condModelOperand))
+                << "Operand is not a valid subgraph reference";
+        const uint32_t condModelInputCount = helper.getSubgraphInputCount(condModelOperand);
+        const uint32_t condModelOutputCount = helper.getSubgraphOutputCount(condModelOperand);
+        NN_RET_CHECK_EQ(inputCount, op::kFirstInput + condModelInputCount);
+        NN_RET_CHECK_EQ(condModelOutputCount, 1u);
+        for (uint32_t i = 0; i < condModelInputCount; ++i) {
+            const Operand& innerOperand = *helper.getSubgraphInputOperand(condModelOperand, i);
+            const Operand& outerOperand = operands[inputs[op::kFirstInput + i]];
+            NN_RET_CHECK(compatible(innerOperand, outerOperand));
+            NN_RET_CHECK(validateControlFlowOperandUnknownSize(helper, innerOperand));
+            NN_RET_CHECK(validateControlFlowOperandUnknownSize(helper, outerOperand));
+        }
+        NN_RET_CHECK(
+                validateConditionOperand(*helper.getSubgraphOutputOperand(condModelOperand, 0)));
+        return true;
+    };
+    auto validateBodyOperand = [&](const Operand& bodyModelOperand) -> bool {
+        NN_RET_CHECK(helper.isValidSubgraphReference(bodyModelOperand))
+                << "Operand is not a valid subgraph reference";
+        const uint32_t bodyModelInputCount = helper.getSubgraphInputCount(bodyModelOperand);
+        const uint32_t bodyModelOutputCount = helper.getSubgraphOutputCount(bodyModelOperand);
+        NN_RET_CHECK_EQ(inputCount, op::kFirstInput + bodyModelInputCount);
+        NN_RET_CHECK_GE(bodyModelOutputCount, outputCount);
+        NN_RET_CHECK_GE(bodyModelInputCount, bodyModelOutputCount);
+        const uint32_t inputOutputCount = outputCount;
+        const uint32_t stateOnlyCount = bodyModelOutputCount - inputOutputCount;
+        const uint32_t inputOnlyCount = bodyModelInputCount - bodyModelOutputCount;
+        for (uint32_t i = 0, n = inputOutputCount + stateOnlyCount + inputOnlyCount; i < n; ++i) {
+            const Operand& innerOperand = *helper.getSubgraphInputOperand(bodyModelOperand, i);
+            const Operand& outerOperand = operands[inputs[op::kFirstInput + i]];
+            NN_RET_CHECK(compatible(innerOperand, outerOperand));
+            NN_RET_CHECK(validateControlFlowOperandUnknownSize(helper, innerOperand));
+            NN_RET_CHECK(validateControlFlowOperandUnknownSize(helper, outerOperand));
+        }
+        for (uint32_t i = 0; i < inputOutputCount; ++i) {
+            const Operand& innerOperand = *helper.getSubgraphOutputOperand(bodyModelOperand, i);
+            const Operand& outerOperand = operands[outputs[i]];
+            NN_RET_CHECK(compatible(innerOperand, outerOperand));
+            NN_RET_CHECK(validateControlFlowOperandUnknownSize(helper, outerOperand));
+        }
+        for (uint32_t i = 0, n = inputOutputCount + stateOnlyCount; i < n; ++i) {
+            const Operand& inputOperand = *helper.getSubgraphInputOperand(bodyModelOperand, i);
+            const Operand& outputOperand = *helper.getSubgraphOutputOperand(bodyModelOperand, i);
+            NN_RET_CHECK(compatible(inputOperand, outputOperand));
+            NN_RET_CHECK(validateControlFlowOperandUnknownSize(helper, outputOperand));
+        }
+        return true;
+    };
+    NN_RET_CHECK(validateCondOperand(operands[inputs[op::kCondModelOperand]]))
+            << "Validation failed for WHILE condition model";
+    NN_RET_CHECK(validateBodyOperand(operands[inputs[op::kBodyModelOperand]]))
+            << "Validation failed for WHILE body model";
+    return true;
+}
+            const Operand& inputOperand = *helper.getSubgraphInputOperand(bodyModelOperand, i);
+            const Operand& outputOperand = *helper.getSubgraphOutputOperand(bodyModelOperand, i);
+            NN_RET_CHECK(compatible(inputOperand, outputOperand));
+            NN_RET_CHECK(validateControlFlowOperandUnknownSize(helper, outputOperand));
+        }
+        return true;
+    };
+    NN_RET_CHECK(validateCondOperand(operands[inputs[op::kCondModelOperand]]))
+            << "Validation failed for WHILE condition model";
+    NN_RET_CHECK(validateBodyOperand(operands[inputs[op::kBodyModelOperand]]))
+            << "Validation failed for WHILE body model";
+    return true;
+}
+
+static inline int validateOperation(ANeuralNetworksOperationType opType, uint32_t inputCount,
+                                    const uint32_t* inputIndexes, uint32_t outputCount,
+                                    const uint32_t* outputIndexes,
+                                    const std::vector<Operand>& operands, HalVersion halVersion) {
+    if (opType == ANEURALNETWORKS_IF || opType == ANEURALNETWORKS_WHILE) {
+        NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
+        LOG(ERROR) << "This validateOperation() overload does not support control flow";
+        return ANEURALNETWORKS_BAD_DATA;
+    }
+    return validateOperation(opType, inputCount, inputIndexes, outputCount, outputIndexes,
+                             operands, halVersion, {});
+}
+
+int validateOperation(ANeuralNetworksOperationType opType, uint32_t inputCount,
+                      const uint32_t* inputIndexes, uint32_t outputCount,
+                      const uint32_t* outputIndexes, const std::vector<Operand>& operands,
+                      HalVersion halVersion, const SubgraphValidationHelper& helper) {
+    NN_RETURN_IF_ERROR(validateOperandList(inputCount, inputIndexes,
+                                           static_cast<uint32_t>(operands.size()),
+                                           "ANeuralNetworksModel_addOperation inputs"));
+    NN_RETURN_IF_ERROR(validateOperandList(outputCount, outputIndexes,
+                                           static_cast<uint32_t>(operands.size()),
+                                           "ANeuralNetworksModel_addOperation outputs"));
+
+    if (isExtensionOperationType(opType)) {
+        if (halVersion < HalVersion::V1_2) {
+            LOG(ERROR)
+                    << "Extension operations are supported since HAL version 1.2, validating using "
+                    << halVersion;
+            return ANEURALNETWORKS_BAD_DATA;
+        }
+        // There is no other validation we can do for an extension operation.
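+        // For illustration, assuming the usual NNAPI encoding: an extension operation
+        // type packs an extension prefix in its high bits and the operation number
+        // within the extension in its low bits, so only generic checks are possible
+        // here; the per-operation signature is known only to the extension itself.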
+        return ANEURALNETWORKS_NO_ERROR;
+    }
+
+    auto logInvalidInOutNumber = [opType, inputCount, outputCount](int expIn, int expOut) {
+        LOG(ERROR) << "Invalid number of input operands (" << inputCount << ", expected " << expIn
+                   << ") or output operands (" << outputCount << ", expected " << expOut
+                   << ") for operation " << opType;
+    };
+
+    switch (opType) {
+        case ANEURALNETWORKS_OEM_OPERATION: {
+            return ANEURALNETWORKS_NO_ERROR;
+        }
+        case ANEURALNETWORKS_RESHAPE: {
+            if (inputCount != 2 || outputCount != 1) {
+                logInvalidInOutNumber(2, 1);
+                return ANEURALNETWORKS_BAD_DATA;
+            }
+            auto inputType = operands[inputIndexes[0]].type;
+            std::vector<OperandType> inExpectedTypes;
+            std::vector<OperandType> outExpectedTypes;
+            if (inputType == OperandType::TENSOR_FLOAT32) {
+                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
+                inExpectedTypes = {OperandType::TENSOR_FLOAT32, OperandType::TENSOR_INT32};
+                outExpectedTypes = {OperandType::TENSOR_FLOAT32};
+            } else if (inputType == OperandType::TENSOR_FLOAT16) {
+                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
+                inExpectedTypes = {OperandType::TENSOR_FLOAT16, OperandType::TENSOR_INT32};
+                outExpectedTypes = {OperandType::TENSOR_FLOAT16};
+            } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
+                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
+                inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM, OperandType::TENSOR_INT32};
+                outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
+            } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
+                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
+                inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED,
+                                   OperandType::TENSOR_INT32};
+                outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED};
+            } else {
+                LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
+                return ANEURALNETWORKS_BAD_DATA;
+            }
+            const auto inputRank = operands[inputIndexes[0]].dimensions.size();
+            if (inputRank > 4) {
+                LOG(ERROR) << "Unsupported input tensor rank for operation " << opType;
+                return ANEURALNETWORKS_BAD_DATA;
+            }
+            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
+                                                 inExpectedTypes, outputCount, outputIndexes,
+                                                 outExpectedTypes);
+        }
+        case ANEURALNETWORKS_DEPTH_TO_SPACE: {
+            if ((inputCount != 3 && inputCount != 2) || outputCount != 1) {
+                LOG(ERROR) << "Invalid number of input operands (" << inputCount
+                           << ", expected 3 or 2) or output operands (" << outputCount
+                           << ", expected 1) for operation " << opType;
+                return ANEURALNETWORKS_BAD_DATA;
+            }
+            auto inputType = operands[inputIndexes[0]].type;
+            std::vector<OperandType> inExpectedTypes;
+            std::vector<OperandType> outExpectedTypes;
+            if (inputType == OperandType::TENSOR_FLOAT32) {
+                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
+                inExpectedTypes = {OperandType::TENSOR_FLOAT32, OperandType::INT32};
+                outExpectedTypes = {OperandType::TENSOR_FLOAT32};
+            } else if (inputType == OperandType::TENSOR_FLOAT16) {
+                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
+                inExpectedTypes = {OperandType::TENSOR_FLOAT16, OperandType::INT32};
+                outExpectedTypes = {OperandType::TENSOR_FLOAT16};
+            } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
+                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
+                inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM, OperandType::INT32};
+                outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
+            } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
+                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
+                inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED, OperandType::INT32};
+                outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED};
+            } else {
+                LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
+                return ANEURALNETWORKS_BAD_DATA;
+            }
+            if (inputCount == 3) {
+                inExpectedTypes.push_back(OperandType::BOOL);
+                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
+            } else {
+                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
+            }
+            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
+                                                 inExpectedTypes, outputCount, outputIndexes,
+                                                 outExpectedTypes);
+        }
+        case ANEURALNETWORKS_SPACE_TO_DEPTH: {
+            if ((inputCount != 3 && inputCount != 2) || outputCount != 1) {
+                LOG(ERROR) << "Invalid number of input operands (" << inputCount
+                           << ", expected 3 or 2) or output operands (" << outputCount
+                           << ", expected 1) for operation " << opType;
+                return ANEURALNETWORKS_BAD_DATA;
+            }
+            auto inputType = operands[inputIndexes[0]].type;
+            std::vector<OperandType> inExpectedTypes;
+            std::vector<OperandType> outExpectedTypes;
+            if (inputType == OperandType::TENSOR_FLOAT32) {
+                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
+                inExpectedTypes = {OperandType::TENSOR_FLOAT32, OperandType::INT32};
+                outExpectedTypes = {OperandType::TENSOR_FLOAT32};
+            } else if (inputType == OperandType::TENSOR_FLOAT16) {
+                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
+                inExpectedTypes = {OperandType::TENSOR_FLOAT16, OperandType::INT32};
+                outExpectedTypes = {OperandType::TENSOR_FLOAT16};
+            } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
+                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
+                inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM, OperandType::INT32};
+                outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
+            } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
+                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
+                inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED, OperandType::INT32};
+                outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED};
+            } else {
+                LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
+                return ANEURALNETWORKS_BAD_DATA;
+            }
+            if (inputCount == 3) {
+                inExpectedTypes.push_back(OperandType::BOOL);
+                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
+            } else {
+                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
+            }
+            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
+                                                 inExpectedTypes, outputCount, outputIndexes,
+                                                 outExpectedTypes);
+        }
+        case ANEURALNETWORKS_EMBEDDING_LOOKUP: {
+            if (inputCount != 2 || outputCount != 1) {
+                logInvalidInOutNumber(2, 1);
+                return ANEURALNETWORKS_BAD_DATA;
+            }
+            auto inputType = operands[inputIndexes[1]].type;
+            if (inputType != OperandType::TENSOR_FLOAT16 &&
+                inputType != OperandType::TENSOR_FLOAT32 &&
+                inputType != OperandType::TENSOR_INT32 &&
+                inputType != OperandType::TENSOR_QUANT8_ASYMM &&
+                inputType != OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
+                LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
+                return ANEURALNETWORKS_BAD_DATA;
+            }
+            std::vector<OperandType> inExpectedTypes = {OperandType::TENSOR_INT32, inputType};
+            std::vector<OperandType> outExpectedTypes = {inputType};
+            if (inputType == OperandType::TENSOR_FLOAT16 ||
+                inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
+                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
+            } else if (inputType == OperandType::TENSOR_INT32 ||
+                       inputType == OperandType::TENSOR_QUANT8_ASYMM) {
+                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
+            } else {
+                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
+            }
+            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
+                                                 inExpectedTypes, outputCount, outputIndexes,
+                                                 outExpectedTypes);
+        }
+        case ANEURALNETWORKS_HASHTABLE_LOOKUP: {
+            if (inputCount != 3 || outputCount != 2) {
+                logInvalidInOutNumber(3, 2);
+                return ANEURALNETWORKS_BAD_DATA;
+            }
+            auto inputType = operands[inputIndexes[2]].type;
+            if (inputType != OperandType::TENSOR_FLOAT32 &&
+                inputType != OperandType::TENSOR_INT32 &&
+                inputType != OperandType::TENSOR_QUANT8_ASYMM) {
+                LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
+                return ANEURALNETWORKS_BAD_DATA;
+            }
+            std::vector<OperandType> inExpectedTypes = {OperandType::TENSOR_INT32,
+                                                        OperandType::TENSOR_INT32, inputType};
+            std::vector<OperandType> outExpectedTypes = {inputType,
+                                                         OperandType::TENSOR_QUANT8_ASYMM};
+            NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
+            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
+                                                 inExpectedTypes, outputCount, outputIndexes,
+                                                 outExpectedTypes);
+        }
+        case ANEURALNETWORKS_LSH_PROJECTION: {
+            if (inputCount != 4 || outputCount != 1) {
+                logInvalidInOutNumber(4, 1);
+                return ANEURALNETWORKS_BAD_DATA;
+            }
+            auto inputType = operands[inputIndexes[1]].type;
+            if (inputType != OperandType::TENSOR_FLOAT16 &&
+                inputType != OperandType::TENSOR_FLOAT32 &&
+                inputType != OperandType::TENSOR_INT32 &&
+                inputType != OperandType::TENSOR_QUANT8_ASYMM) {
+                LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
+                return ANEURALNETWORKS_BAD_DATA;
+            }
+            auto hashType = operands[inputIndexes[0]].type;
+            std::vector<OperandType> inExpectedTypes;
+            if (hashType == OperandType::TENSOR_FLOAT16) {
+                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
+                inExpectedTypes = {
+                        OperandType::TENSOR_FLOAT16,
+                        inputType,
+                        OperandType::TENSOR_FLOAT16,
+                        OperandType::INT32,
+                };
+            } else if (hashType == OperandType::TENSOR_FLOAT32) {
+                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
+                inExpectedTypes = {
+                        OperandType::TENSOR_FLOAT32,
+                        inputType,
+                        OperandType::TENSOR_FLOAT32,
+                        OperandType::INT32,
+                };
+            } else {
+                LOG(ERROR) << "Unsupported hash tensor type for operation " << opType;
+                return ANEURALNETWORKS_BAD_DATA;
+            }
+            std::vector<OperandType> outExpectedTypes = {OperandType::TENSOR_INT32};
+            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
+                                                 inExpectedTypes, outputCount, outputIndexes,
+                                                 outExpectedTypes);
+        }
+        case ANEURALNETWORKS_BIDIRECTIONAL_SEQUENCE_LSTM: {
+            const uint32_t kNumOutputs = 2;
+            const uint32_t kNumOutputsMerged = 1;
+            const uint32_t kNumOutputsWithState = 6;
+            const uint32_t kNumOutputsMergedWithState = 5;
+            if (inputCount != 61 ||
+                (outputCount != kNumOutputs && outputCount != kNumOutputsMerged &&
+                 outputCount != kNumOutputsWithState &&
+                 outputCount != kNumOutputsMergedWithState)) {
+                LOG(ERROR) << "Invalid number of input operands (" << inputCount
+                           << ", expected 61) or output operands (" << outputCount
+                           << ", expected 1, 2, 5 or 6) for operation " << opType;
+                return ANEURALNETWORKS_BAD_DATA;
+            }
+
+            std::vector<OperandType> inExpectedTypes;
+            auto inputType = operands[inputIndexes[0]].type;
+            if (inputType != OperandType::TENSOR_FLOAT32 &&
+                inputType != OperandType::TENSOR_FLOAT16) {
+                LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
+                return ANEURALNETWORKS_BAD_DATA;
+            }
+
+            inExpectedTypes = {};
+            for (int i = 0; i < 48; ++i) {
+                inExpectedTypes.push_back(inputType);
+            }
+            inExpectedTypes.push_back(OperandType::INT32);
+            inExpectedTypes.push_back(inputType == OperandType::TENSOR_FLOAT32
+                                              ? OperandType::FLOAT32
+                                              : OperandType::FLOAT16);
+            inExpectedTypes.push_back(inputType == OperandType::TENSOR_FLOAT32
+                                              ? OperandType::FLOAT32
+                                              : OperandType::FLOAT16);
+            inExpectedTypes.push_back(OperandType::BOOL);
+            inExpectedTypes.push_back(OperandType::BOOL);
+            for (int i = 0; i < 8; ++i) {
+                inExpectedTypes.push_back(inputType);
+            }
+
+            HalVersion minSupportedHalVersion = HalVersion::V1_2;
+            if (outputCount == kNumOutputsWithState || outputCount == kNumOutputsMergedWithState) {
+                minSupportedHalVersion = HalVersion::V1_3;
+            }
+            NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, minSupportedHalVersion));
+            std::vector<OperandType> outExpectedTypes(outputCount, inputType);
+            auto status = validateOperationOperandTypes(operands, inputCount, inputIndexes,
+                                                        inExpectedTypes, outputCount,
+                                                        outputIndexes, outExpectedTypes);
+            return status;
+        }
+        case ANEURALNETWORKS_LSTM: {
+            if ((inputCount != 23 && inputCount != 27) || outputCount != 4) {
+                LOG(ERROR) << "Invalid number of input operands (" << inputCount
+                           << ", expected 23 or 27) or output operands (" << outputCount
+                           << ", expected 4) for operation " << opType;
+                return ANEURALNETWORKS_BAD_DATA;
+            }
+            std::vector<OperandType> inExpectedTypes;
+            std::vector<OperandType> outExpectedTypes;
+            auto inputType = operands[inputIndexes[0]].type;
+            if (inputType != OperandType::TENSOR_FLOAT32 &&
+                inputType != OperandType::TENSOR_FLOAT16) {
+                LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
+                return ANEURALNETWORKS_BAD_DATA;
+            }
+
+            inExpectedTypes = {inputType, inputType, inputType, inputType, inputType,
+                               inputType, inputType, inputType, inputType, inputType,
+                               inputType, inputType, inputType, inputType, inputType,
+                               inputType, inputType, inputType, inputType, inputType,
+                               OperandType::INT32};
+            if (inputType == OperandType::TENSOR_FLOAT32) {
+                inExpectedTypes.push_back(OperandType::FLOAT32);
+                inExpectedTypes.push_back(OperandType::FLOAT32);
+            } else {
+                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
+                inExpectedTypes.push_back(OperandType::FLOAT16);
+                inExpectedTypes.push_back(OperandType::FLOAT16);
+            }
+
+            outExpectedTypes = {inputType, inputType, inputType, inputType};
+            if (inputCount == 23) {
+                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
+            } else {
+                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
+                for (int i = 0; i < 4; ++i) {
+                    inExpectedTypes.push_back(inputType);
+                }
+            }
+            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
+                                                 inExpectedTypes, outputCount, outputIndexes,
+                                                 outExpectedTypes);
+        }
+        case ANEURALNETWORKS_QUANTIZED_16BIT_LSTM: {
+            if (inputCount != 15 || outputCount != 2) {
+                logInvalidInOutNumber(15, 2);
+                return ANEURALNETWORKS_BAD_DATA;
+            }
+            NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
+            std::vector<OperandType> inExpectedTypes = {
+                    OperandType::TENSOR_QUANT8_ASYMM, OperandType::TENSOR_QUANT8_ASYMM,
+                    OperandType::TENSOR_QUANT8_ASYMM, OperandType::TENSOR_QUANT8_ASYMM,
+                    OperandType::TENSOR_QUANT8_ASYMM, OperandType::TENSOR_QUANT8_ASYMM,
+                    OperandType::TENSOR_QUANT8_ASYMM, OperandType::TENSOR_QUANT8_ASYMM,
+                    OperandType::TENSOR_QUANT8_ASYMM, OperandType::TENSOR_INT32,
+                    OperandType::TENSOR_INT32,        OperandType::TENSOR_INT32,
+                    OperandType::TENSOR_INT32,        OperandType::TENSOR_QUANT16_SYMM,
+                    OperandType::TENSOR_QUANT8_ASYMM};
+            std::vector<OperandType> outExpectedTypes = {OperandType::TENSOR_QUANT16_SYMM,
+                                                         OperandType::TENSOR_QUANT8_ASYMM};
+            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
+                                                 inExpectedTypes, outputCount, outputIndexes,
+                                                 outExpectedTypes);
+        }
+        case ANEURALNETWORKS_RANDOM_MULTINOMIAL: {
+            if (inputCount != 3 || outputCount != 1) {
+                logInvalidInOutNumber(3, 1);
+                return ANEURALNETWORKS_BAD_DATA;
+            }
+            OperandType inputType = operands[inputIndexes[0]].type;
+            std::vector<OperandType> inExpectedTypes;
+            if (inputType == OperandType::TENSOR_FLOAT32 ||
+                inputType == OperandType::TENSOR_FLOAT16) {
+                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
+                inExpectedTypes = {
+                        inputType,
+                        OperandType::INT32,
+                        OperandType::TENSOR_INT32,
+                };
+            } else {
+                LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
+                return ANEURALNETWORKS_BAD_DATA;
+            }
+            std::vector<OperandType> outExpectedTypes = {OperandType::TENSOR_INT32};
+            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
+                                                 inExpectedTypes, outputCount, outputIndexes,
+                                                 outExpectedTypes);
+        }
+        case ANEURALNETWORKS_RNN: {
+            if (inputCount != 6 || outputCount != 2) {
+                logInvalidInOutNumber(6, 2);
+                return ANEURALNETWORKS_BAD_DATA;
+            }
+            OperandType inputType = operands[inputIndexes[0]].type;
+            std::vector<OperandType> inExpectedTypes;
+            std::vector<OperandType> outExpectedTypes;
+            if (inputType == OperandType::TENSOR_FLOAT32) {
+                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
+                inExpectedTypes = {
+                        OperandType::TENSOR_FLOAT32, OperandType::TENSOR_FLOAT32,
+                        OperandType::TENSOR_FLOAT32, OperandType::TENSOR_FLOAT32,
+                        OperandType::TENSOR_FLOAT32, OperandType::INT32,
+                };
+                outExpectedTypes = {
+                        OperandType::TENSOR_FLOAT32,
+                        OperandType::TENSOR_FLOAT32,
+                };
+            } else if (inputType == OperandType::TENSOR_FLOAT16) {
+                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
+                inExpectedTypes = {
+                        OperandType::TENSOR_FLOAT16, OperandType::TENSOR_FLOAT16,
+                        OperandType::TENSOR_FLOAT16, OperandType::TENSOR_FLOAT16,
+                        OperandType::TENSOR_FLOAT16, OperandType::INT32,
+                };
+                outExpectedTypes = {
+                        OperandType::TENSOR_FLOAT16,
+                        OperandType::TENSOR_FLOAT16,
+                };
+            } else {
+                LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
+                return ANEURALNETWORKS_BAD_DATA;
+            }
+            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
+                                                 inExpectedTypes, outputCount, outputIndexes,
+                                                 outExpectedTypes);
+        }
+        case ANEURALNETWORKS_SVDF: {
+            if (inputCount != 7 || outputCount != 2) {
+                logInvalidInOutNumber(7, 2);
+                return ANEURALNETWORKS_BAD_DATA;
+            }
+            OperandType inputType = operands[inputIndexes[0]].type;
+            if (inputType == OperandType::TENSOR_FLOAT32) {
+                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
+            } else if (inputType == OperandType::TENSOR_FLOAT16) {
+                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
+            } else {
+                LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
+                return ANEURALNETWORKS_BAD_DATA;
+            }
+            std::vector<OperandType> inExpectedTypes = {
+                    inputType, inputType,          inputType,          inputType,
+                    inputType, OperandType::INT32, OperandType::INT32,
+            };
+            std::vector<OperandType> outExpectedTypes = {inputType, inputType};
+            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
+                                                 inExpectedTypes, outputCount, outputIndexes,
+                                                 outExpectedTypes);
+        }
+        case ANEURALNETWORKS_BATCH_TO_SPACE_ND: {
+            if ((inputCount != 3 && inputCount != 2) || outputCount != 1) {
+                LOG(ERROR) << "Invalid number of input operands (" << inputCount
+                           << ", expected 3 or 2) or output operands (" << outputCount
+                           << ", expected 1) for operation " << opType;
+                return ANEURALNETWORKS_BAD_DATA;
+            }
+            auto inputType = operands[inputIndexes[0]].type;
+            std::vector<OperandType> inExpectedTypes;
+            std::vector<OperandType> outExpectedTypes;
+            if (inputType == OperandType::TENSOR_FLOAT32) {
+                inExpectedTypes = {
+                        OperandType::TENSOR_FLOAT32,
+                        OperandType::TENSOR_INT32,
+                };
+                outExpectedTypes = {OperandType::TENSOR_FLOAT32};
+            } else if (inputType == OperandType::TENSOR_FLOAT16) {
+                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
+                inExpectedTypes = {
+                        OperandType::TENSOR_FLOAT16,
+                        OperandType::TENSOR_INT32,
+                };
+                outExpectedTypes = {OperandType::TENSOR_FLOAT16};
+            } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
+                inExpectedTypes = {
+                        OperandType::TENSOR_QUANT8_ASYMM,
+                        OperandType::TENSOR_INT32,
+                };
+                outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
+            } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
+                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
+                inExpectedTypes = {
+                        OperandType::TENSOR_QUANT8_ASYMM_SIGNED,
+                        OperandType::TENSOR_INT32,
+                };
+                outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED};
+            } else {
+                LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
+                return ANEURALNETWORKS_BAD_DATA;
+            }
+            if (inputCount == 3) {
+                inExpectedTypes.push_back(OperandType::BOOL);
+                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
+            } else {
+                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_1));
+            }
+            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
+                                                 inExpectedTypes, outputCount, outputIndexes,
+                                                 outExpectedTypes);
+        }
+        case ANEURALNETWORKS_SPACE_TO_BATCH_ND: {
+            if ((inputCount != 4 && inputCount != 3) || outputCount != 1) {
+                LOG(ERROR) << "Invalid number of input operands (" << inputCount
+                           << ", expected 4 or 3) or output operands (" << outputCount
+                           << ", expected 1) for operation " << opType;
+                return ANEURALNETWORKS_BAD_DATA;
+            }
+            auto inputType = operands[inputIndexes[0]].type;
+            std::vector<OperandType> inExpectedTypes;
+            std::vector<OperandType> outExpectedTypes;
+            if (inputType == OperandType::TENSOR_FLOAT32) {
+                inExpectedTypes = {
+                        OperandType::TENSOR_FLOAT32,
+                        OperandType::TENSOR_INT32,
+                        OperandType::TENSOR_INT32,
+                };
+                outExpectedTypes = {OperandType::TENSOR_FLOAT32};
+            } else if (inputType == OperandType::TENSOR_FLOAT16) {
+                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
+                inExpectedTypes = {
+                        OperandType::TENSOR_FLOAT16,
+                        OperandType::TENSOR_INT32,
+                        OperandType::TENSOR_INT32,
+                };
+                outExpectedTypes = {OperandType::TENSOR_FLOAT16};
+            } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
+                if (operands[inputIndexes[0]].zeroPoint != 0) {
+                    NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
+                }
+                inExpectedTypes = {
+                        OperandType::TENSOR_QUANT8_ASYMM,
+                        OperandType::TENSOR_INT32,
+                        OperandType::TENSOR_INT32,
+                };
+                outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
+            } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
+                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
+                inExpectedTypes = {
+                        OperandType::TENSOR_QUANT8_ASYMM_SIGNED,
+                        OperandType::TENSOR_INT32,
+                        OperandType::TENSOR_INT32,
+                };
+                outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED};
+            } else {
+                LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
+                return ANEURALNETWORKS_BAD_DATA;
+            }
+            if (inputCount == 4) {
+                inExpectedTypes.push_back(OperandType::BOOL);
+                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
+            } else {
+                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_1));
+            }
+            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
+                                                 inExpectedTypes, outputCount, outputIndexes,
+                                                 outExpectedTypes);
+        }
+        case ANEURALNETWORKS_PAD: {
+            if (inputCount != 2 || outputCount != 1) {
+                logInvalidInOutNumber(2, 1);
+                return ANEURALNETWORKS_BAD_DATA;
+            }
+            auto inputType = operands[inputIndexes[0]].type;
+            std::vector<OperandType> inExpectedTypes;
+            std::vector<OperandType> outExpectedTypes;
+            if (inputType == OperandType::TENSOR_FLOAT32) {
+                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_1));
+                inExpectedTypes = {
+                        OperandType::TENSOR_FLOAT32,
+                        OperandType::TENSOR_INT32,
+                };
+                outExpectedTypes = {OperandType::TENSOR_FLOAT32};
+            } else if (inputType == OperandType::TENSOR_FLOAT16) {
+                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
+                inExpectedTypes = {
+                        OperandType::TENSOR_FLOAT16,
+                        OperandType::TENSOR_INT32,
+                };
+                outExpectedTypes = {OperandType::TENSOR_FLOAT16};
+            } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM ||
+                       inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
+                if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
+                    NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
+                } else {
+                    if (operands[inputIndexes[0]].zeroPoint == 0) {
+                        NN_RETURN_IF_ERROR(
+                                validateHalVersion(opType, halVersion, HalVersion::V1_1));
+                    } else {
+                        NN_RETURN_IF_ERROR(
+                                validateHalVersion(opType, halVersion, HalVersion::V1_2));
+                    }
+                }
+                inExpectedTypes = {
+                        inputType,
+                        OperandType::TENSOR_INT32,
+                };
+                outExpectedTypes = {inputType};
+            } else {
+                LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
+                return ANEURALNETWORKS_BAD_DATA;
+            }
+            const auto inputRank = operands[inputIndexes[0]].dimensions.size();
+            if (inputRank > 4) {
+                LOG(ERROR) << "Unsupported input tensor rank for operation " << opType;
+                return ANEURALNETWORKS_BAD_DATA;
+            }
+            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
+                                                 inExpectedTypes, outputCount, outputIndexes,
+                                                 outExpectedTypes);
+        }
+        case ANEURALNETWORKS_PAD_V2: {
+            if (inputCount != 3 || outputCount != 1) {
+                logInvalidInOutNumber(3, 1);
+                return ANEURALNETWORKS_BAD_DATA;
+            }
+            auto inputType = operands[inputIndexes[0]].type;
+            std::vector<OperandType> inExpectedTypes;
+            std::vector<OperandType> outExpectedTypes;
+            if (inputType == OperandType::TENSOR_FLOAT32) {
+                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
+                inExpectedTypes = {
+                        OperandType::TENSOR_FLOAT32,
+                        OperandType::TENSOR_INT32,
+                        OperandType::FLOAT32,
+                };
+                outExpectedTypes = {OperandType::TENSOR_FLOAT32};
+            } else if (inputType == OperandType::TENSOR_FLOAT16) {
+                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
+                inExpectedTypes = {
+                        OperandType::TENSOR_FLOAT16,
+                        OperandType::TENSOR_INT32,
+                        OperandType::FLOAT16,
+                };
+                outExpectedTypes = {OperandType::TENSOR_FLOAT16};
+            } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM ||
+                       inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
+                if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
+                    NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
+                } else {
+                    NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
+                }
+                inExpectedTypes = {
+                        inputType,
+                        OperandType::TENSOR_INT32,
+                        OperandType::INT32,
+                };  // TODO(b/116699425): Make it UINT8.
+                outExpectedTypes = {inputType};
+            } else {
+                LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
+                return ANEURALNETWORKS_BAD_DATA;
+            }
+            const auto inputRank = operands[inputIndexes[0]].dimensions.size();
+            if (inputRank > 4) {
+                LOG(ERROR) << "Unsupported input tensor rank for operation " << opType;
+                return ANEURALNETWORKS_BAD_DATA;
+            }
+            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
+                                                 inExpectedTypes, outputCount, outputIndexes,
+                                                 outExpectedTypes);
+        }
+        case ANEURALNETWORKS_CAST: {
+            if (inputCount != 1 || outputCount != 1) {
+                logInvalidInOutNumber(1, 1);
+                return ANEURALNETWORKS_BAD_DATA;
+            }
+            auto inputOperand = operands[inputIndexes[0]];
+            auto outputOperand = operands[outputIndexes[0]];
+            auto inputType = inputOperand.type;
+            auto outputType = outputOperand.type;
+            std::vector<OperandType> inExpectedTypes;
+            std::vector<OperandType> outExpectedTypes;
+            if ((inputType == OperandType::TENSOR_FLOAT16 ||
+                 inputType == OperandType::TENSOR_FLOAT32 ||
+                 inputType == OperandType::TENSOR_INT32 ||
+                 inputType == OperandType::TENSOR_QUANT8_ASYMM) &&
+                (outputType == OperandType::TENSOR_FLOAT16 ||
+                 outputType == OperandType::TENSOR_FLOAT32 ||
+                 outputType == OperandType::TENSOR_INT32 ||
+                 outputType == OperandType::TENSOR_QUANT8_ASYMM)) {
+                inExpectedTypes = {inputType};
+                outExpectedTypes = {outputType};
+                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
+            } else if (inputType == OperandType::TENSOR_BOOL8 ||
+                       inputType == OperandType::TENSOR_QUANT16_ASYMM ||
+                       inputType == OperandType::TENSOR_QUANT16_SYMM ||
+                       inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED ||
+                       inputType == OperandType::TENSOR_QUANT8_SYMM) {
+                inExpectedTypes = {inputType};
+                outExpectedTypes = {inputType};  // Only identity CAST is supported.
+                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
+            } else {
+                LOG(ERROR) << "Unsupported data type for operation " << opType;
+                return ANEURALNETWORKS_BAD_DATA;
+            }
+            // Validate that output shape is equal to input shape if dimensions
+            // are already known.
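+            // For illustration: an input of dimensions {2, 3} and an output of
+            // dimensions {3, 2} is rejected below even though the element counts
+            // match, while an output with any 0 (unknown) dimension makes
+            // getNumberOfElements() return 0 and the comparison is skipped.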
+            auto getNumberOfElements = [](const hardware::hidl_vec<uint32_t>& dims) {
+                if (dims.size() == 0) {
+                    return 0;
+                }
+                return std::accumulate(dims.begin(), dims.end(), 1, std::multiplies<>());
+            };
+            if (inputOperand.dimensions.size() != 0 && outputOperand.dimensions.size() != 0 &&
+                getNumberOfElements(outputOperand.dimensions) != 0 &&
+                inputOperand.dimensions != outputOperand.dimensions) {
+                return ANEURALNETWORKS_BAD_DATA;
+            }
+            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
+                                                 inExpectedTypes, outputCount, outputIndexes,
+                                                 outExpectedTypes);
+        }
+        case ANEURALNETWORKS_MEAN: {
+            if (inputCount != 3 || outputCount != 1) {
+                logInvalidInOutNumber(3, 1);
+                return ANEURALNETWORKS_BAD_DATA;
+            }
+            const auto inputRank = operands[inputIndexes[0]].dimensions.size();
+            if (inputRank > 4) {
+                LOG(ERROR) << "Unsupported input tensor rank for operation " << opType;
+                return ANEURALNETWORKS_BAD_DATA;
+            }
+            auto inputType = operands[inputIndexes[0]].type;
+            if (inputType == OperandType::TENSOR_FLOAT32) {
+                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_1));
+            } else if (inputType == OperandType::TENSOR_FLOAT16) {
+                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
+            } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
+                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_1));
+            } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
+                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
+            } else {
+                LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
+                return ANEURALNETWORKS_BAD_DATA;
+            }
+            std::vector<OperandType> inExpectedTypes = {inputType, OperandType::TENSOR_INT32,
+                                                        OperandType::INT32};
+            std::vector<OperandType> outExpectedTypes = {inputType};
+            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
+                                                 inExpectedTypes, outputCount, outputIndexes,
+                                                 outExpectedTypes);
+        }
+        case ANEURALNETWORKS_ARGMAX:
+        case ANEURALNETWORKS_ARGMIN: {
+            if (inputCount != 2 || outputCount != 1) {
+                logInvalidInOutNumber(2, 1);
+                return ANEURALNETWORKS_BAD_DATA;
+            }
+            auto inputType = operands[inputIndexes[0]].type;
+            std::vector<OperandType> inExpectedTypes;
+            std::vector<OperandType> outExpectedTypes;
+            if (inputType == OperandType::TENSOR_FLOAT16 ||
+                inputType == OperandType::TENSOR_FLOAT32 ||
+                inputType == OperandType::TENSOR_INT32 ||
+                inputType == OperandType::TENSOR_QUANT8_ASYMM ||
+                inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
+                inExpectedTypes = {inputType, OperandType::INT32};
+                outExpectedTypes = {OperandType::TENSOR_INT32};
+            } else {
+                LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
+                return ANEURALNETWORKS_BAD_DATA;
+            }
+            NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
+            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
+                                                 inExpectedTypes, outputCount, outputIndexes,
+                                                 outExpectedTypes);
+        }
+        case ANEURALNETWORKS_EXPAND_DIMS: {
+            if (inputCount != 2 || outputCount != 1) {
+                logInvalidInOutNumber(2, 1);
+                return ANEURALNETWORKS_BAD_DATA;
+            }
+            auto inputType = operands[inputIndexes[0]].type;
+            std::vector<OperandType> inExpectedTypes;
+            std::vector<OperandType> outExpectedTypes;
+            if (inputType == OperandType::TENSOR_FLOAT16 ||
+                inputType == OperandType::TENSOR_FLOAT32 ||
+                inputType == OperandType::TENSOR_INT32 ||
+                inputType == OperandType::TENSOR_QUANT8_ASYMM ||
+                inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
+                inExpectedTypes = {inputType, OperandType::INT32};
+                outExpectedTypes = {inputType};
+            } else {
+                LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
+                return ANEURALNETWORKS_BAD_DATA;
+            }
+            if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
+                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
+            } else {
+                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
+            }
+            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
+                                                 inExpectedTypes, outputCount, outputIndexes,
+                                                 outExpectedTypes);
+        }
+        case ANEURALNETWORKS_SPLIT: {
+            if (inputCount != 3) {
+                LOG(ERROR) << "Invalid number of input operands (" << inputCount
+                           << ", expected 3) for operation " << opType;
+                return ANEURALNETWORKS_BAD_DATA;
+            }
+            auto inputType = operands[inputIndexes[0]].type;
+            if (inputType != OperandType::TENSOR_FLOAT16 &&
+                inputType != OperandType::TENSOR_FLOAT32 &&
+                inputType != OperandType::TENSOR_INT32 &&
+                inputType != OperandType::TENSOR_QUANT8_ASYMM &&
+                inputType != OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
+                LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
+                return ANEURALNETWORKS_BAD_DATA;
+            }
+            if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
+                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
+            } else {
+                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
+            }
+            std::vector<OperandType> inExpectedTypes = {inputType, OperandType::INT32,
+                                                        OperandType::INT32};
+            std::vector<OperandType> outExpectedTypes(outputCount, inputType);
+            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
+                                                 inExpectedTypes, outputCount, outputIndexes,
+                                                 outExpectedTypes);
+        }
+        case ANEURALNETWORKS_MAXIMUM:
+        case ANEURALNETWORKS_MINIMUM: {
+            if (inputCount != 2 || outputCount != 1) {
+                logInvalidInOutNumber(2, 1);
+                return ANEURALNETWORKS_BAD_DATA;
+            }
+            std::vector<OperandType> inExpectedTypes;
+            std::vector<OperandType> outExpectedTypes;
+            OperandType inputType = operands[inputIndexes[0]].type;
+            if (inputType == OperandType::TENSOR_FLOAT16 ||
+                inputType == OperandType::TENSOR_FLOAT32 ||
+                inputType == OperandType::TENSOR_INT32 ||
+                inputType == OperandType::TENSOR_QUANT8_ASYMM ||
+                inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
+                inExpectedTypes = {inputType, inputType};
+                outExpectedTypes = {inputType};
+            } else {
+                LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
+                return ANEURALNETWORKS_BAD_DATA;
+            }
+            if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
+                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
+            } else {
+                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
+            }
+            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
+                                                 inExpectedTypes, outputCount, outputIndexes,
+                                                 outExpectedTypes);
+        }
+        case ANEURALNETWORKS_GROUPED_CONV_2D: {
+            if ((inputCount != 12 && inputCount != 9) || outputCount != 1) {
+                LOG(ERROR) << "Invalid number of input operands (" << inputCount
+                           << ", expected 12 or 9) or output operands (" << outputCount
+                           << ", expected 1) for operation " << opType;
+                return ANEURALNETWORKS_BAD_DATA;
+            }
+            auto inputType = operands[inputIndexes[0]].type;
+            auto filterType = operands[inputIndexes[1]].type;
+            std::vector<OperandType> inExpectedTypes;
+            std::vector<OperandType> outExpectedTypes;
+            if (inputType == OperandType::TENSOR_FLOAT32) {
+                inExpectedTypes = {OperandType::TENSOR_FLOAT32, OperandType::TENSOR_FLOAT32,
+                                   OperandType::TENSOR_FLOAT32, OperandType::INT32,
+                                   OperandType::INT32,          OperandType::INT32,
+                                   OperandType::INT32,          OperandType::INT32};
+                outExpectedTypes = {OperandType::TENSOR_FLOAT32};
+            } else if (inputType == OperandType::TENSOR_FLOAT16) {
+                inExpectedTypes = {OperandType::TENSOR_FLOAT16, OperandType::TENSOR_FLOAT16,
+                                   OperandType::TENSOR_FLOAT16, OperandType::INT32,
+                                   OperandType::INT32,          OperandType::INT32,
+                                   OperandType::INT32,          OperandType::INT32};
+                outExpectedTypes = {OperandType::TENSOR_FLOAT16};
+            } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM ||
+                       inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
+                if (filterType != inputType &&
+                    filterType != OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL) {
+                    LOG(ERROR) << "Unsupported filter tensor type for operation " << opType;
+                    return ANEURALNETWORKS_BAD_DATA;
+                }
+
+                if (filterType == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL &&
+                    std::get<Operand::SymmPerChannelQuantParams>(
+                            operands[inputIndexes[1]].extraParams)
+                                    .channelDim != 0) {
+                    LOG(ERROR) << "Unsupported filter tensor channel dimension for operation "
+                               << opType;
+                    return ANEURALNETWORKS_BAD_DATA;
+                }
+
+                inExpectedTypes = {
+                        inputType,          filterType,         OperandType::TENSOR_INT32,
+                        OperandType::INT32, OperandType::INT32, OperandType::INT32,
+                        OperandType::INT32, OperandType::INT32};
+                outExpectedTypes = {inputType};
+            } else {
+                LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
+                return ANEURALNETWORKS_BAD_DATA;
+            }
+
+            if (inputCount == 12) {
+                std::vector<OperandType> explicitScalarTypes(3, OperandType::INT32);
+                inExpectedTypes.insert(inExpectedTypes.end(), explicitScalarTypes.begin(),
+                                       explicitScalarTypes.end());
+            }
+            inExpectedTypes.push_back(OperandType::BOOL);
+            if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
+                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
+            } else {
+                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
+            }
+            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
+                                                 inExpectedTypes, outputCount, outputIndexes,
+                                                 outExpectedTypes);
+        }
+        case ANEURALNETWORKS_TILE: {
+            if (inputCount != 2 || outputCount != 1) {
+                logInvalidInOutNumber(2, 1);
+                return ANEURALNETWORKS_BAD_DATA;
+            }
+            auto inputType = operands[inputIndexes[0]].type;
+            std::vector<OperandType> inExpectedTypes;
+            std::vector<OperandType> outExpectedTypes;
+            if (inputType == OperandType::TENSOR_FLOAT16 ||
+                inputType == OperandType::TENSOR_FLOAT32 ||
+                inputType == OperandType::TENSOR_INT32 ||
+                inputType == OperandType::TENSOR_QUANT8_ASYMM ||
+                inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
+                inExpectedTypes = {inputType, OperandType::TENSOR_INT32};
+                outExpectedTypes = {inputType};
+            } else {
+                LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
+                return ANEURALNETWORKS_BAD_DATA;
+            }
+            if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
+                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
+            } else {
+                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
+            }
+            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
+                                                 inExpectedTypes, outputCount, outputIndexes,
+                                                 outExpectedTypes);
+        }
+        case ANEURALNETWORKS_POW: {
+            if (inputCount != 2 || outputCount != 1) {
+                logInvalidInOutNumber(2, 1);
+                return ANEURALNETWORKS_BAD_DATA;
+            }
+            auto inputType = operands[inputIndexes[0]].type;
+            std::vector<OperandType> inExpectedTypes;
+            std::vector<OperandType> outExpectedTypes;
+            if (inputType == OperandType::TENSOR_FLOAT16 ||
+                inputType == OperandType::TENSOR_FLOAT32) {
+                inExpectedTypes = {inputType, inputType};
+                outExpectedTypes = {inputType};
+            } else {
+                LOG(ERROR) << "Unsupported input tensor type for operation " << opType;
+                return ANEURALNETWORKS_BAD_DATA;
+            }
+            if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
+                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
+            } else {
+                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
+            }
+            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
+                                                 inExpectedTypes, outputCount, outputIndexes,
+                                                 outExpectedTypes);
+        }
+        case ANEURALNETWORKS_IF: {
+            NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
+            return validateIfOperation(inputCount, inputIndexes, outputCount, outputIndexes,
+                                       operands, helper)
+                           ? ANEURALNETWORKS_NO_ERROR
+                           : ANEURALNETWORKS_BAD_DATA;
+        }
+        case ANEURALNETWORKS_WHILE: {
+            NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
+            return validateWhileOperation(inputCount, inputIndexes, outputCount, outputIndexes,
+                                          operands, helper)
+                           ? ANEURALNETWORKS_NO_ERROR
+                           : ANEURALNETWORKS_BAD_DATA;
+        }
+        default: {
+            const OperationRegistration* operationRegistration =
+                    BuiltinOperationResolver::get()->findOperation(
+                            static_cast<OperationType>(opType));
+            if (operationRegistration == nullptr) {
+                if (0 <= opType && opType < kNumberOfOperationTypes) {
+                    LOG(ERROR) << opType << " not registered";
+                } else {
+                    LOG(ERROR) << "Operation type " << opType << " out of the range [0, "
+                               << kNumberOfOperationTypes << ")";
+                }
+                return ANEURALNETWORKS_UNEXPECTED_NULL;
+            }
+            if (operationRegistration->validate == nullptr) {
+                LOG(ERROR) << "Incomplete operation registration: " << opType;
+                return ANEURALNETWORKS_UNEXPECTED_NULL;
+            }
+            OperationValidationContext context(operationRegistration->name, inputCount,
+                                               inputIndexes, outputCount, outputIndexes,
+                                               operands.data());
+            const auto maybeVersion = operationRegistration->validate(&context);
+            if (!maybeVersion.has_value()) {
+                LOG(ERROR) << "Validation failed for operation " << opType << ": "
+                           << maybeVersion.error();
+                return ANEURALNETWORKS_BAD_DATA;
+            }
+            if (!validateVersion(&context, convert(halVersion), maybeVersion.value())) {
+                LOG(ERROR) << "Validation failed for operation " << opType;
+                return ANEURALNETWORKS_BAD_DATA;
+            }
+            return ANEURALNETWORKS_NO_ERROR;
+        }
+    }
+}
+
+ErrorStatus convertResultCodeToErrorStatus(int resultCode) {
+    switch (resultCode) {
+        case ANEURALNETWORKS_NO_ERROR:
+            return ErrorStatus::NONE;
+
+        case ANEURALNETWORKS_BAD_DATA:
+        case ANEURALNETWORKS_UNEXPECTED_NULL:
+            return ErrorStatus::INVALID_ARGUMENT;
+
+        case ANEURALNETWORKS_OUTPUT_INSUFFICIENT_SIZE:
+            return ErrorStatus::OUTPUT_INSUFFICIENT_SIZE;
+
+        case ANEURALNETWORKS_UNAVAILABLE_DEVICE:
+            return ErrorStatus::DEVICE_UNAVAILABLE;
+
+        case ANEURALNETWORKS_BAD_STATE:
+        case ANEURALNETWORKS_INCOMPLETE:
+        case ANEURALNETWORKS_OP_FAILED:
+        case ANEURALNETWORKS_OUT_OF_MEMORY:
+        case ANEURALNETWORKS_UNMAPPABLE:
+        case ANEURALNETWORKS_DEAD_OBJECT:
+            return ErrorStatus::GENERAL_FAILURE;
+
+        case ANEURALNETWORKS_MISSED_DEADLINE_TRANSIENT:
+            return ErrorStatus::MISSED_DEADLINE_TRANSIENT;
+        case ANEURALNETWORKS_MISSED_DEADLINE_PERSISTENT:
+            return ErrorStatus::MISSED_DEADLINE_PERSISTENT;
+        case ANEURALNETWORKS_RESOURCE_EXHAUSTED_TRANSIENT:
+            return ErrorStatus::RESOURCE_EXHAUSTED_TRANSIENT;
+        case ANEURALNETWORKS_RESOURCE_EXHAUSTED_PERSISTENT:
+            return ErrorStatus::RESOURCE_EXHAUSTED_PERSISTENT;
+    }
+    LOG(ERROR) << "Unknown result code " << resultCode
+               << " mapped to ErrorStatus::GENERAL_FAILURE";
+    return ErrorStatus::GENERAL_FAILURE;
+}
+
+int convertErrorStatusToResultCode(ErrorStatus status) {
+    switch (status) {
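+        // Note that this switch is not the inverse of convertResultCodeToErrorStatus()
+        // above: for example, ANEURALNETWORKS_DEAD_OBJECT collapses to
+        // ErrorStatus::GENERAL_FAILURE there, which maps back to
+        // ANEURALNETWORKS_OP_FAILED here, so the pair does not round-trip every value.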
+        case ErrorStatus::NONE:
+            return ANEURALNETWORKS_NO_ERROR;
+        case ErrorStatus::DEVICE_UNAVAILABLE:
+            return ANEURALNETWORKS_UNAVAILABLE_DEVICE;
+        case ErrorStatus::GENERAL_FAILURE:
+            return ANEURALNETWORKS_OP_FAILED;
+        case ErrorStatus::OUTPUT_INSUFFICIENT_SIZE:
+            return ANEURALNETWORKS_OUTPUT_INSUFFICIENT_SIZE;
+        case ErrorStatus::INVALID_ARGUMENT:
+            return ANEURALNETWORKS_BAD_DATA;
+        case ErrorStatus::MISSED_DEADLINE_TRANSIENT:
+            return ANEURALNETWORKS_MISSED_DEADLINE_TRANSIENT;
+        case ErrorStatus::MISSED_DEADLINE_PERSISTENT:
+            return ANEURALNETWORKS_MISSED_DEADLINE_PERSISTENT;
+        case ErrorStatus::RESOURCE_EXHAUSTED_TRANSIENT:
+            return ANEURALNETWORKS_RESOURCE_EXHAUSTED_TRANSIENT;
+        case ErrorStatus::RESOURCE_EXHAUSTED_PERSISTENT:
+            return ANEURALNETWORKS_RESOURCE_EXHAUSTED_PERSISTENT;
+        case ErrorStatus::DEAD_OBJECT:
+            return ANEURALNETWORKS_DEAD_OBJECT;
+    }
+    LOG(ERROR) << "Unknown ErrorStatus " << status << " mapped to ANEURALNETWORKS_OP_FAILED";
+    return ANEURALNETWORKS_OP_FAILED;
+}
+
+V1_3::ErrorStatus convertResultCodeToHalErrorStatus(int resultCode) {
+    return convertToV1_3(convertResultCodeToErrorStatus(resultCode));
+}
+
+int convertErrorStatusToResultCode(V1_3::ErrorStatus status) {
+    return convertErrorStatusToResultCode(uncheckedConvert(status));
+}
+
+std::tuple<int, std::vector<OutputShape>, Timing> getExecutionResult(
+        V1_3::ErrorStatus status, const hardware::hidl_vec<V1_2::OutputShape>& outputShapes,
+        const V1_2::Timing& timing) {
+    return getExecutionResult(uncheckedConvert(status), uncheckedConvert(outputShapes),
+                              uncheckedConvert(timing));
+}
+
+std::tuple<int, std::vector<OutputShape>, Timing> getExecutionResult(
+        ErrorStatus status, std::vector<OutputShape> outputShapes, Timing timing) {
+    constexpr Timing kNoTiming = {std::numeric_limits<uint64_t>::max(),
+                                  std::numeric_limits<uint64_t>::max()};
+    const int n = convertErrorStatusToResultCode(status);
+    if (status != ErrorStatus::NONE && status != ErrorStatus::OUTPUT_INSUFFICIENT_SIZE &&
+        !outputShapes.empty()) {
+        LOG(ERROR) << "The driver returned OutputShapes when it shouldn't.";
+        outputShapes.clear();
+    }
+    if (status != ErrorStatus::NONE && timing != kNoTiming) {
+        LOG(ERROR) << "The driver returned Timing when it shouldn't.";
+        timing = kNoTiming;
+    }
+    return {n, std::move(outputShapes), timing};
+}
+
+// Capabilities::operandPerformance utilities.
+// The field Capabilities::operandPerformance is a vector sorted by the field
+// Capabilities::OperandPerformance::type.
+
+template <HalVersion version>
+hardware::hidl_vec<VersionedOperandPerformance<version>> nonExtensionOperandPerformance(
+        V1_0::PerformanceInfo perf) {
+    using OpPerf = VersionedOperandPerformance<version>;
+
+    // Note: range presents enumerators in declaration order, not in numerical order.
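+    // For illustration: this is why the result is sorted before returning below;
+    // lookup() binary-searches operandPerformance with std::lower_bound, which
+    // requires the vector to be ordered by the numeric value of .type, and
+    // declaration order is not guaranteed to match numeric order.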
+    static constexpr hardware::hidl_enum_range<VersionedOperandType<version>> kOperandTypeRange;
+
+    std::vector<OpPerf> ret;
+    ret.reserve(kOperandTypeRange.end() - kOperandTypeRange.begin());
+    for (VersionedOperandType<version> type : kOperandTypeRange) {
+        if (static_cast<V1_3::OperandType>(type) != V1_3::OperandType::SUBGRAPH) {
+            ret.push_back(OpPerf{type, perf});
+        }
+    }
+    std::sort(ret.begin(), ret.end(),
+              [](const OpPerf& a, const OpPerf& b) { return a.type < b.type; });
+
+    return ret;
+}
+
+template hardware::hidl_vec<V1_2::Capabilities::OperandPerformance>
+nonExtensionOperandPerformance<HalVersion::V1_2>(V1_0::PerformanceInfo perf);
+template hardware::hidl_vec<V1_3::Capabilities::OperandPerformance>
+nonExtensionOperandPerformance<HalVersion::V1_3>(V1_0::PerformanceInfo perf);
+
+template <HalVersion version>
+void update(hardware::hidl_vec<VersionedOperandPerformance<version>>* operandPerformance,
+            VersionedOperandType<version> type, V1_0::PerformanceInfo perf) {
+    CHECK(operandPerformance != nullptr);
+    const auto it =
+            std::lower_bound(operandPerformance->begin(), operandPerformance->end(), type,
+                             [](const VersionedOperandPerformance<version>& perf,
+                                VersionedOperandType<version> type) { return perf.type < type; });
+    CHECK(it != operandPerformance->end())
+            << toString(type) << " not in " << toString(*operandPerformance);
+    it->info = perf;
+}
+
+void update(hardware::hidl_vec<V1_2::Capabilities::OperandPerformance>* operandPerformance,
+            V1_2::OperandType type, V1_0::PerformanceInfo perf) {
+    update<HalVersion::V1_2>(operandPerformance, type, perf);
+}
+void update(hardware::hidl_vec<V1_3::Capabilities::OperandPerformance>* operandPerformance,
+            V1_3::OperandType type, V1_0::PerformanceInfo perf) {
+    update<HalVersion::V1_3>(operandPerformance, type, perf);
+}
+
+template <HalVersion version>
+V1_0::PerformanceInfo lookup(
+        const hardware::hidl_vec<VersionedOperandPerformance<version>>& operandPerformance,
+        VersionedOperandType<version> type) {
+    const auto it = std::lower_bound(operandPerformance.begin(), operandPerformance.end(), type,
+                                     [](const VersionedOperandPerformance<version>& perf,
+                                        VersionedOperandType<version> type) {
+                                         return static_cast<V1_3::OperandType>(perf.type) <
+                                                static_cast<V1_3::OperandType>(type);
+                                     });
+    if (it == operandPerformance.end()) {
+        LOG(WARNING) << "No PerformanceInfo for " << toString(type);
+        return kNoPerformanceInfo;
+    } else {
+        return it->info;
+    }
+}
+
+V1_0::PerformanceInfo lookup(
+        const hardware::hidl_vec<V1_2::Capabilities::OperandPerformance>& operandPerformance,
+        V1_2::OperandType type) {
+    return lookup<HalVersion::V1_2>(operandPerformance, type);
+}
+V1_0::PerformanceInfo lookup(
+        const hardware::hidl_vec<V1_3::Capabilities::OperandPerformance>& operandPerformance,
+        V1_3::OperandType type) {
+    CHECK(type != V1_3::OperandType::SUBGRAPH)
+            << "Use Capabilities::ifPerformance or Capabilities::whilePerformance";
+    return lookup<HalVersion::V1_3>(operandPerformance, type);
+}
+
+// Versioning
+
+// In Android P, most data types are treated as having the same performance as
+// TENSOR_QUANT8_ASYMM. This array must be in sorted order.
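+// For illustration: sortedness matters because makeQuantized8PerformanceConsistentWithP()
+// below copies this array directly into an operandPerformance vector, which must satisfy
+// the sorted-by-type invariant that lookup()'s binary search relies on (by numeric value,
+// INT32 < UINT32 < TENSOR_INT32 < OEM < TENSOR_OEM_BYTE).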
+static const V1_3::OperandType kQuantized8PerformanceConsistentWithP[] = {
+        V1_3::OperandType::INT32, V1_3::OperandType::UINT32, V1_3::OperandType::TENSOR_INT32,
+        V1_3::OperandType::OEM, V1_3::OperandType::TENSOR_OEM_BYTE};
+
+static bool isQuantized8PerformanceConsistentWithP(const V1_2::Capabilities& capabilities) {
+    const V1_0::PerformanceInfo quantized8Performance =
+            lookup(capabilities.operandPerformance, V1_2::OperandType::TENSOR_QUANT8_ASYMM);
+    return std::all_of(std::begin(kQuantized8PerformanceConsistentWithP),
+                       std::end(kQuantized8PerformanceConsistentWithP),
+                       [quantized8Performance, &capabilities](V1_3::OperandType type) {
+                           return quantized8Performance ==
+                                  lookup(capabilities.operandPerformance,
+                                         static_cast<V1_2::OperandType>(type));
+                       });
+}
+
+static bool isQuantized8PerformanceConsistentWithP(const V1_3::Capabilities& capabilities) {
+    const V1_0::PerformanceInfo quantized8Performance =
+            lookup(capabilities.operandPerformance, V1_3::OperandType::TENSOR_QUANT8_ASYMM);
+    return std::all_of(std::begin(kQuantized8PerformanceConsistentWithP),
+                       std::end(kQuantized8PerformanceConsistentWithP),
+                       [quantized8Performance, &capabilities](V1_3::OperandType type) {
+                           return quantized8Performance ==
+                                  lookup(capabilities.operandPerformance, type);
+                       });
+}
+
+static hardware::hidl_vec<V1_2::Capabilities::OperandPerformance>
+makeQuantized8PerformanceConsistentWithP(V1_0::PerformanceInfo quantized8Performance) {
+    hardware::hidl_vec<V1_2::Capabilities::OperandPerformance> ret(
+            std::size(kQuantized8PerformanceConsistentWithP));
+    std::transform(std::begin(kQuantized8PerformanceConsistentWithP),
+                   std::end(kQuantized8PerformanceConsistentWithP), ret.begin(),
+                   [quantized8Performance](
+                           V1_3::OperandType type) -> V1_2::Capabilities::OperandPerformance {
+                       return {static_cast<V1_2::OperandType>(type), quantized8Performance};
+                   });
+    return ret;
+}
+
+bool compliantWithV1_0(const V1_0::Capabilities&) {
+    return true;
+}
+
+bool compliantWithV1_0(const V1_1::Capabilities& capabilities) {
+    return capabilities.relaxedFloat32toFloat16Performance == capabilities.float32Performance;
+}
+
+bool compliantWithV1_0(const V1_2::Capabilities& capabilities) {
+    const V1_0::PerformanceInfo perfTensorFloat32 =
+            lookup(capabilities.operandPerformance, V1_2::OperandType::TENSOR_FLOAT32);
+    const V1_0::PerformanceInfo perfFloat32 =
+            lookup(capabilities.operandPerformance, V1_2::OperandType::FLOAT32);
+    if (perfTensorFloat32 != perfFloat32 ||
+        perfTensorFloat32 != capabilities.relaxedFloat32toFloat16PerformanceTensor ||
+        perfFloat32 != capabilities.relaxedFloat32toFloat16PerformanceScalar) {
+        return false;
+    }
+
+    return isQuantized8PerformanceConsistentWithP(capabilities);
+}
+
+bool compliantWithV1_0(const V1_3::Capabilities& capabilities) {
+    const V1_0::PerformanceInfo perfTensorFloat32 =
+            lookup(capabilities.operandPerformance, V1_3::OperandType::TENSOR_FLOAT32);
+    const V1_0::PerformanceInfo perfFloat32 =
+            lookup(capabilities.operandPerformance, V1_3::OperandType::FLOAT32);
+    if (perfTensorFloat32 != perfFloat32 ||
+        perfTensorFloat32 != capabilities.relaxedFloat32toFloat16PerformanceTensor ||
+        perfFloat32 != capabilities.relaxedFloat32toFloat16PerformanceScalar) {
+        return false;
+    }
+
+    return isQuantized8PerformanceConsistentWithP(capabilities);
+}
+
+bool compliantWithV1_1(const V1_0::Capabilities&) {
+    return true;
+}
+
+bool compliantWithV1_1(const V1_1::Capabilities&) {
+    return true;
+}
+
+bool compliantWithV1_1(const V1_2::Capabilities& capabilities) {
+    if ((capabilities.relaxedFloat32toFloat16PerformanceTensor !=
+         capabilities.relaxedFloat32toFloat16PerformanceScalar) ||
+        (lookup(capabilities.operandPerformance, V1_2::OperandType::TENSOR_FLOAT32) !=
+         lookup(capabilities.operandPerformance, V1_2::OperandType::FLOAT32))) {
+        return false;
+    }
+
+    return isQuantized8PerformanceConsistentWithP(capabilities);
+}
+
+bool compliantWithV1_1(const V1_3::Capabilities& capabilities) {
+    if ((capabilities.relaxedFloat32toFloat16PerformanceTensor !=
+         capabilities.relaxedFloat32toFloat16PerformanceScalar) ||
+        (lookup(capabilities.operandPerformance, V1_3::OperandType::TENSOR_FLOAT32) !=
+         lookup(capabilities.operandPerformance, V1_3::OperandType::FLOAT32))) {
+        return false;
+    }
+
+    return isQuantized8PerformanceConsistentWithP(capabilities);
+}
+
+bool compliantWithV1_2(const V1_0::Capabilities&) {
+    return true;
+}
+
+bool compliantWithV1_2(const V1_1::Capabilities&) {
+    return true;
+}
+
+bool compliantWithV1_2(const V1_2::Capabilities&) {
+    return true;
+}
+
+bool compliantWithV1_2(const V1_3::Capabilities&) {
+    return true;
+}
+
+bool compliantWithV1_3(const V1_0::Capabilities&) {
+    return true;
+}
+
+bool compliantWithV1_3(const V1_1::Capabilities&) {
+    return true;
+}
+
+bool compliantWithV1_3(const V1_2::Capabilities&) {
+    return true;
+}
+
+bool compliantWithV1_3(const V1_3::Capabilities&) {
+    return true;
+}
+
+V1_0::ErrorStatus convertToV1_0(V1_0::ErrorStatus status) {
+    return status;
+}
+
+V1_0::ErrorStatus convertToV1_0(V1_3::ErrorStatus status) {
+    switch (status) {
+        case V1_3::ErrorStatus::NONE:
+            return V1_0::ErrorStatus::NONE;
+        case V1_3::ErrorStatus::DEVICE_UNAVAILABLE:
+            return V1_0::ErrorStatus::DEVICE_UNAVAILABLE;
+        case V1_3::ErrorStatus::GENERAL_FAILURE:
+            return V1_0::ErrorStatus::GENERAL_FAILURE;
+        case V1_3::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE:
+            return V1_0::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE;
+        case V1_3::ErrorStatus::INVALID_ARGUMENT:
+            return V1_0::ErrorStatus::INVALID_ARGUMENT;
+        case V1_3::ErrorStatus::MISSED_DEADLINE_TRANSIENT:
+            return V1_0::ErrorStatus::GENERAL_FAILURE;
+        case V1_3::ErrorStatus::MISSED_DEADLINE_PERSISTENT:
+            return V1_0::ErrorStatus::GENERAL_FAILURE;
+        case V1_3::ErrorStatus::RESOURCE_EXHAUSTED_TRANSIENT:
+            return V1_0::ErrorStatus::GENERAL_FAILURE;
+        case V1_3::ErrorStatus::RESOURCE_EXHAUSTED_PERSISTENT:
+            return V1_0::ErrorStatus::GENERAL_FAILURE;
+    }
+    LOG(ERROR) << "Unknown ErrorStatus: " << toString(status) << " mapped to GENERAL_FAILURE";
+    return V1_0::ErrorStatus::GENERAL_FAILURE;
+}
+
+V1_3::ErrorStatus convertToV1_3(V1_0::ErrorStatus status) {
+    return static_cast<V1_3::ErrorStatus>(status);
+}
+
+V1_3::ErrorStatus convertToV1_3(V1_3::ErrorStatus status) {
+    return status;
+}
+
+static V1_0::OperationType uncheckedConvertToV1_0(V1_1::OperationType type) {
+    return static_cast<V1_0::OperationType>(type);
+}
+
+static V1_0::OperationType uncheckedConvertToV1_0(V1_2::OperationType type) {
+    return static_cast<V1_0::OperationType>(type);
+}
+
+V1_0::OperationType uncheckedConvertToV1_0(V1_3::OperationType type) {
+    return static_cast<V1_0::OperationType>(type);
+}
+
+static V1_1::OperationType convertToV1_1(V1_0::OperationType type) {
+    return static_cast<V1_1::OperationType>(type);
+}
+
+static V1_1::OperationType uncheckedConvertToV1_1(V1_2::OperationType type) {
+    return static_cast<V1_1::OperationType>(type);
+}
+
+V1_1::OperationType uncheckedConvertToV1_1(V1_3::OperationType type) {
+    return static_cast<V1_1::OperationType>(type);
+}
+
+static V1_2::OperationType convertToV1_2(V1_0::OperationType type) {
+    return static_cast<V1_2::OperationType>(type);
+}
+
+static V1_2::OperationType convertToV1_2(V1_1::OperationType type) {
+    return static_cast<V1_2::OperationType>(type);
+}
+
+V1_2::OperationType uncheckedConvertToV1_2(V1_3::OperationType type) {
+    return static_cast<V1_2::OperationType>(type);
+}
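+
+// A note on the naming convention of the OperationType conversions above and below:
+// convertToVX_Y() is used where every source value is representable in the target
+// version, while uncheckedConvertToVX_Y() narrows and assumes the caller has already
+// established compliance (e.g. via the compliantWithVX_Y() checks). Both reduce to
+// static_cast because the enum values are shared across HAL versions.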
+
+static V1_3::OperationType convertToV1_3(V1_0::OperationType type) {
+    return static_cast<V1_3::OperationType>(type);
+}
+
+static V1_3::OperationType convertToV1_3(V1_1::OperationType type) {
+    return static_cast<V1_3::OperationType>(type);
+}
+
+static V1_3::OperationType convertToV1_3(V1_2::OperationType type) {
+    return static_cast<V1_3::OperationType>(type);
+}
+
+V1_0::Capabilities convertToV1_0(const V1_0::Capabilities& capabilities) {
+    return capabilities;
+}
+
+V1_0::Capabilities convertToV1_0(const V1_1::Capabilities& capabilities) {
+    if (!compliantWithV1_0(capabilities)) {
+        LOG(ERROR) << "Upcasting non-compliant capabilities " << toString(capabilities)
+                   << " from V1_1::Capabilities to V1_0::Capabilities";
+    }
+    return {.float32Performance = capabilities.float32Performance,
+            .quantized8Performance = capabilities.quantized8Performance};
+}
+
+V1_0::Capabilities convertToV1_0(const V1_2::Capabilities& capabilities) {
+    if (!compliantWithV1_0(capabilities)) {
+        LOG(ERROR) << "Upcasting non-compliant capabilities " << toString(capabilities)
+                   << " from V1_2::Capabilities to V1_0::Capabilities";
+    }
+    return {.float32Performance =
+                    lookup(capabilities.operandPerformance, V1_2::OperandType::TENSOR_FLOAT32),
+            .quantized8Performance = lookup(capabilities.operandPerformance,
+                                            V1_2::OperandType::TENSOR_QUANT8_ASYMM)};
+}
+
+V1_0::Capabilities convertToV1_0(const V1_3::Capabilities& capabilities) {
+    if (!compliantWithV1_0(capabilities)) {
+        LOG(ERROR) << "Upcasting non-compliant capabilities " << toString(capabilities)
+                   << " from V1_3::Capabilities to V1_0::Capabilities";
+    }
+    return {.float32Performance =
+                    lookup(capabilities.operandPerformance, V1_3::OperandType::TENSOR_FLOAT32),
+            .quantized8Performance = lookup(capabilities.operandPerformance,
+                                            V1_3::OperandType::TENSOR_QUANT8_ASYMM)};
+}
+
+V1_1::Capabilities convertToV1_1(const V1_0::Capabilities& capabilities) {
+    return {.float32Performance = capabilities.float32Performance,
+            .quantized8Performance = capabilities.quantized8Performance,
+            .relaxedFloat32toFloat16Performance = capabilities.float32Performance};
+}
+
+V1_1::Capabilities convertToV1_1(const V1_1::Capabilities& capabilities) {
+    return capabilities;
+}
+
+V1_1::Capabilities convertToV1_1(const V1_2::Capabilities& capabilities) {
+    if (!compliantWithV1_1(capabilities)) {
+        LOG(ERROR) << "Upcasting non-compliant capabilities " << toString(capabilities)
+                   << " from V1_2::Capabilities to V1_1::Capabilities";
+    }
+    return {.float32Performance =
+                    lookup(capabilities.operandPerformance, V1_2::OperandType::TENSOR_FLOAT32),
+            .quantized8Performance = lookup(capabilities.operandPerformance,
+                                            V1_2::OperandType::TENSOR_QUANT8_ASYMM),
+            .relaxedFloat32toFloat16Performance =
+                    capabilities.relaxedFloat32toFloat16PerformanceTensor};
+}
+
+V1_1::Capabilities convertToV1_1(const V1_3::Capabilities& capabilities) {
+    if (!compliantWithV1_1(capabilities)) {
+        LOG(ERROR) << "Upcasting non-compliant capabilities " << toString(capabilities)
+                   << " from V1_3::Capabilities to V1_1::Capabilities";
+    }
+    return {.float32Performance =
+                    lookup(capabilities.operandPerformance, V1_3::OperandType::TENSOR_FLOAT32),
+            .quantized8Performance = lookup(capabilities.operandPerformance,
+                                            V1_3::OperandType::TENSOR_QUANT8_ASYMM),
+            .relaxedFloat32toFloat16Performance =
+                    capabilities.relaxedFloat32toFloat16PerformanceTensor};
+}
+
+V1_2::Capabilities convertToV1_2(const V1_0::Capabilities& capabilities) {
+    V1_2::Capabilities ret = {
+            .relaxedFloat32toFloat16PerformanceScalar = capabilities.float32Performance,
+            .operandPerformance =
+                    makeQuantized8PerformanceConsistentWithP(capabilities.quantized8Performance)};
+    auto& opPerf = ret.operandPerformance;
+    opPerf.resize(opPerf.size() + 2);
+    opPerf[opPerf.size() - 2] = {V1_2::OperandType::TENSOR_FLOAT32,
+                                 capabilities.float32Performance};
+    opPerf[opPerf.size() - 1] = {V1_2::OperandType::FLOAT32, capabilities.float32Performance};
+    using OperandPerformance = V1_2::Capabilities::OperandPerformance;
+    std::sort(opPerf.begin(), opPerf.end(),
+              [](const OperandPerformance& a, const OperandPerformance& b) {
+                  return a.type < b.type;
+              });
+    return ret;
+}
+
+V1_2::Capabilities convertToV1_2(const V1_1::Capabilities& capabilities) {
+    V1_2::Capabilities ret = {.relaxedFloat32toFloat16PerformanceScalar =
+                                      capabilities.relaxedFloat32toFloat16Performance,
+                              .relaxedFloat32toFloat16PerformanceTensor =
+                                      capabilities.relaxedFloat32toFloat16Performance,
+                              .operandPerformance = makeQuantized8PerformanceConsistentWithP(
+                                      capabilities.quantized8Performance)};
+    auto& opPerf = ret.operandPerformance;
+    opPerf.resize(opPerf.size() + 2);
+    opPerf[opPerf.size() - 2] = {V1_2::OperandType::TENSOR_FLOAT32,
+                                 capabilities.float32Performance};
+    opPerf[opPerf.size() - 1] = {V1_2::OperandType::FLOAT32, capabilities.float32Performance};
+    using OperandPerformance = V1_2::Capabilities::OperandPerformance;
+    std::sort(opPerf.begin(), opPerf.end(),
+              [](const OperandPerformance& a, const OperandPerformance& b) {
+                  return a.type < b.type;
+              });
+    return ret;
+}
+
+V1_2::Capabilities convertToV1_2(const V1_2::Capabilities& capabilities) {
+    return capabilities;
+}
+
+V1_2::Capabilities convertToV1_2(const V1_3::Capabilities& capabilities) {
+    V1_2::Capabilities ret = {
+            .relaxedFloat32toFloat16PerformanceScalar =
+                    capabilities.relaxedFloat32toFloat16PerformanceScalar,
+            .relaxedFloat32toFloat16PerformanceTensor =
+                    capabilities.relaxedFloat32toFloat16PerformanceTensor,
+    };
+    const auto& inputOpPerf = capabilities.operandPerformance;
+    hardware::hidl_vec<V1_3::Capabilities::OperandPerformance> opPerfSupported;
+    opPerfSupported.resize(inputOpPerf.size());
+    auto last =
+            std::copy_if(inputOpPerf.begin(), inputOpPerf.end(), opPerfSupported.begin(),
+                         [](V1_3::Capabilities::OperandPerformance opPerf) {
+                             return validOperandType(static_cast<V1_2::OperandType>(opPerf.type));
+                         });
+    opPerfSupported.resize(std::distance(opPerfSupported.begin(), last));
+
+    auto& convertedOpPerf = ret.operandPerformance;
+    convertedOpPerf.resize(opPerfSupported.size());
+    std::transform(opPerfSupported.begin(), opPerfSupported.end(), convertedOpPerf.begin(),
+                   [](V1_3::Capabilities::OperandPerformance opPerf) {
+                       return V1_2::Capabilities::OperandPerformance{
+                               static_cast<V1_2::OperandType>(opPerf.type), opPerf.info};
+                   });
+    return ret;
+}
+
+V1_3::Capabilities convertToV1_3(const V1_0::Capabilities& capabilities) {
+    return convertToV1_3(convertToV1_2(capabilities));
+}
+
+V1_3::Capabilities convertToV1_3(const V1_1::Capabilities& capabilities) {
+    return convertToV1_3(convertToV1_2(capabilities));
+}
+
+V1_3::Capabilities convertToV1_3(const V1_2::Capabilities& capabilities) {
+    V1_3::Capabilities ret = {
+            .relaxedFloat32toFloat16PerformanceScalar =
+                    capabilities.relaxedFloat32toFloat16PerformanceScalar,
+            .relaxedFloat32toFloat16PerformanceTensor =
+                    capabilities.relaxedFloat32toFloat16PerformanceTensor,
+            .ifPerformance = kNoPerformanceInfo,
+            .whilePerformance = kNoPerformanceInfo,
+    };
+    auto& opPerf = ret.operandPerformance;
+    opPerf.resize(capabilities.operandPerformance.size());
+    std::transform(capabilities.operandPerformance.begin(), capabilities.operandPerformance.end(),
+                   opPerf.begin(), [](V1_2::Capabilities::OperandPerformance opPerf) {
+                       return V1_3::Capabilities::OperandPerformance{
+                               static_cast<V1_3::OperandType>(opPerf.type), opPerf.info};
+                   });
+    return ret;
+}
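The convertToV1_2 overloads above append TENSOR_FLOAT32 and FLOAT32 entries by hand and then re-sort, because lookup() treats operandPerformance as a vector kept sorted by operand type. A sketch of the kind of sorted lookup this invariant enables, assuming the V1_2 layout (findPerf is a hypothetical stand-in for lookup):

    // Hypothetical stand-in for lookup(): binary search over the sorted table.
    V1_0::PerformanceInfo findPerf(
            const hardware::hidl_vec<V1_2::Capabilities::OperandPerformance>& perf,
            V1_2::OperandType type) {
        const auto it = std::lower_bound(
                perf.begin(), perf.end(), type,
                [](const V1_2::Capabilities::OperandPerformance& entry,
                   V1_2::OperandType t) { return entry.type < t; });
        if (it != perf.end() && it->type == type) return it->info;
        return {.execTime = FLT_MAX, .powerUsage = FLT_MAX};  // worst case if absent
    }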
+
+V1_3::Capabilities convertToV1_3(const V1_3::Capabilities& capabilities) {
+    return capabilities;
+}
+
+static V1_0::Operation uncheckedConvertToV1_0(const V1_1::Operation& operation) {
+    return {.type = uncheckedConvertToV1_0(operation.type),
+            .inputs = operation.inputs,
+            .outputs = operation.outputs};
+}
+
+static V1_1::Operation convertToV1_1(const V1_0::Operation& operation) {
+    return {.type = convertToV1_1(operation.type),
+            .inputs = operation.inputs,
+            .outputs = operation.outputs};
+}
+
+static hardware::hidl_vec<V1_0::Operation> uncheckedConvertToV1_0(
+        const hardware::hidl_vec<V1_1::Operation>& operations) {
+    hardware::hidl_vec<V1_0::Operation> result(operations.size());
+    std::transform(
+            operations.begin(), operations.end(), result.begin(),
+            [](const V1_1::Operation& operation) { return uncheckedConvertToV1_0(operation); });
+    return result;
+}
+
+static hardware::hidl_vec<V1_1::Operation> convertToV1_1(
+        const hardware::hidl_vec<V1_0::Operation>& operations) {
+    hardware::hidl_vec<V1_1::Operation> result(operations.size());
+    std::transform(operations.begin(), operations.end(), result.begin(),
+                   [](const V1_0::Operation& operation) { return convertToV1_1(operation); });
+    return result;
+}
+
+bool compliantWithV1_0(const V1_3::Operand& operand) {
+    return validOperandType(static_cast<V1_0::OperandType>(operand.type)) &&
+           (nonExtensionOperandTypeIsScalar(static_cast<int>(operand.type)) ||
+            operand.dimensions.size() != 0) &&
+           compliantWithV1_0(operand.lifetime);
+}
+
+bool compliantWithV1_2(const V1_3::Operand& operand) {
+    return validOperandType(static_cast<V1_2::OperandType>(operand.type)) &&
+           compliantWithV1_0(operand.lifetime);
+}
+
+bool compliantWithV1_3(const V1_3::Operand& operand) {
+    return true;
+}
+
+static bool compliantWith(HalVersion version, const V1_3::Model& model,
+                          std::set<uint32_t>* noncompliantOperations) {
+    // A boolean vector indicating whether each pool is compliant with the target HAL version.
+    std::vector<bool> isPoolCompliant(model.pools.size(), false);
+    std::transform(
+            model.pools.begin(), model.pools.end(), isPoolCompliant.begin(),
+            [version](const hardware::hidl_memory& pool) { return validatePool(pool, version); });
+
+    // A boolean vector indicating whether each operand is compliant with the target HAL version.
+    std::vector<bool> isOperandCompliant(model.main.operands.size(), false);
+    std::transform(model.main.operands.begin(), model.main.operands.end(),
+                   isOperandCompliant.begin(),
+                   [&isPoolCompliant, version](const V1_3::Operand& op) {
+                       bool is_operand_compliant = false;
+                       switch (version) {
+                           case HalVersion::UNKNOWN:
+                               is_operand_compliant = false;
+                               break;
+                           case HalVersion::V1_0:
+                               is_operand_compliant = compliantWithV1_0(op);
+                               break;
+                           case HalVersion::V1_1:
+                               // There is no V1_1::Operand -- both V1_0::Model
+                               // and V1_1::Model use V1_0::Operand.
+                               is_operand_compliant = compliantWithV1_0(op);
+                               break;
+                           case HalVersion::V1_2:
+                               is_operand_compliant = compliantWithV1_2(op);
+                               break;
+                           case HalVersion::V1_3:
+                               is_operand_compliant = compliantWithV1_3(op);
+                               break;
+                       }
+                       return is_operand_compliant &&
+                              !(op.lifetime == V1_3::OperandLifeTime::CONSTANT_REFERENCE &&
+                                !isPoolCompliant[op.location.poolIndex]);
+                   });
+
+    auto allOperandsCompliant = [&isOperandCompliant](const hardware::hidl_vec<uint32_t>& indices) {
+        return std::all_of(
+                indices.begin(), indices.end(),
+                [&isOperandCompliant](const uint32_t ind) { return isOperandCompliant[ind]; });
+    };
+
+    auto localValidateOperation = [&model, version,
+                                   &allOperandsCompliant](const V1_3::Operation& op) {
+        if (!allOperandsCompliant(op.inputs) || !allOperandsCompliant(op.outputs)) return false;
+        int error = validateOperation(static_cast<int32_t>(op.type), op.inputs.size(),
+                                      op.inputs.size() > 0 ? op.inputs.data() : nullptr,
+                                      op.outputs.size(),
+                                      op.outputs.size() > 0 ? op.outputs.data() : nullptr,
+                                      uncheckedConvert(model.main.operands), version);
+        return error == ANEURALNETWORKS_NO_ERROR;
+    };
+
+    if (noncompliantOperations) {
+        CHECK(noncompliantOperations->empty());
+        for (uint32_t idx = 0; idx < model.main.operations.size(); ++idx) {
+            if (!localValidateOperation(model.main.operations[idx])) {
+                noncompliantOperations->insert(idx);
+            }
+        }
+        return noncompliantOperations->empty();
+    } else {
+        return std::all_of(model.main.operations.begin(), model.main.operations.end(),
+                           localValidateOperation);
+    }
+}
+
+bool compliantWithV1_0(const V1_0::Model& model) {
+    return true;
+}
+
+bool compliantWithV1_0(const V1_1::Model& model) {
+    // In addition to new enumeration values being introduced in V1_1::Model, a
+    // new flag was introduced to indicate whether or not float32 data can be
+    // calculated using float16 units. This 'relaxComputationFloat32toFloat16'
+    // flag is not relevant in whether a V1_1::Model is compliant with a
+    // V1_0::Model because all 1.0 drivers require strict calculation by default
+    // in the P NN runtime. Even if fp16 calculations are allowed, they can
+    // still be computed by a strict fp32 driver.
+    auto operands = uncheckedConvert(convertToV1_3(model.operands));
+    return std::all_of(model.operations.begin(), model.operations.end(),
+                       [&operands](const V1_1::Operation& op) {
+                           int error = validateOperation(
+                                   static_cast<int32_t>(op.type), op.inputs.size(),
+                                   op.inputs.size() > 0 ? op.inputs.data() : nullptr,
+                                   op.outputs.size(),
+                                   op.outputs.size() > 0 ? op.outputs.data() : nullptr, operands,
+                                   HalVersion::V1_0);
+                           return error == ANEURALNETWORKS_NO_ERROR;
+                       });
+}
+
+bool compliantWithV1_0(const V1_2::Model& model, std::set<uint32_t>* noncompliantOperations) {
+    return compliantWith(HalVersion::V1_0, convertToV1_3(model), noncompliantOperations);
+}
+
+bool compliantWithV1_0(const V1_3::Model& model, std::set<uint32_t>* noncompliantOperations) {
+    return compliantWith(HalVersion::V1_0, model, noncompliantOperations);
+}
+
+bool compliantWithV1_1(const V1_0::Model&) {
+    return true;
+}
+
+bool compliantWithV1_1(const V1_1::Model&) {
+    return true;
+}
+
+bool compliantWithV1_1(const V1_2::Model& model, std::set<uint32_t>* noncompliantOperations) {
+    return compliantWith(HalVersion::V1_1, convertToV1_3(model), noncompliantOperations);
+}
+
+bool compliantWithV1_1(const V1_3::Model& model, std::set<uint32_t>* noncompliantOperations) {
+    return compliantWith(HalVersion::V1_1, model, noncompliantOperations);
+}
+
+bool compliantWithV1_2(const V1_0::Model&) {
+    return true;
+}
+
+bool compliantWithV1_2(const V1_1::Model&) {
+    return true;
+}
+
+bool compliantWithV1_2(const V1_2::Model&, std::set<uint32_t>* noncompliantOperations) {
+    return true;
+}
+
+bool compliantWithV1_2(const V1_3::Model& model, std::set<uint32_t>* noncompliantOperations) {
+    return compliantWith(HalVersion::V1_2, model, noncompliantOperations);
+}
+
+static V1_0::Operation uncheckedConvertToV1_0(const V1_2::Operation& operation) {
+    return {.type = uncheckedConvertToV1_0(operation.type),
+            .inputs = operation.inputs,
+            .outputs = operation.outputs};
+}
+
+static V1_0::Operation uncheckedConvertToV1_0(const V1_3::Operation& operation) {
+    return {.type = uncheckedConvertToV1_0(operation.type),
+            .inputs = operation.inputs,
+            .outputs = operation.outputs};
+}
+
+static V1_1::Operation uncheckedConvertToV1_1(const V1_2::Operation& operation) {
+    return {.type = uncheckedConvertToV1_1(operation.type),
+            .inputs = operation.inputs,
+            .outputs = operation.outputs};
+}
+
+static V1_1::Operation uncheckedConvertToV1_1(const V1_3::Operation& operation) {
+    return {.type = uncheckedConvertToV1_1(operation.type),
+            .inputs = operation.inputs,
+            .outputs = operation.outputs};
+}
+
+static V1_2::Operation convertToV1_2(const V1_0::Operation& operation) {
+    return {.type = convertToV1_2(operation.type),
+            .inputs = operation.inputs,
+            .outputs = operation.outputs};
+}
+
+static V1_2::Operation convertToV1_2(const V1_1::Operation& operation) {
+    return {.type = convertToV1_2(operation.type),
+            .inputs = operation.inputs,
+            .outputs = operation.outputs};
+}
+
+static V1_2::Operation uncheckedConvertToV1_2(const V1_3::Operation& operation) {
+    return {.type = uncheckedConvertToV1_2(operation.type),
+            .inputs = operation.inputs,
+            .outputs = operation.outputs};
+}
+
+static V1_3::Operation convertToV1_3(const V1_0::Operation& operation) {
+    return {.type = convertToV1_3(operation.type),
+            .inputs = operation.inputs,
+            .outputs = operation.outputs};
+}
+
+static V1_3::Operation convertToV1_3(const V1_1::Operation& operation) {
+    return {.type = convertToV1_3(operation.type),
+            .inputs = operation.inputs,
+            .outputs = operation.outputs};
+}
+
+static V1_3::Operation convertToV1_3(const V1_2::Operation& operation) {
+    return {.type = convertToV1_3(operation.type),
+            .inputs = operation.inputs,
+            .outputs = operation.outputs};
+}
+
+static hardware::hidl_vec<V1_0::Operation> uncheckedConvertToV1_0(
+        const hardware::hidl_vec<V1_3::Operation>& operations) {
+    hardware::hidl_vec<V1_0::Operation> result(operations.size());
+    std::transform(
+            operations.begin(), operations.end(), result.begin(),
+            [](const V1_3::Operation& operation) { return uncheckedConvertToV1_0(operation); });
+    return result;
+}
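Taken together, the model-level compliantWith* overloads let a caller probe the lowest HAL version a V1_3 model can be lowered to, and collect the operations that block each downgrade. A usage sketch (chooseTargetVersion is hypothetical; note that compliantWith() CHECKs that the out-set is empty on entry, hence the clear() calls):

    // Hypothetical probe for the lowest HAL version that can hold the model.
    HalVersion chooseTargetVersion(const V1_3::Model& model) {
        std::set<uint32_t> blockers;
        if (compliantWithV1_0(model, &blockers)) return HalVersion::V1_0;
        blockers.clear();
        if (compliantWithV1_1(model, &blockers)) return HalVersion::V1_1;
        blockers.clear();
        if (compliantWithV1_2(model, &blockers)) return HalVersion::V1_2;
        return HalVersion::V1_3;
    }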
+
+static hardware::hidl_vec<V1_0::Operation> uncheckedConvertToV1_0(
+        const hardware::hidl_vec<V1_2::Operation>& operations) {
+    hardware::hidl_vec<V1_0::Operation> result(operations.size());
+    std::transform(
+            operations.begin(), operations.end(), result.begin(),
+            [](const V1_2::Operation& operation) { return uncheckedConvertToV1_0(operation); });
+    return result;
+}
+
+static hardware::hidl_vec<V1_2::Operation> uncheckedConvertToV1_2(
+        const hardware::hidl_vec<V1_3::Operation>& operations) {
+    hardware::hidl_vec<V1_2::Operation> result(operations.size());
+    std::transform(
+            operations.begin(), operations.end(), result.begin(),
+            [](const V1_3::Operation& operation) { return uncheckedConvertToV1_2(operation); });
+    return result;
+}
+
+static hardware::hidl_vec<V1_1::Operation> uncheckedConvertToV1_1(
+        const hardware::hidl_vec<V1_2::Operation>& operations) {
+    hardware::hidl_vec<V1_1::Operation> result(operations.size());
+    std::transform(
+            operations.begin(), operations.end(), result.begin(),
+            [](const V1_2::Operation& operation) { return uncheckedConvertToV1_1(operation); });
+    return result;
+}
+
+static hardware::hidl_vec<V1_1::Operation> uncheckedConvertToV1_1(
+        const hardware::hidl_vec<V1_3::Operation>& operations) {
+    hardware::hidl_vec<V1_1::Operation> result(operations.size());
+    std::transform(
+            operations.begin(), operations.end(), result.begin(),
+            [](const V1_3::Operation& operation) { return uncheckedConvertToV1_1(operation); });
+    return result;
+}
+
+static hardware::hidl_vec<V1_2::Operation> convertToV1_2(
+        const hardware::hidl_vec<V1_0::Operation>& operations) {
+    hardware::hidl_vec<V1_2::Operation> result(operations.size());
+    std::transform(operations.begin(), operations.end(), result.begin(),
+                   [](const V1_0::Operation& operation) { return convertToV1_2(operation); });
+    return result;
+}
+
+static hardware::hidl_vec<V1_2::Operation> convertToV1_2(
+        const hardware::hidl_vec<V1_1::Operation>& operations) {
+    hardware::hidl_vec<V1_2::Operation> result(operations.size());
+    std::transform(operations.begin(), operations.end(), result.begin(),
+                   [](const V1_1::Operation& operation) { return convertToV1_2(operation); });
+    return result;
+}
+
+static hardware::hidl_vec<V1_3::Operation> convertToV1_3(
+        const hardware::hidl_vec<V1_0::Operation>& operations) {
+    hardware::hidl_vec<V1_3::Operation> result(operations.size());
+    std::transform(operations.begin(), operations.end(), result.begin(),
+                   [](const V1_0::Operation& operation) { return convertToV1_3(operation); });
+    return result;
+}
+
+static hardware::hidl_vec<V1_3::Operation> convertToV1_3(
+        const hardware::hidl_vec<V1_1::Operation>& operations) {
+    hardware::hidl_vec<V1_3::Operation> result(operations.size());
+    std::transform(operations.begin(), operations.end(), result.begin(),
+                   [](const V1_1::Operation& operation) { return convertToV1_3(operation); });
+    return result;
+}
+
+static hardware::hidl_vec<V1_3::Operation> convertToV1_3(
+        const hardware::hidl_vec<V1_2::Operation>& operations) {
+    hardware::hidl_vec<V1_3::Operation> result(operations.size());
+    std::transform(operations.begin(), operations.end(), result.begin(),
+                   [](const V1_2::Operation& operation) { return convertToV1_3(operation); });
+    return result;
+}
+
+static bool compliantWithV1_0(const V1_2::OperandType& operandType) {
+    return validOperandType(static_cast<V1_0::OperandType>(operandType));
+}
+
+static bool compliantWithV1_0(const V1_3::OperandType& operandType) {
+    return validOperandType(static_cast<V1_0::OperandType>(operandType));
+}
+
+static bool compliantWithV1_2(const V1_3::OperandType& operandType) {
+    return validOperandType(static_cast<V1_2::OperandType>(operandType));
+}
+
+V1_0::OperandType convertToV1_0(const V1_2::OperandType& operandType) {
+    if (!compliantWithV1_0(operandType)) {
+        LOG(ERROR) << "Upcasting non-compliant operand type " << toString(operandType)
+                   << " from V1_2::OperandType to V1_0::OperandType";
+    }
+    return static_cast<V1_0::OperandType>(operandType);
+}
+
+V1_2::OperandType convertToV1_2(const V1_0::OperandType& operandType) {
+    return static_cast<V1_2::OperandType>(operandType);
+}
+
+V1_2::OperandType convertToV1_2(const V1_3::OperandType& operandType) {
+    if (!compliantWithV1_2(operandType)) {
+        LOG(ERROR) << "Upcasting non-compliant operand type " << toString(operandType)
+                   << " from V1_3::OperandType to V1_2::OperandType";
+    }
+    return static_cast<V1_2::OperandType>(operandType);
+}
+
+V1_0::OperandType convertToV1_0(const V1_3::OperandType& operandType) {
+    if (!compliantWithV1_0(operandType)) {
+        LOG(ERROR) << "Upcasting non-compliant operand type " << toString(operandType)
+                   << " from V1_3::Operand to V1_0::Operand";
+    }
+    return static_cast<V1_0::OperandType>(operandType);
+}
+
+bool compliantWithV1_0(V1_0::OperandLifeTime lifetime) {
+    return true;
+}
+
+bool compliantWithV1_0(V1_3::OperandLifeTime lifetime) {
+    return lifetime != V1_3::OperandLifeTime::SUBGRAPH;
+}
+
+bool compliantWithV1_3(V1_0::OperandLifeTime lifetime) {
+    return true;
+}
+
+bool compliantWithV1_3(V1_3::OperandLifeTime lifetime) {
+    return true;
+}
+
+V1_0::OperandLifeTime convertToV1_0(V1_0::OperandLifeTime lifetime) {
+    return lifetime;
+}
+
+V1_0::OperandLifeTime convertToV1_0(V1_3::OperandLifeTime lifetime) {
+    if (!compliantWithV1_0(lifetime)) {
+        LOG(ERROR) << "Upcasting non-compliant lifetime " << toString(lifetime)
+                   << " from V1_3 to V1_0";
+    }
+    return static_cast<V1_0::OperandLifeTime>(lifetime);
+}
+
+V1_3::OperandLifeTime convertToV1_3(V1_0::OperandLifeTime lifetime) {
+    return static_cast<V1_3::OperandLifeTime>(lifetime);
+}
+
+V1_3::OperandLifeTime convertToV1_3(V1_3::OperandLifeTime lifetime) {
+    return lifetime;
+}
+
+V1_0::Operand convertToV1_0(const V1_2::Operand& operand) {
+    return {.type = convertToV1_0(operand.type),
+            .dimensions = operand.dimensions,
+            .numberOfConsumers = operand.numberOfConsumers,
+            .scale = operand.scale,
+            .zeroPoint = operand.zeroPoint,
+            .lifetime = convertToV1_0(operand.lifetime),
+            .location = operand.location};
+}
+
+V1_0::Operand convertToV1_0(const V1_3::Operand& operand) {
+    return {.type = convertToV1_0(operand.type),
+            .dimensions = operand.dimensions,
+            .numberOfConsumers = operand.numberOfConsumers,
+            .scale = operand.scale,
+            .zeroPoint = operand.zeroPoint,
+            .lifetime = convertToV1_0(operand.lifetime),
+            .location = operand.location};
+}
+
+V1_2::Operand convertToV1_2(const V1_0::Operand& operand) {
+    return {.type = convertToV1_2(operand.type),
+            .dimensions = operand.dimensions,
+            .numberOfConsumers = operand.numberOfConsumers,
+            .scale = operand.scale,
+            .zeroPoint = operand.zeroPoint,
+            .lifetime = operand.lifetime,
+            .location = operand.location};
+}
+
+V1_2::Operand convertToV1_2(const V1_3::Operand& operand) {
+    return {.type = convertToV1_2(operand.type),
+            .dimensions = operand.dimensions,
+            .numberOfConsumers = operand.numberOfConsumers,
+            .scale = operand.scale,
+            .zeroPoint = operand.zeroPoint,
+            .lifetime = static_cast<V1_0::OperandLifeTime>(operand.lifetime),
+            .location = operand.location,
+            .extraParams = operand.extraParams};
+}
+
+V1_3::Operand convertToV1_3(const V1_0::Operand& operand) {
+    return {.type = static_cast<V1_3::OperandType>(operand.type),
+            .dimensions = operand.dimensions,
+            .numberOfConsumers = operand.numberOfConsumers,
+            .scale = operand.scale,
+            .zeroPoint = operand.zeroPoint,
+            .lifetime = convertToV1_3(operand.lifetime),
+            .location = operand.location};
+}
+
+V1_3::Operand convertToV1_3(const V1_2::Operand& operand) {
+    return {.type = static_cast<V1_3::OperandType>(operand.type),
+            .dimensions = operand.dimensions,
+            .numberOfConsumers = operand.numberOfConsumers,
+            .scale = operand.scale,
+            .zeroPoint = operand.zeroPoint,
+            .lifetime = convertToV1_3(operand.lifetime),
+            .location = operand.location,
+            .extraParams = operand.extraParams};
+}
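SUBGRAPH is the one V1_3 lifetime with no V1_0 counterpart, so downgrading it is lossy: convertToV1_0 logs an error and falls through to a bare enum cast. A small hypothetical demo of the two paths:

    // Hypothetical demo: CONSTANT_COPY converts cleanly; SUBGRAPH logs an
    // ERROR and yields a numerically cast, meaningless V1_0 value.
    void demoLifetimeDowngrade() {
        (void)convertToV1_0(V1_3::OperandLifeTime::CONSTANT_COPY);  // silent
        (void)convertToV1_0(V1_3::OperandLifeTime::SUBGRAPH);       // logs ERROR
    }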
+
+V1_3::Operand convertToV1_3(const V1_3::Operand& operand) {
+    return operand;
+}
+
+hardware::hidl_vec<V1_0::Operand> convertToV1_0(const hardware::hidl_vec<V1_0::Operand>& operands) {
+    return operands;
+}
+
+hardware::hidl_vec<V1_0::Operand> convertToV1_0(const hardware::hidl_vec<V1_2::Operand>& operands) {
+    hardware::hidl_vec<V1_0::Operand> result(operands.size());
+    std::transform(operands.begin(), operands.end(), result.begin(),
+                   [](const V1_2::Operand& operand) { return convertToV1_0(operand); });
+    return result;
+}
+
+hardware::hidl_vec<V1_0::Operand> convertToV1_0(const hardware::hidl_vec<V1_3::Operand>& operands) {
+    hardware::hidl_vec<V1_0::Operand> result(operands.size());
+    std::transform(operands.begin(), operands.end(), result.begin(),
+                   [](const V1_3::Operand& operand) { return convertToV1_0(operand); });
+    return result;
+}
+
+hardware::hidl_vec<V1_2::Operand> convertToV1_2(const hardware::hidl_vec<V1_0::Operand>& operands) {
+    hardware::hidl_vec<V1_2::Operand> result(operands.size());
+    std::transform(operands.begin(), operands.end(), result.begin(),
+                   [](const V1_0::Operand& operand) { return convertToV1_2(operand); });
+    return result;
+}
+
+hardware::hidl_vec<V1_2::Operand> convertToV1_2(const hardware::hidl_vec<V1_2::Operand>& operands) {
+    return operands;
+}
+
+hardware::hidl_vec<V1_2::Operand> convertToV1_2(const hardware::hidl_vec<V1_3::Operand>& operands) {
+    hardware::hidl_vec<V1_2::Operand> result(operands.size());
+    std::transform(operands.begin(), operands.end(), result.begin(),
+                   [](const V1_3::Operand& operand) { return convertToV1_2(operand); });
+    return result;
+}
+
+hardware::hidl_vec<V1_3::Operand> convertToV1_3(const hardware::hidl_vec<V1_0::Operand>& operands) {
+    hardware::hidl_vec<V1_3::Operand> result(operands.size());
+    std::transform(operands.begin(), operands.end(), result.begin(),
+                   [](const V1_0::Operand& operand) { return convertToV1_3(operand); });
+    return result;
+}
+
+hardware::hidl_vec<V1_3::Operand> convertToV1_3(const hardware::hidl_vec<V1_2::Operand>& operands) {
+    hardware::hidl_vec<V1_3::Operand> result(operands.size());
+    std::transform(operands.begin(), operands.end(), result.begin(),
+                   [](const V1_2::Operand& operand) { return convertToV1_3(operand); });
+    return result;
+}
+
+hardware::hidl_vec<V1_3::Operand> convertToV1_3(const hardware::hidl_vec<V1_3::Operand>& operands) {
+    return operands;
+}
+
+V1_0::Model convertToV1_0(const V1_0::Model& model) {
+    return model;
+}
+
+V1_0::Model convertToV1_0(const V1_1::Model& model) {
+    if (!compliantWithV1_0(model)) {
+        LOG(ERROR) << "Upcasting non-compliant model " << SHOW_IF_DEBUG(toString(model))
+                   << " from V1_1::Model to V1_0::Model";
+    }
+    return {.operands = model.operands,
+            .operations = uncheckedConvertToV1_0(model.operations),
+            .inputIndexes = model.inputIndexes,
+            .outputIndexes = model.outputIndexes,
+            .operandValues = model.operandValues,
+            .pools = model.pools};
+}
+
+V1_0::Model convertToV1_0(const V1_2::Model& model) {
+    if (!compliantWithV1_0(model)) {
+        LOG(ERROR) << "Upcasting non-compliant model " << SHOW_IF_DEBUG(toString(model))
+                   << " from V1_2::Model to V1_0::Model";
+    }
+    return {.operands = convertToV1_0(model.operands),
+            .operations = uncheckedConvertToV1_0(model.operations),
+            .inputIndexes = model.inputIndexes,
+            .outputIndexes = model.outputIndexes,
+            .operandValues = model.operandValues,
+            .pools = model.pools};
+}
+
+V1_0::Model convertToV1_0(const V1_3::Model& model) {
+    if (!compliantWithV1_0(model)) {
+        LOG(ERROR) << "Upcasting non-compliant model " << SHOW_IF_DEBUG(toString(model))
+                   << " from V1_3::Model to V1_0::Model";
+    }
+    return {.operands = convertToV1_0(model.main.operands),
+            .operations = uncheckedConvertToV1_0(model.main.operations),
+            .inputIndexes = model.main.inputIndexes,
+            .outputIndexes = model.main.outputIndexes,
+            .operandValues = model.operandValues,
+            .pools = model.pools};
+}
+
+V1_1::Model convertToV1_1(const V1_0::Model& model) {
+    return {.operands = model.operands,
+            .operations = convertToV1_1(model.operations),
+            .inputIndexes = model.inputIndexes,
+            .outputIndexes = model.outputIndexes,
+            .operandValues = model.operandValues,
+            .pools = model.pools,
+            .relaxComputationFloat32toFloat16 = false};
+}
+
+V1_1::Model convertToV1_1(const V1_1::Model& model) {
+    return model;
+}
+
+V1_1::Model convertToV1_1(const V1_2::Model& model) {
+    if (!compliantWithV1_1(model)) {
+        LOG(ERROR) << "Upcasting non-compliant model " << SHOW_IF_DEBUG(toString(model))
+                   << " from V1_2::Model to V1_1::Model";
+    }
+    return {.operands = convertToV1_0(model.operands),  // Operands in 1.1 and 1.0 are identical.
+            .operations = uncheckedConvertToV1_1(model.operations),
+            .inputIndexes = model.inputIndexes,
+            .outputIndexes = model.outputIndexes,
+            .operandValues = model.operandValues,
+            .pools = model.pools,
+            .relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16};
+}
+
+V1_1::Model convertToV1_1(const V1_3::Model& model) {
+    if (!compliantWithV1_1(model)) {
+        LOG(ERROR) << "Upcasting non-compliant model " << SHOW_IF_DEBUG(toString(model))
+                   << " from V1_3::Model to V1_1::Model";
+    }
+    return {// Operands in 1.1 and 1.0 are identical.
+            .operands = convertToV1_0(model.main.operands),
+            .operations = uncheckedConvertToV1_1(model.main.operations),
+            .inputIndexes = model.main.inputIndexes,
+            .outputIndexes = model.main.outputIndexes,
+            .operandValues = model.operandValues,
+            .pools = model.pools,
+            .relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16};
+}
+
+V1_2::Model convertToV1_2(const V1_0::Model& model) {
+    return {.operands = convertToV1_2(model.operands),
+            .operations = convertToV1_2(model.operations),
+            .inputIndexes = model.inputIndexes,
+            .outputIndexes = model.outputIndexes,
+            .operandValues = model.operandValues,
+            .pools = model.pools,
+            .relaxComputationFloat32toFloat16 = false};
+}
+
+V1_2::Model convertToV1_2(const V1_1::Model& model) {
+    return {.operands = convertToV1_2(model.operands),
+            .operations = convertToV1_2(model.operations),
+            .inputIndexes = model.inputIndexes,
+            .outputIndexes = model.outputIndexes,
+            .operandValues = model.operandValues,
+            .pools = model.pools,
+            .relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16};
+}
+
+V1_2::Model convertToV1_2(const V1_2::Model& model) {
+    return model;
+}
+
+V1_2::Model convertToV1_2(const V1_3::Model& model) {
+    if (!compliantWithV1_2(model)) {
+        LOG(ERROR) << "Upcasting non-compliant model " << SHOW_IF_DEBUG(toString(model))
+                   << " from V1_3::Model to V1_2::Model";
+    }
+    return {.operands = convertToV1_2(model.main.operands),
+            .operations = uncheckedConvertToV1_2(model.main.operations),
+            .inputIndexes = model.main.inputIndexes,
+            .outputIndexes = model.main.outputIndexes,
+            .operandValues = model.operandValues,
+            .pools = model.pools,
+            .relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16,
+            .extensionNameToPrefix = model.extensionNameToPrefix};
+}
+
+V1_3::Model convertToV1_3(const V1_0::Model& model) {
+    return {.main = {.operands = convertToV1_3(model.operands),
+                     .operations = convertToV1_3(model.operations),
+                     .inputIndexes = model.inputIndexes,
+                     .outputIndexes = model.outputIndexes},
+            .operandValues = model.operandValues,
+            .pools = model.pools,
+            .relaxComputationFloat32toFloat16 = false};
+}
+
+V1_3::Model convertToV1_3(const V1_1::Model& model) {
+    return {.main = {.operands = convertToV1_3(model.operands),
+                     .operations = convertToV1_3(model.operations),
+                     .inputIndexes = model.inputIndexes,
+                     .outputIndexes = model.outputIndexes},
+            .operandValues = model.operandValues,
+            .pools = model.pools,
+            .relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16};
+}
+
+V1_3::Model convertToV1_3(const V1_2::Model& model) {
+    return {.main = {.operands = convertToV1_3(model.operands),
+                     .operations = convertToV1_3(model.operations),
+                     .inputIndexes = model.inputIndexes,
+                     .outputIndexes = model.outputIndexes},
+            .operandValues = model.operandValues,
+            .pools = model.pools,
+            .relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16,
+            .extensionNameToPrefix = model.extensionNameToPrefix};
+}
+
+V1_3::Model convertToV1_3(const V1_3::Model& model) {
+    return model;
+}
+
+bool compliantWithV1_0(const V1_0::Request& request) {
+    return true;
+}
+
+bool compliantWithV1_0(const V1_3::Request& request) {
+    return std::all_of(request.pools.begin(), request.pools.end(), [](const auto& pool) {
+        if (pool.getDiscriminator() != V1_3::Request::MemoryPool::hidl_discriminator::hidlMemory) {
+            return false;
+        }
+        const auto& name = pool.hidlMemory().name();
+        return name == "ashmem" || name == "mmap_fd";
+    });
+}
+
+bool compliantWithV1_2(const V1_3::Request& request) {
+    return std::all_of(request.pools.begin(), request.pools.end(), [](const auto& pool) {
+        if (pool.getDiscriminator() != V1_3::Request::MemoryPool::hidl_discriminator::hidlMemory) {
+            return false;
+        }
+        const auto& name = pool.hidlMemory().name();
+        return name == "ashmem" || name == "mmap_fd" || name == "hardware_buffer_blob" ||
+               name == "hardware_buffer";
+    });
+}
+
+static hardware::hidl_memory convertToV1_0(const V1_3::Request::MemoryPool& pool) {
+    switch (pool.getDiscriminator()) {
+        case V1_3::Request::MemoryPool::hidl_discriminator::hidlMemory:
+            return pool.hidlMemory();
+        case V1_3::Request::MemoryPool::hidl_discriminator::token:
+            return hardware::hidl_memory{};
+    }
+}
+
+static V1_3::Request::MemoryPool convertToV1_3(const hardware::hidl_memory& pool) {
+    V1_3::Request::MemoryPool ret;
+    ret.hidlMemory(pool);
+    return ret;
+}
+
+V1_0::Request convertToV1_0(const V1_0::Request& request) {
+    return request;
+}
+
+static V1_0::Request uncheckedConvertToV1_0(const V1_3::Request& request) {
+    hardware::hidl_vec<hardware::hidl_memory> pools(request.pools.size());
+    std::transform(request.pools.begin(), request.pools.end(), pools.begin(),
+                   [](const auto& pool) { return convertToV1_0(pool); });
+    return {.inputs = request.inputs, .outputs = request.outputs, .pools = std::move(pools)};
+}
+
+V1_0::Request convertToV1_0(const V1_3::Request& request) {
+    if (!compliantWithV1_0(request)) {
+        LOG(ERROR) << "Upcasting non-compliant request " << SHOW_IF_DEBUG(toString(request))
+                   << " from V1_3::Request to V1_0::Request of version 1.0";
+    }
+    return uncheckedConvertToV1_0(request);
+}
+
+V1_0::Request convertToV1_2(const V1_3::Request& request) {
+    if (!compliantWithV1_2(request)) {
+        LOG(ERROR) << "Upcasting non-compliant request " << SHOW_IF_DEBUG(toString(request))
+                   << " from V1_3::Request to V1_0::Request of version 1.2";
+    }
+    return uncheckedConvertToV1_0(request);
+}
+
+V1_3::Request convertToV1_3(const V1_0::Request& request) {
+    hardware::hidl_vec<V1_3::Request::MemoryPool> pools(request.pools.size());
+    std::transform(request.pools.begin(), request.pools.end(), pools.begin(),
+                   [](const auto& pool) { return convertToV1_3(pool); });
+    return {.inputs = request.inputs, .outputs = request.outputs, .pools = std::move(pools)};
+}
+
+V1_3::Request convertToV1_3(const V1_3::Request& request) {
+    return request;
+}
+
+FenceState syncWait(int fd, int timeout) {
+    // This implementation is directly based on the ::sync_wait() implementation.
+
+    struct pollfd fds;
+    int ret;
+
+    if (fd < 0) {
+        errno = EINVAL;
+        return FenceState::UNKNOWN;
+    }
+
+    fds.fd = fd;
+    fds.events = POLLIN;
+
+    do {
+        ret = poll(&fds, 1, timeout);
+        if (ret > 0) {
+            if (fds.revents & POLLNVAL) {
+                errno = EINVAL;
+                return FenceState::UNKNOWN;
+            }
+            if (fds.revents & POLLERR) {
+                errno = EINVAL;
+                return FenceState::ERROR;
+            }
+            return FenceState::SIGNALED;
+        } else if (ret == 0) {
+            errno = ETIME;
+            return FenceState::ACTIVE;
+        }
+    } while (ret == -1 && (errno == EINTR || errno == EAGAIN));
+
+    return FenceState::UNKNOWN;
+}
+
+#ifdef NN_DEBUGGABLE
+uint32_t getProp(const char* str, uint32_t defaultValue) {
+    const std::string propStr = android::base::GetProperty(str, "");
+    if (propStr.size() > 0) {
+        return std::stoi(propStr);
+    } else {
+        return defaultValue;
+    }
+}
+#endif  // NN_DEBUGGABLE
+
+ErrorStatus uncheckedConvert(V1_0::ErrorStatus status) {
+    return nnTryGetValue(convert(status));
+}
+
+ErrorStatus uncheckedConvert(V1_3::ErrorStatus status) {
+    return nnTryGetValue(convert(status));
+}
+
+OperandType uncheckedConvert(V1_3::OperandType operandType) {
+    return nnTryGetValue(convert(operandType));
+}
+
+OperationType uncheckedConvert(V1_3::OperationType operandType) {
+    return nnTryGetValue(convert(operandType));
+}
+
+Operand::LifeTime uncheckedConvert(V1_3::OperandLifeTime lifetime) {
+    return nnTryGetValue(convert(lifetime));
+}
+
+MeasureTiming uncheckedConvert(V1_2::MeasureTiming measure) {
+    return nnTryGetValue(convert(measure));
+}
+
+DataLocation uncheckedConvert(const V1_0::DataLocation& location) {
+    return nnTryGetValue(convert(location));
+}
+
+Operand uncheckedConvert(const V1_3::Operand& operand) {
+    return nnTryGetValue(convert(operand));
+}
+
+Operand::ExtraParams uncheckedConvert(const V1_2::Operand::ExtraParams& params) {
+    return nnTryGetValue(convert(params));
+}
+
+Operand::SymmPerChannelQuantParams uncheckedConvert(const V1_2::SymmPerChannelQuantParams& params) {
+    return nnTryGetValue(convert(params));
+}
+
+Operand::ExtensionParams uncheckedConvert(const hardware::hidl_vec<uint8_t>& params) {
+    return params;
+}
+
+Operation uncheckedConvert(const V1_3::Operation& operation) {
+    return nnTryGetValue(convert(operation));
+}
+
+template <typename CanonicalType, typename HalType>
+static std::vector<CanonicalType> convertVec(const hardware::hidl_vec<HalType>& items) {
+    std::vector<CanonicalType> result(items.size());
+    std::transform(items.begin(), items.end(), result.begin(),
+                   [](const HalType& item) { return uncheckedConvert(item); });
+    return result;
+}
+
+Model uncheckedConvert(const V1_3::Model& model) {
+    return nnTryGetValue(convert(model));
+}
+
+Model::Subgraph uncheckedConvert(const V1_3::Subgraph& subgraph) {
+    return nnTryGetValue(convert(subgraph));
+}
+
+Model::ExtensionNameAndPrefix uncheckedConvert(const V1_2::Model::ExtensionNameAndPrefix& x) {
+    return nnTryGetValue(convert(x));
+}
+
+Request uncheckedConvert(const V1_3::Request& request) {
+    return nnTryGetValue(convert(request));
+}
+
+Request::Argument uncheckedConvert(const V1_0::RequestArgument& requestArgument) {
+    return nnTryGetValue(convert(requestArgument));
+}
+
+Request::MemoryPool uncheckedConvert(const V1_3::Request::MemoryPool& memoryPool) {
+    return nnTryGetValue(convert(memoryPool));
+}
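syncWait() above mirrors libsync's ::sync_wait(): one poll() on the fence fd, where a negative timeout blocks indefinitely and POLLNVAL/POLLERR are folded into the FenceState result rather than surfaced only through errno. A usage sketch, assuming fenceFd is a valid sync fence descriptor:

    // Hypothetical caller: wait up to 500 ms before reading execution outputs.
    bool outputsReady(int fenceFd) {
        switch (syncWait(fenceFd, /*timeout=*/500)) {
            case FenceState::SIGNALED: return true;   // safe to read outputs
            case FenceState::ACTIVE:   return false;  // timed out, still pending
            case FenceState::ERROR:    return false;  // fence signaled an error
            case FenceState::UNKNOWN:  return false;  // bad fd or poll() failure
        }
        return false;
    }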
+
+OutputShape uncheckedConvert(const V1_2::OutputShape& outputShape) {
+    return nnTryGetValue(convert(outputShape));
+}
+
+std::vector<OutputShape> uncheckedConvert(
+        const hardware::hidl_vec<V1_2::OutputShape>& outputShapes) {
+    return convertVec<OutputShape>(outputShapes);
+}
+
+Capabilities uncheckedConvert(const V1_3::Capabilities& capabilities) {
+    return nnTryGetValue(convert(capabilities));
+}
+
+Capabilities::OperandPerformance uncheckedConvert(
+        const V1_3::Capabilities::OperandPerformance& operandPerformance) {
+    return nnTryGetValue(convert(operandPerformance));
+}
+
+Capabilities::PerformanceInfo uncheckedConvert(const V1_0::PerformanceInfo& performanceInfo) {
+    return nnTryGetValue(convert(performanceInfo));
+}
+
+Extension uncheckedConvert(const V1_2::Extension& extension) {
+    return nnTryGetValue(convert(extension));
+}
+
+std::vector<Extension> uncheckedConvert(const hardware::hidl_vec<V1_2::Extension>& extensions) {
+    return convertVec<Extension>(extensions);
+}
+
+Extension::OperandTypeInformation uncheckedConvert(
+        const V1_2::Extension::OperandTypeInformation& info) {
+    return nnTryGetValue(convert(info));
+}
+
+OptionalTimeoutDuration uncheckedConvert(const V1_3::OptionalTimeoutDuration& timeoutDuration) {
+    return nnTryGetValue(convert(timeoutDuration));
+}
+
+Timing uncheckedConvert(const V1_2::Timing& timing) {
+    return nnTryGetValue(convert(timing));
+}
+
+V1_0::ErrorStatus convertToV1_0(ErrorStatus status) {
+    return static_cast<V1_0::ErrorStatus>(static_cast<int32_t>(status));
+}
+
+V1_3::ErrorStatus convertToV1_3(ErrorStatus status) {
+    return nnTryGetValue(V1_3::utils::convert(status));
+}
+
+V1_3::OperandType convertToV1_3(OperandType operandType) {
+    return nnTryGetValue(V1_3::utils::convert(operandType));
+}
+
+V1_3::OperationType convertToV1_3(OperationType operandType) {
+    return nnTryGetValue(V1_3::utils::convert(operandType));
+}
+
+V1_3::OperandLifeTime convertToV1_3(Operand::LifeTime lifetime) {
+    return nnTryGetValue(V1_3::utils::convert(lifetime));
+}
+
+V1_1::ExecutionPreference convertToV1_1(ExecutionPreference preference) {
+    return nnTryGetValue(V1_1::utils::convert(preference));
+}
+
+V1_3::Priority convertToV1_3(Priority priority) {
+    return nnTryGetValue(V1_3::utils::convert(priority));
+}
+
+V1_2::MeasureTiming convertToV1_2(MeasureTiming measure) {
+    return nnTryGetValue(V1_2::utils::convert(measure));
+}
+
+V1_0::DataLocation convertToV1_0(const DataLocation& location) {
+    return nnTryGetValue(V1_0::utils::convert(location));
+}
+
+V1_3::Operand convertToV1_3(const Operand& operand) {
+    return nnTryGetValue(V1_3::utils::convert(operand));
+}
+
+V1_2::Operand::ExtraParams convertToV1_2(const Operand::ExtraParams& params) {
+    return nnTryGetValue(V1_2::utils::convert(params));
+}
+
+V1_2::SymmPerChannelQuantParams convertToV1_2(const Operand::SymmPerChannelQuantParams& params) {
+    return nnTryGetValue(V1_2::utils::convert(params));
+}
+
+hardware::hidl_vec<uint8_t> uncheckedConvert(const Operand::ExtensionParams& params) {
+    return params;
+}
+
+V1_3::Operation convertToV1_3(const Operation& operation) {
+    return nnTryGetValue(V1_3::utils::convert(operation));
+}
+
+template <typename HalType, typename CanonicalType>
+static hardware::hidl_vec<HalType> convertVecToV1_0(const std::vector<CanonicalType>& items) {
+    hardware::hidl_vec<HalType> result(items.size());
+    std::transform(items.begin(), items.end(), result.begin(),
+                   [](const CanonicalType& item) { return convertToV1_0(item); });
+    return result;
+}
+
+template <typename HalType, typename CanonicalType>
+static hardware::hidl_vec<HalType> convertVecToV1_2(const std::vector<CanonicalType>& items) {
+    hardware::hidl_vec<HalType> result(items.size());
+    std::transform(items.begin(), items.end(), result.begin(),
+                   [](const CanonicalType& item) { return convertToV1_2(item); });
+    return result;
+}
+
+template <typename HalType, typename CanonicalType>
+static hardware::hidl_vec<HalType> convertVecToV1_3(const std::vector<CanonicalType>& items) {
+    hardware::hidl_vec<HalType> result(items.size());
+    std::transform(items.begin(), items.end(), result.begin(),
+                   [](const CanonicalType& item) { return convertToV1_3(item); });
+    return result;
+}
+
+V1_2::OutputShape convertToV1_2(const OutputShape& outputShape) {
+    return nnTryGetValue(V1_2::utils::convert(outputShape));
+}
+
+hardware::hidl_vec<V1_2::OutputShape> convertToV1_2(const std::vector<OutputShape>& outputShapes) {
+    return convertVecToV1_2<V1_2::OutputShape>(outputShapes);
+}
+
+V1_3::Model convertToV1_3(const Model& model) {
+    return nnTryGetValue(V1_3::utils::convert(model));
+}
+
+V1_3::Subgraph convertToV1_3(const Model::Subgraph& subgraph) {
+    return nnTryGetValue(V1_3::utils::convert(subgraph));
+}
+
+V1_2::Model::ExtensionNameAndPrefix convertToV1_2(const Model::ExtensionNameAndPrefix& x) {
+    return nnTryGetValue(V1_2::utils::convert(x));
+}
+
+V1_3::Request convertToV1_3(const Request& request) {
+    return nnTryGetValue(V1_3::utils::convert(request));
+}
+
+V1_0::RequestArgument convertToV1_0(const Request::Argument& requestArgument) {
+    return nnTryGetValue(V1_0::utils::convert(requestArgument));
+}
+
+V1_3::Request::MemoryPool convertToV1_3(const Request::MemoryPool& memoryPool) {
+    return nnTryGetValue(V1_3::utils::convert(memoryPool));
+}
+
+std::vector<Request::MemoryPool> uncheckedConvert(
+        const hardware::hidl_vec<V1_3::Request::MemoryPool>& memoryPools) {
+    return convertVec<Request::MemoryPool>(memoryPools);
+}
+
+V1_3::OptionalTimePoint convertToV1_3(const OptionalTimePoint& timePoint) {
+    return nnTryGetValue(V1_3::utils::convert(timePoint));
+}
+
+V1_3::OptionalTimeoutDuration convertToV1_3(const OptionalTimeoutDuration& timeoutDuration) {
+    return nnTryGetValue(V1_3::utils::convert(timeoutDuration));
+}
+
+V1_2::Timing convertToV1_2(const Timing& timing) {
+    return nnTryGetValue(V1_2::utils::convert(timing));
+}
+
+V1_3::BufferRole convertToV1_3(const BufferRole& bufferRole) {
+    return nnTryGetValue(V1_3::utils::convert(bufferRole));
+}
+
+hardware::hidl_vec<V1_3::BufferRole> convertToV1_3(const std::vector<BufferRole>& bufferRoles) {
+    return convertVecToV1_3<V1_3::BufferRole>(bufferRoles);
+}
+
+hardware::hidl_vec<uint8_t> convertToV1_0(const Model::OperandValues& operandValues) {
+    return nnTryGetValue(V1_0::utils::convert(operandValues));
+}
+
+hardware::hidl_memory convertToV1_0(const Memory& memory) {
+    return nnTryGetValue(V1_0::utils::convert(memory));
+}
+
+Memory uncheckedConvert(const hardware::hidl_memory& memory) {
+    return nnTryGetValue(convert(memory));
+}
+
+hardware::hidl_vec<hardware::hidl_memory> convertToV1_0(const std::vector<Memory>& memories) {
+    return convertVecToV1_0<hardware::hidl_memory>(memories);
+}
+
+std::vector<Memory> uncheckedConvert(const hardware::hidl_vec<hardware::hidl_memory>& memories) {
+    return convertVec<Memory>(memories);
+}
+
+std::vector<Model::Subgraph> uncheckedConvert(const hardware::hidl_vec<V1_3::Subgraph>& subgraphs) {
+    return convertVec<Model::Subgraph>(subgraphs);
+}
+
+std::vector<Operand> uncheckedConvert(const hardware::hidl_vec<V1_3::Operand>& operands) {
+    return convertVec<Operand>(operands);
+}
+
+}  // namespace nn
+}  // namespace android
diff --git a/nn/common/Utils.cpp b/nn/common/Utils.cpp
deleted file mode 100644
index 7417ed8bf..000000000
--- a/nn/common/Utils.cpp
+++ /dev/null
@@ -1,3565 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "Utils"
-
-#include "Utils.h"
-
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-#include "ControlFlow.h"
-#include "NeuralNetworks.h"
-#include "NeuralNetworksOEM.h"
-#include "OperationResolver.h"
-#include "ValidateHal.h"
-#include "nnapi/TypeUtils.h"
-
-namespace android {
-namespace nn {
-
-constexpr V1_0::PerformanceInfo kNoPerformanceInfo = {.execTime = FLT_MAX, .powerUsage = FLT_MAX};
-
-const char kVLogPropKey[] = "debug.nn.vlog";
-int vLogMask = ~0;
-
-// Split the space separated list of tags from verbose log setting and build the
-// logging mask from it. note that '1' and 'all' are special cases to enable all
-// verbose logging.
-//
-// NN API verbose logging setting comes from system property debug.nn.vlog.
-// Example:
-// setprop debug.nn.vlog 1 : enable all logging tags.
-// setprop debug.nn.vlog "model compilation" : only enable logging for MODEL and
-// COMPILATION tags.
-void initVLogMask() {
-    vLogMask = 0;
-    const std::string vLogSetting = android::base::GetProperty(kVLogPropKey, "");
-    if (vLogSetting.empty()) {
-        return;
-    }
-
-    std::unordered_map<std::string, int> vLogFlags = {{"1", -1},
-                                                      {"all", -1},
-                                                      {"model", MODEL},
-                                                      {"compilation", COMPILATION},
-                                                      {"execution", EXECUTION},
-                                                      {"cpuexe", CPUEXE},
-                                                      {"manager", MANAGER},
-                                                      {"driver", DRIVER},
-                                                      {"memory", MEMORY}};
-
-    std::vector<std::string> elements = android::base::Split(vLogSetting, " ,:");
-    for (const auto& elem : elements) {
-        const auto& flag = vLogFlags.find(elem);
-        if (flag == vLogFlags.end()) {
-            LOG(ERROR) << "Unknown trace flag: " << elem;
-            continue;
-        }
-
-        if (flag->second == -1) {
-            // -1 is used for the special values "1" and "all" that enable all
-            // tracing.
-            vLogMask = ~0;
-            return;
-        } else {
-            vLogMask |= 1 << flag->second;
-        }
-    }
-}
-
-TimeoutDuration makeTimeoutDuration(uint64_t nanoseconds) {
-    // According to the standard, std::chrono::nanoseconds::rep is a signed
-    // integer type of at least 64 bits. This check prevents an overflow when
-    // rep is exactly 64 bits.
-    if constexpr (sizeof(std::chrono::nanoseconds::rep) == sizeof(int64_t)) {
-        nanoseconds = std::min(nanoseconds,
-                               static_cast<uint64_t>(std::chrono::nanoseconds::max().count()));
-    }
-    return std::chrono::nanoseconds{nanoseconds};
-}
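The clamp in makeTimeoutDuration() matters exactly when std::chrono::nanoseconds::rep is 64 bits: an unsigned count above the signed maximum would otherwise wrap to a negative duration. A worked sketch of the boundary case:

    // Hypothetical demo: the largest uint64_t clamps to nanoseconds::max()
    // rather than wrapping negative.
    void demoTimeoutClamp() {
        const TimeoutDuration d =
                makeTimeoutDuration(std::numeric_limits<uint64_t>::max());
        CHECK(d == std::chrono::nanoseconds::max());
    }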
-
-Deadline makeDeadline(TimeoutDuration duration) {
-    const auto maxTime = Deadline::max();
-    const auto currentTime = std::chrono::steady_clock::now();
-
-    // If there would be an overflow, use the max value.
-    if (duration > maxTime - currentTime) {
-        return maxTime;
-    }
-    return currentTime + duration;
-}
-
-static uint64_t getMaxNanosecondsSinceEpoch() {
-    const auto maxTime =
-            std::chrono::time_point<std::chrono::steady_clock, std::chrono::nanoseconds>::max();
-    return maxTime.time_since_epoch().count();
-}
-
-std::optional<Deadline> makeDeadline(const V1_3::OptionalTimePoint& timePoint) {
-    using Discriminator = V1_3::OptionalTimePoint::hidl_discriminator;
-    if (timePoint.getDiscriminator() == Discriminator::none) {
-        return std::nullopt;
-    }
-    const uint64_t nanosecondsSinceEpoch = timePoint.nanosecondsSinceEpoch();
-    const uint64_t maxNanosecondsSinceEpoch = getMaxNanosecondsSinceEpoch();
-
-    // Clamp time point to max.
-    if (nanosecondsSinceEpoch >= maxNanosecondsSinceEpoch) {
-        return Deadline::max();
-    }
-
-    // Return provided time point.
-    return Deadline{std::chrono::nanoseconds{nanosecondsSinceEpoch}};
-}
-
-bool hasDeadlinePassed(const std::optional<Deadline>& deadline) {
-    if (!deadline.has_value()) {
-        return false;
-    }
-    return std::chrono::steady_clock::now() >= *deadline;
-}
-
-static OptionalTimePoint makeTimePoint(const Deadline& deadline) {
-    return deadline;
-}
-
-OptionalTimePoint makeTimePoint(const std::optional<Deadline>& deadline) {
-    return deadline.has_value() ? makeTimePoint(*deadline) : OptionalTimePoint{};
-}
-
-static bool isExtensionOperandType(int32_t type) {
-    return (static_cast<uint32_t>(type) >> kExtensionTypeBits) != 0;
-}
-
-static bool isExtensionOperationType(ANeuralNetworksOperationType type) {
-    return (static_cast<uint32_t>(type) >> kExtensionTypeBits) != 0;
-}
-
-bool isExtensionOperandType(V1_3::OperandType type) {
-    return isExtensionOperandType(static_cast<int32_t>(type));
-}
-
-bool isExtensionOperationType(V1_3::OperationType type) {
-    return isExtensionOperationType(static_cast<int32_t>(type));
-}
-
-namespace {
-
-template <typename EntryType, uint32_t entryCount, uint32_t entryCountOEM>
-EntryType tableLookup(const EntryType (&table)[entryCount],
-                      const EntryType (&tableOEM)[entryCountOEM], uint32_t code) {
-    if (code < entryCount) {
-        return table[code];
-    } else if (code >= kOEMCodeBase && (code - kOEMCodeBase) < entryCountOEM) {
-        return tableOEM[code - kOEMCodeBase];
-    } else {
-        nnAssert(!"tableLookup: bad code");
-        return EntryType();
-    }
-}
-
-static Version convert(HalVersion halVersion) {
-    switch (halVersion) {
-        case HalVersion::UNKNOWN:
-            break;
-        case HalVersion::V1_0:
-            return Version::ANDROID_OC_MR1;
-        case HalVersion::V1_1:
-            return Version::ANDROID_P;
-        case HalVersion::V1_2:
-            return Version::ANDROID_Q;
-        case HalVersion::V1_3:
-            return Version::ANDROID_R;
-    }
-    LOG(FATAL) << "Cannot convert " << halVersion;
-    return {};
-}
-
-class OperationValidationContext : public IOperationValidationContext {
-    DISALLOW_IMPLICIT_CONSTRUCTORS(OperationValidationContext);
-
-   public:
-    OperationValidationContext(const char* operationName, uint32_t inputCount,
-                               const uint32_t* inputIndexes, uint32_t outputCount,
-                               const uint32_t* outputIndexes, const Operand* operands)
-        : operationName(operationName),
-          inputCount(inputCount),
-          inputIndexes(inputIndexes),
-          outputCount(outputCount),
-          outputIndexes(outputIndexes),
-          operands(operands) {}
-
-    const char* getOperationName() const override;
-
-    uint32_t getNumInputs() const override;
-    OperandType getInputType(uint32_t index) const override;
-    Shape getInputShape(uint32_t index) const override;
-    const Operand::ExtraParams& getInputExtraParams(uint32_t index) const override;
-
-    uint32_t getNumOutputs() const override;
-    OperandType getOutputType(uint32_t index) const override;
-    Shape getOutputShape(uint32_t index) const override;
-
-   private:
-    const Operand* getInputOperand(uint32_t index) const;
-    const Operand* getOutputOperand(uint32_t index) const;
-
-    const char* operationName;
-    uint32_t inputCount;
-    const uint32_t* inputIndexes;
-    uint32_t outputCount;
-    const uint32_t* outputIndexes;
-    const Operand* operands;
-    Version version;
-};
-
-const char* OperationValidationContext::getOperationName() const {
-    return operationName;
-}
-
-const Operand* OperationValidationContext::getInputOperand(uint32_t index) const {
-    CHECK(index < static_cast<uint32_t>(inputCount));
-    return &operands[inputIndexes[index]];
-}
-
-const Operand* OperationValidationContext::getOutputOperand(uint32_t index) const {
-    CHECK(index < static_cast<uint32_t>(outputCount));
-    return &operands[outputIndexes[index]];
-}
-
-uint32_t OperationValidationContext::getNumInputs() const {
-    return inputCount;
-}
-
-uint32_t OperationValidationContext::getNumOutputs() const {
-    return outputCount;
-}
-
-OperandType OperationValidationContext::getInputType(uint32_t index) const {
-    return getInputOperand(index)->type;
-}
-
-Shape OperationValidationContext::getInputShape(uint32_t index) const {
-    const Operand* operand = getInputOperand(index);
-    return {operand->type, operand->dimensions, operand->scale, operand->zeroPoint,
-            operand->extraParams};
-}
-
-const Operand::ExtraParams& OperationValidationContext::getInputExtraParams(uint32_t index) const {
-    return getInputOperand(index)->extraParams;
-}
-
-OperandType OperationValidationContext::getOutputType(uint32_t index) const {
-    return getOutputOperand(index)->type;
-}
-
-Shape OperationValidationContext::getOutputShape(uint32_t index) const {
-    const Operand* operand = getOutputOperand(index);
-    return {operand->type, operand->dimensions, operand->scale, operand->zeroPoint,
-            operand->extraParams};
-}
-
-};  // anonymous namespace
-
-#define COUNT(X) (sizeof(X) / sizeof(X[0]))
-
-std::string getOperandTypeName(V1_3::OperandType type) {
-    return toString(type);
-}
-
-std::string getOperationName(V1_3::OperationType type) {
-    return toString(type);
-}
-
-const uint32_t kSizeOfDataType[]{
-        4,  // ANEURALNETWORKS_FLOAT32
-        4,  // ANEURALNETWORKS_INT32
-        4,  // ANEURALNETWORKS_UINT32
-        4,  // ANEURALNETWORKS_TENSOR_FLOAT32
-        4,  // ANEURALNETWORKS_TENSOR_INT32
-        1,  // ANEURALNETWORKS_TENSOR_QUANT8_ASYMM
-        1,  // ANEURALNETWORKS_BOOL
-        2,  // ANEURALNETWORKS_TENSOR_QUANT16_SYMM
-        2,  // ANEURALNETWORKS_TENSOR_FLOAT16
-        1,  // ANEURALNETWORKS_TENSOR_BOOL8
-        2,  // ANEURALNETWORKS_FLOAT16
-        1,  // ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL
-        2,  // ANEURALNETWORKS_TENSOR_QUANT16_ASYMM
-        1,  // ANEURALNETWORKS_TENSOR_QUANT8_SYMM
-        1,  // ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED
-        0,  // ANEURALNETWORKS_MODEL
-};
-
-static_assert(COUNT(kSizeOfDataType) == kNumberOfDataTypes, "kSizeOfDataType is incorrect");
-
-const bool kScalarDataType[]{
-        true,   // ANEURALNETWORKS_FLOAT32
-        true,   // ANEURALNETWORKS_INT32
-        true,   // ANEURALNETWORKS_UINT32
-        false,  // ANEURALNETWORKS_TENSOR_FLOAT32
-        false,  // ANEURALNETWORKS_TENSOR_INT32
-        false,  // ANEURALNETWORKS_TENSOR_QUANT8_ASYMM
-        true,   // ANEURALNETWORKS_BOOL
-        false,  // ANEURALNETWORKS_TENSOR_QUANT16_SYMM
-        false,  // ANEURALNETWORKS_TENSOR_FLOAT16
-        false,  // ANEURALNETWORKS_TENSOR_BOOL8
-        true,   // ANEURALNETWORKS_FLOAT16
-        false,  // ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL
-        false,  // ANEURALNETWORKS_TENSOR_QUANT16_ASYMM
-        false,  // ANEURALNETWORKS_TENSOR_QUANT8_SYMM
-        false,  // ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED
-        true,   // ANEURALNETWORKS_MODEL
-};
-
-static_assert(COUNT(kScalarDataType) == kNumberOfDataTypes, "kScalarDataType is incorrect");
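tableLookup() splits one code space across two arrays: plain runtime type codes index the first table, and codes at or above kOEMCodeBase index the OEM table. A hypothetical sanity check built from the tables above:

    // Hypothetical demo, assuming ANEURALNETWORKS_TENSOR_OEM_BYTE is the
    // entry at kOEMCodeBase + 1, as laid out in kSizeOfDataTypeOEM.
    void demoTableLookup() {
        CHECK_EQ(tableLookup(kSizeOfDataType, kSizeOfDataTypeOEM,
                             ANEURALNETWORKS_TENSOR_FLOAT32), 4u);
        CHECK_EQ(tableLookup(kSizeOfDataType, kSizeOfDataTypeOEM,
                             kOEMCodeBase + 1), 1u);
    }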
-
-const uint32_t kSizeOfDataTypeOEM[]{
-        0,  // ANEURALNETWORKS_OEM
-        1,  // ANEURALNETWORKS_TENSOR_OEM_BYTE
-};
-
-static_assert(COUNT(kSizeOfDataTypeOEM) == kNumberOfDataTypesOEM,
-              "kSizeOfDataTypeOEM is incorrect");
-
-const bool kScalarDataTypeOEM[]{
-        true,   // ANEURALNETWORKS_OEM
-        false,  // ANEURALNETWORKS_TENSOR_OEM_BYTE
-};
-
-static_assert(COUNT(kScalarDataTypeOEM) == kNumberOfDataTypesOEM,
-              "kScalarDataTypeOEM is incorrect");
-
-bool nonExtensionOperandTypeIsScalar(int type) {
-    CHECK(!isExtensionOperandType(type)) << "Extension operand types are not supported";
-    return tableLookup(kScalarDataType, kScalarDataTypeOEM, type);
-}
-
-uint32_t nonExtensionOperandSizeOfData(OperandType type, const std::vector<uint32_t>& dimensions) {
-    const size_t size = getNonExtensionSize(type, dimensions).value();
-    CHECK_LE(size, std::numeric_limits<uint32_t>::max());
-    return size;
-}
-
-uint32_t nonExtensionOperandSizeOfData(V1_3::OperandType type,
-                                       const std::vector<uint32_t>& dimensions) {
-    return nonExtensionOperandSizeOfData(uncheckedConvert(type), dimensions);
-}
-
-// Returns a pair of {false, size} on success, {true, 0} if size overflows uint32_t.
-static std::pair<bool, uint32_t> sizeOfTensorDataHelper(uint32_t sizeOfElement,
-                                                        const std::vector<uint32_t>& dimensions) {
-    if (dimensions.empty()) {
-        return {false, 0};
-    }
-    uint64_t size = static_cast<uint64_t>(sizeOfElement);
-    constexpr uint64_t kMaxSize = static_cast<uint64_t>(std::numeric_limits<uint32_t>::max());
-    for (uint32_t d : dimensions) {
-        size *= d;
-        if (size > kMaxSize) return {true, 0};
-    }
-    return {false, static_cast<uint32_t>(size)};
-}
-
-uint32_t sizeOfTensorData(uint32_t sizeOfElement, const std::vector<uint32_t>& dimensions) {
-    const auto [overflow, size] = sizeOfTensorDataHelper(sizeOfElement, dimensions);
-    CHECK(!overflow);
-    return size;
-}
-
-bool nonExtensionOperandSizeOfDataOverflowsUInt32(OperandType type,
-                                                  const std::vector<uint32_t>& dimensions) {
-    CHECK(!isExtension(type)) << "Size of extension operand data is unknown";
-    int n = static_cast<int>(type);
-    uint32_t sizeOfElement = tableLookup(kSizeOfDataType, kSizeOfDataTypeOEM, n);
-    return tableLookup(kScalarDataType, kScalarDataTypeOEM, n)
-                   ? false
-                   : sizeOfTensorDataOverflowsUInt32(sizeOfElement, dimensions);
-}
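sizeOfTensorDataHelper() widens the running product to uint64_t and bails out the moment it exceeds the uint32_t maximum, so a pathological shape cannot silently wrap. A worked example of both sides of the boundary:

    // Hypothetical demo: a 65536 x 65536 float tensor needs 2^34 bytes,
    // which does not fit in uint32_t; a 16 x 16 one (1024 bytes) does.
    void demoSizeOverflow() {
        CHECK(sizeOfTensorDataOverflowsUInt32(4, {1u << 16, 1u << 16}));
        CHECK(!sizeOfTensorDataOverflowsUInt32(4, {16, 16}));
    }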
-
-bool nonExtensionOperandSizeOfDataOverflowsUInt32(V1_3::OperandType type,
-                                                  const std::vector<uint32_t>& dimensions) {
-    return nonExtensionOperandSizeOfDataOverflowsUInt32(uncheckedConvert(type), dimensions);
-}
-
-bool sizeOfTensorDataOverflowsUInt32(uint32_t sizeOfElement,
-                                     const std::vector<uint32_t>& dimensions) {
-    return sizeOfTensorDataHelper(sizeOfElement, dimensions).first;
-}
-
-bool tensorHasUnspecifiedDimensions(int type, const uint32_t* dim, uint32_t dimCount) {
-    if (!isExtensionOperandType(type)) {
-        CHECK(!nonExtensionOperandTypeIsScalar(type))
-                << "A scalar type can never have unspecified dimensions";
-    }
-    return dimCount == 0 || std::find(dim, dim + dimCount, 0) != (dim + dimCount);
-}
-
-bool tensorHasUnspecifiedDimensions(OperandType type, const std::vector<uint32_t>& dimensions) {
-    return tensorHasUnspecifiedDimensions(static_cast<int>(type), dimensions.data(),
-                                          dimensions.size());
-}
-
-bool tensorHasUnspecifiedDimensions(V1_3::OperandType type,
-                                    const std::vector<uint32_t>& dimensions) {
-    return tensorHasUnspecifiedDimensions(static_cast<int>(type), dimensions.data(),
-                                          dimensions.size());
-}
-
-bool tensorHasUnspecifiedDimensions(const ANeuralNetworksOperandType* type) {
-    return tensorHasUnspecifiedDimensions(type->type, type->dimensions, type->dimensionCount);
-}
-
-bool tensorHasUnspecifiedDimensions(const Operand& operand) {
-    return tensorHasUnspecifiedDimensions(operand.type, operand.dimensions);
-}
-
-bool tensorHasUnspecifiedDimensions(const V1_3::Operand& operand) {
-    return tensorHasUnspecifiedDimensions(static_cast<int>(operand.type),
-                                          operand.dimensions.data(), operand.dimensions.size());
-}
-
-uint32_t alignBytesNeeded(uint32_t index, size_t length) {
-    uint32_t pattern;
-    if (length < 2) {
-        pattern = 0;  // No alignment necessary
-    } else if (length < 4) {
-        pattern = 1;  // Align on 2-byte boundary
-    } else {
-        pattern = 3;  // Align on 4-byte boundary
-    }
-    uint32_t extra = (~(index - 1)) & pattern;
-    return extra;
-}
-
-void logModelToInfo(const V1_0::Model& model) {
-    LOG(INFO) << "V1_0::Model start";
-    LOG(INFO) << "operands" << toString(model.operands);
-    LOG(INFO) << "operations" << toString(model.operations);
-    LOG(INFO) << "inputIndexes" << toString(model.inputIndexes);
-    LOG(INFO) << "outputIndexes" << toString(model.outputIndexes);
-    LOG(INFO) << "operandValues size" << model.operandValues.size();
-    LOG(INFO) << "pools" << SHOW_IF_DEBUG(toString(model.pools));
-}
-
-void logModelToInfo(const V1_1::Model& model) {
-    LOG(INFO) << "V1_1::Model start";
-    LOG(INFO) << "operands" << toString(model.operands);
-    LOG(INFO) << "operations" << toString(model.operations);
-    LOG(INFO) << "inputIndexes" << toString(model.inputIndexes);
-    LOG(INFO) << "outputIndexes" << toString(model.outputIndexes);
-    LOG(INFO) << "operandValues size " << model.operandValues.size();
-    LOG(INFO) << "pools" << SHOW_IF_DEBUG(toString(model.pools));
-}
-
-void logModelToInfo(const V1_2::Model& model) {
-    LOG(INFO) << "V1_2::Model start";
-    LOG(INFO) << "operands" << toString(model.operands);
-    LOG(INFO) << "operations" << toString(model.operations);
-    LOG(INFO) << "inputIndexes" << toString(model.inputIndexes);
-    LOG(INFO) << "outputIndexes" << toString(model.outputIndexes);
-    LOG(INFO) << "operandValues size" << model.operandValues.size();
-    LOG(INFO) << "pools" << SHOW_IF_DEBUG(toString(model.pools));
-    LOG(INFO) << "relaxComputationFloat32toFloat16" << model.relaxComputationFloat32toFloat16;
-    LOG(INFO) << "extensionNameToPrefix" << toString(model.extensionNameToPrefix);
-}
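alignBytesNeeded() above rounds a buffer offset up to the natural alignment of the next value: 1-byte values need no padding, 2- and 3-byte values align to 2, and anything larger aligns to 4. A worked check of the arithmetic:

    // Hypothetical demo of (~(index - 1)) & pattern.
    void demoAlignment() {
        CHECK_EQ(alignBytesNeeded(/*index=*/6, /*length=*/4), 2u);  // 6 -> 8
        CHECK_EQ(alignBytesNeeded(/*index=*/6, /*length=*/2), 0u);  // already even
        CHECK_EQ(alignBytesNeeded(/*index=*/7, /*length=*/1), 0u);  // no alignment
    }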
-
-static void logSubgraphToInfo(std::string label, const V1_3::Subgraph& subgraph) {
-    LOG(INFO) << label << ".operands" << toString(subgraph.operands);
-    LOG(INFO) << label << ".operations" << toString(subgraph.operations);
-    LOG(INFO) << label << ".inputIndexes" << toString(subgraph.inputIndexes);
-    LOG(INFO) << label << ".outputIndexes" << toString(subgraph.outputIndexes);
-}
-
-void logModelToInfo(const V1_3::Model& model) {
-    LOG(INFO) << "V1_3::Model start";
-    logSubgraphToInfo("main", model.main);
-    for (uint32_t i = 0, n = model.referenced.size(); i < n; ++i) {
-        logSubgraphToInfo("referenced[" + std::to_string(i) + "]", model.referenced[i]);
-    }
-    LOG(INFO) << "operandValues size " << model.operandValues.size();
-    LOG(INFO) << "pools" << SHOW_IF_DEBUG(toString(model.pools));
-    LOG(INFO) << "relaxComputationFloat32toFloat16 " << model.relaxComputationFloat32toFloat16;
-    LOG(INFO) << "extensionNameToPrefix" << toString(model.extensionNameToPrefix);
-}
-
-void logModelToInfo(const Model& model) {
-    LOG(INFO) << "Model start";
-    logModelToInfo(convertToV1_3(model));
-}
-
-bool validateOperandSymmPerChannelQuantParams(
-        const V1_3::Operand& halOperand,
-        const ANeuralNetworksSymmPerChannelQuantParams& channelQuant, const char* tag) {
-    if (halOperand.type != V1_3::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL) {
-        return false;
-    }
-
-    NN_RET_CHECK_LT(channelQuant.channelDim, halOperand.dimensions.size()) << tag;
-    NN_RET_CHECK(channelQuant.scales != nullptr) << tag;
-    NN_RET_CHECK_EQ(channelQuant.scaleCount, halOperand.dimensions[channelQuant.channelDim]) << tag;
-    NN_RET_CHECK_NE(halOperand.dimensions[channelQuant.channelDim], 0u)
-            << tag << " channel dimension " << channelQuant.channelDim << " is underspecified";
-    for (uint32_t i = 0; i < halOperand.dimensions[channelQuant.channelDim]; i++) {
-        NN_RET_CHECK_GT(channelQuant.scales[i], 0.0f) << tag << " invalid scaleArray[" << i << "]";
-    }
-    return true;
-}
-
-static bool validateScalarDimensions(const ANeuralNetworksOperandType& type, const char* tag) {
-    NN_RET_CHECK_EQ(type.dimensionCount, 0u) << tag << " invalid dimensions for scalar type";
-    NN_RET_CHECK(type.dimensions == nullptr) << tag << " invalid dimensions for scalar type";
-    return true;
-}
-
-static bool validateQuant8AsymmParams(const ANeuralNetworksOperandType& type, const char* tag) {
-    NN_RET_CHECK(0 <= type.zeroPoint && type.zeroPoint <= 255)
-            << tag << " invalid zeroPoint: " << type.zeroPoint;
-    NN_RET_CHECK_GT(type.scale, 0.f) << tag << " invalid scale";
-    return true;
-}
-
-static bool validateQuant8AsymmSignedParams(const ANeuralNetworksOperandType& type,
-                                            const char* tag) {
-    NN_RET_CHECK(-128 <= type.zeroPoint && type.zeroPoint <= 127)
-            << tag << " invalid zeroPoint: " << type.zeroPoint;
-    NN_RET_CHECK_GT(type.scale, 0.f) << tag << " invalid scale";
-    return true;
-}
-
-static bool validateQuant8SymmParams(const ANeuralNetworksOperandType& type, const char* tag) {
-    NN_RET_CHECK_EQ(type.zeroPoint, 0) << tag << " invalid zeroPoint: " << type.zeroPoint;
-    NN_RET_CHECK_GT(type.scale, 0.f) << tag << " invalid scale";
-    return true;
-}
-
-static bool validateQuant16AsymmParams(const ANeuralNetworksOperandType& type, const char* tag) {
-    NN_RET_CHECK(0 <= type.zeroPoint && type.zeroPoint <= 65535)
-            << tag << " invalid zeroPoint: " << type.zeroPoint;
-    NN_RET_CHECK_GT(type.scale, 0.f) << tag << " invalid scale";
-    return true;
-}
-
-static bool validateQuantSymmParams(const ANeuralNetworksOperandType& type, const char* tag) {
tag) { - NN_RET_CHECK_EQ(type.zeroPoint, 0) << tag << " zeroPoint is not zero"; - NN_RET_CHECK_GT(type.scale, 0.f) << tag << " invalid scale"; - return true; -} - -static bool validateNoQuantParams(const ANeuralNetworksOperandType& type, const char* tag) { - NN_RET_CHECK_EQ(type.zeroPoint, 0) << tag << " zeroPoint is not zero"; - NN_RET_CHECK_EQ(type.scale, 0.f) << tag << " scale is not zero"; - return true; -} - -static bool validateTensorDimensions( - const ANeuralNetworksOperandType& type, - const Extension::OperandTypeInformation* const extensionOperandTypeInfo, const char* tag, - bool allowPartial) { - if (!allowPartial) { - NN_RET_CHECK_GT(type.dimensionCount, 0u) << tag << " invalid operand dimensions"; - } - uint64_t size = - isExtensionOperandType(type.type) - ? extensionOperandTypeInfo->byteSize - : tableLookup(kSizeOfDataType, kSizeOfDataTypeOEM, static_cast(type.type)); - constexpr uint64_t kMaxSize = std::numeric_limits::max(); - for (uint32_t i = 0; i < type.dimensionCount; i++) { - if (!allowPartial) { - NN_RET_CHECK_NE(type.dimensions[i], 0u) << tag << " invalid operand dimensions"; - } - if (type.dimensions[i] != 0) { - size *= type.dimensions[i]; - NN_RET_CHECK_LE(size, kMaxSize) << tag << " operand byte size exceeds " << kMaxSize; - } - } - return true; -} - -static bool validateOperandTypeHelper( - const ANeuralNetworksOperandType& type, - const Extension::OperandTypeInformation* const extensionOperandTypeInfo, const char* tag, - bool allowPartial) { - NN_RET_CHECK_EQ(type.dimensionCount == 0, type.dimensions == nullptr); - if (isExtensionOperandType(type.type)) { - NN_RET_CHECK(extensionOperandTypeInfo != nullptr); - if (extensionOperandTypeInfo->isTensor) { - NN_RET_CHECK( - validateTensorDimensions(type, extensionOperandTypeInfo, tag, allowPartial)); - } else { - NN_RET_CHECK(validateScalarDimensions(type, tag)); - } - return validateNoQuantParams(type, tag); - } - - NN_RET_CHECK(extensionOperandTypeInfo == nullptr); - NN_RET_CHECK(validCode(kNumberOfDataTypes, kNumberOfDataTypesOEM, type.type)) - << tag << " invalid OperandType: " << type.type; - - bool isScalar = tableLookup(kScalarDataType, kScalarDataTypeOEM, type.type); - if (isScalar) { - NN_RET_CHECK(validateScalarDimensions(type, tag)); - if (type.type != ANEURALNETWORKS_OEM_SCALAR) { // Historically, we have allowed OEM types - // to use quantization parameters. - NN_RET_CHECK(validateNoQuantParams(type, tag)); - } - } else { - NN_RET_CHECK(validateTensorDimensions(type, extensionOperandTypeInfo, tag, allowPartial)); - if (type.type == ANEURALNETWORKS_TENSOR_QUANT8_ASYMM) { - NN_RET_CHECK(validateQuant8AsymmParams(type, tag)); - } else if (type.type == ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED) { - NN_RET_CHECK(validateQuant8AsymmSignedParams(type, tag)); - } else if (type.type == ANEURALNETWORKS_TENSOR_QUANT8_SYMM) { - NN_RET_CHECK(validateQuant8SymmParams(type, tag)); - } else if (type.type == ANEURALNETWORKS_TENSOR_QUANT16_ASYMM) { - NN_RET_CHECK(validateQuant16AsymmParams(type, tag)); - } else if (type.type == ANEURALNETWORKS_TENSOR_QUANT16_SYMM) { - NN_RET_CHECK(validateQuantSymmParams(type, tag)); - } else if (type.type == ANEURALNETWORKS_TENSOR_INT32) { - // TODO(b/119869082): TENSOR_INT32 should not use quantization parameters. - } else if (type.type == ANEURALNETWORKS_TENSOR_OEM_BYTE) { - // Historically, we have allowed OEM types to use quantization parameters. 
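Editor's note: the chain of validate*Params() helpers above encodes the per-type quantization rules of the NNAPI operand model. As a minimal standalone restatement of one rule, TENSOR_QUANT8_ASYMM (plain C++; the struct is an illustrative stand-in for the relevant ANeuralNetworksOperandType fields, not part of this patch):

    #include <cstdint>

    struct QuantParams {
        int32_t zeroPoint;
        float scale;
    };

    // TENSOR_QUANT8_ASYMM: zeroPoint must lie in [0, 255] and scale must be
    // strictly positive, exactly as validateQuant8AsymmParams() checks.
    bool isValidQuant8Asymm(const QuantParams& p) {
        return 0 <= p.zeroPoint && p.zeroPoint <= 255 && p.scale > 0.f;
    }

    int main() {
        const QuantParams ok = {128, 0.5f};  // passes
        const QuantParams bad = {-1, 0.5f};  // zeroPoint out of range
        return (isValidQuant8Asymm(ok) && !isValidQuant8Asymm(bad)) ? 0 : 1;
    }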
- } else { - NN_RET_CHECK(validateNoQuantParams(type, tag)); - } - } - - return true; -} - -int validateOperandType(const ANeuralNetworksOperandType& type, - const Extension::OperandTypeInformation* const extensionOperandTypeInfo, - const char* tag, bool allowPartial) { - return validateOperandTypeHelper(type, extensionOperandTypeInfo, tag, allowPartial) - ? ANEURALNETWORKS_NO_ERROR - : ANEURALNETWORKS_BAD_DATA; -} - -int validateOperandList(uint32_t count, const uint32_t* list, uint32_t operandCount, - const char* tag) { - for (uint32_t i = 0; i < count; i++) { - if (list[i] >= operandCount) { - LOG(ERROR) << tag << " invalid operand index at " << i << " = " << list[i] - << ", operandCount " << operandCount; - return ANEURALNETWORKS_BAD_DATA; - } - } - return ANEURALNETWORKS_NO_ERROR; -} - -int validateOperationOperandTypes(const std::vector& operands, uint32_t inOperandCount, - const uint32_t* inOperandIndexes, - const std::vector& inExpectedTypes, - uint32_t outOperandCount, const uint32_t* outOperandIndexes, - const std::vector& outExpectedInTypes) { - if (inOperandCount != static_cast(inExpectedTypes.size()) || - outOperandCount != static_cast(outExpectedInTypes.size())) { - LOG(ERROR) << "Wrong operand count: expected " << inExpectedTypes.size() << " inputs and " - << outExpectedInTypes.size() << " outputs," - << "got " << inOperandCount << " inputs and " << outOperandCount << " outputs"; - return ANEURALNETWORKS_BAD_DATA; - } - for (uint32_t i = 0; i < inOperandCount; i++) { - if (operands[inOperandIndexes[i]].type != inExpectedTypes[i]) { - LOG(ERROR) << "Invalid input tensor type " << operands[inOperandIndexes[i]].type - << " for input " << i << ", expected " << inExpectedTypes[i]; - return ANEURALNETWORKS_BAD_DATA; - } - } - for (uint32_t i = 0; i < outOperandCount; i++) { - if (operands[outOperandIndexes[i]].type != outExpectedInTypes[i]) { - LOG(ERROR) << "Invalid output tensor type " << operands[outOperandIndexes[i]].type - << " for input " << i << ", expected " << outExpectedInTypes[i]; - return ANEURALNETWORKS_BAD_DATA; - } - } - - return ANEURALNETWORKS_NO_ERROR; -} - -static int validateHalVersion(ANeuralNetworksOperationType opType, HalVersion halVersion, - HalVersion minSupportedHalVersion) { - if (halVersion < minSupportedHalVersion) { - LOG(ERROR) << "The given inputs and outputs for operation " << opType - << " are only supported in " << minSupportedHalVersion - << " and later (validating using " << halVersion << ")"; - return ANEURALNETWORKS_BAD_DATA; - } - return ANEURALNETWORKS_NO_ERROR; -} - -// Checks if two operands have the same types, ranks (if specified), dimensions -// (if specified), scales, zeroPoints, and extraParams. 
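Editor's note: the comment above is the whole contract of compatible(), defined next: unknown rank (an empty dimension vector) and unknown extents (zero entries) act as wildcards. A self-contained sketch of just the dimension rule (hypothetical helper name, plain std::vector in place of the runtime's operand types):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Unknown rank (empty vector) or an unknown extent (0) matches anything;
    // known extents must agree. The real compatible() additionally compares
    // type, scale, zeroPoint, and extraParams.
    bool dimensionsCompatible(const std::vector<uint32_t>& a, const std::vector<uint32_t>& b) {
        if (a.empty() || b.empty()) return true;  // unknown rank matches any rank
        if (a.size() != b.size()) return false;   // known ranks must match
        for (size_t i = 0; i < a.size(); ++i) {
            if (a[i] != 0 && b[i] != 0 && a[i] != b[i]) return false;  // 0 = unknown extent
        }
        return true;
    }

    int main() {
        // {2, 0} matches {2, 3} (second extent unknown) but not {4, 3}.
        return (dimensionsCompatible({2, 0}, {2, 3}) && !dimensionsCompatible({2, 0}, {4, 3}))
                ? 0 : 1;
    }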
-static bool compatible(const Operand& a, const Operand& b) { - NN_RET_CHECK(a.type == b.type) << a.type << " != " << b.type; - if (a.dimensions.size() != 0 && b.dimensions.size() != 0) { - NN_RET_CHECK_EQ(a.dimensions.size(), b.dimensions.size()) << "Incompatible dimensions"; - for (uint32_t i = 0, n = a.dimensions.size(); i < n; ++i) { - if (a.dimensions[i] != 0 && b.dimensions[i] != 0) { - NN_RET_CHECK_EQ(a.dimensions[i], b.dimensions[i]) << "Incompatible dimensions"; - } - } - } - NN_RET_CHECK_EQ(a.scale, b.scale); - NN_RET_CHECK_EQ(a.zeroPoint, b.zeroPoint); - NN_RET_CHECK(a.extraParams == b.extraParams) << a.extraParams << " != " << b.extraParams; - return true; -} - -static bool validateConditionOperand(const Operand& operand) { - NN_RET_CHECK(operand.type == OperandType::TENSOR_BOOL8) - << "Unexpected condition operand type: " << operand.type; - NN_RET_CHECK_EQ(operand.dimensions.size(), 1u) << "Condition operand must be a singleton"; - NN_RET_CHECK_EQ(operand.dimensions[0], 1u) << "Condition operand must be a singleton"; - return true; -} - -static void checkSubgraphValidationHelper(const SubgraphValidationHelper& helper) { - CHECK(helper.isValidSubgraphReference != nullptr); - CHECK(helper.getSubgraphInputCount != nullptr); - CHECK(helper.getSubgraphOutputCount != nullptr); - CHECK(helper.getSubgraphInputOperand != nullptr); - CHECK(helper.getSubgraphOutputOperand != nullptr); -} - -static bool validateIfOperation(uint32_t inputCount, const uint32_t* inputs, uint32_t outputCount, - const uint32_t* outputs, const std::vector& operands, - const SubgraphValidationHelper& helper) { - namespace op = operation_if; - checkSubgraphValidationHelper(helper); - NN_RET_CHECK_GE(inputCount, 3u) << "ANEURALNETWORKS_IF must have at least 3 inputs"; - NN_RET_CHECK_GE(outputCount, 1u) << "ANEURALNETWORKS_IF must have at least 1 output"; - auto validateBranchOperand = [&](const Operand& branchModelOperand) -> bool { - NN_RET_CHECK(helper.isValidSubgraphReference(branchModelOperand)) - << "Operand is not a valid subgraph reference"; - const uint32_t branchModelInputCount = helper.getSubgraphInputCount(branchModelOperand); - const uint32_t branchModelOutputCount = helper.getSubgraphOutputCount(branchModelOperand); - NN_RET_CHECK_EQ(inputCount, op::kFirstInput + branchModelInputCount); - NN_RET_CHECK_EQ(outputCount, branchModelOutputCount); - for (uint32_t i = 0; i < branchModelInputCount; ++i) { - const Operand& innerOperand = *helper.getSubgraphInputOperand(branchModelOperand, i); - const Operand& outerOperand = operands[inputs[op::kFirstInput + i]]; - NN_RET_CHECK(compatible(innerOperand, outerOperand)); - } - for (uint32_t i = 0; i < branchModelOutputCount; ++i) { - const Operand& innerOperand = *helper.getSubgraphOutputOperand(branchModelOperand, i); - const Operand& outerOperand = operands[outputs[i]]; - NN_RET_CHECK(compatible(innerOperand, outerOperand)); - } - return true; - }; - NN_RET_CHECK(validateConditionOperand(operands[inputs[op::kCondBoolOperand]])) - << "Validation failed for IF condition operand"; - NN_RET_CHECK(validateBranchOperand(operands[inputs[op::kThenModelOperand]])) - << "Validation failed for IF then model"; - NN_RET_CHECK(validateBranchOperand(operands[inputs[op::kElseModelOperand]])) - << "Validation failed for IF else model"; - return true; -} - -static bool validateControlFlowOperandUnknownSize(const SubgraphValidationHelper& helper, - const Operand& operand) { - if (!helper.allowControlFlowOperationWithOperandOfUnknownSize && !isExtension(operand.type)) { - 
NN_RET_CHECK_NE(nonExtensionOperandSizeOfData(operand.type, operand.dimensions), 0u); - } - return true; -} - -static bool validateWhileOperation(uint32_t inputCount, const uint32_t* inputs, - uint32_t outputCount, const uint32_t* outputs, - const std::vector& operands, - const SubgraphValidationHelper& helper) { - // Let the loop have - // - m >= 1 input-output operands, - // - k >= 0 state-only operands, and - // - n >= 0 input-only operands. - // Then - // - the WHILE loop operation has (2 + m + k + n) inputs and m outputs. - // - the condition model has (m + k + n) inputs and 1 output. - // - the body model has (m + k + n) inputs and (m + k) outputs. - namespace op = operation_while; - checkSubgraphValidationHelper(helper); - NN_RET_CHECK_GE(inputCount, 3u) << "ANEURALNETWORKS_WHILE must have at least 3 inputs"; - NN_RET_CHECK_GE(outputCount, 1u) << "ANEURALNETWORKS_WHILE must have at least 1 output"; - auto validateCondOperand = [&](const Operand& condModelOperand) -> bool { - NN_RET_CHECK(helper.isValidSubgraphReference(condModelOperand)) - << "Operand is not a valid subgraph reference"; - const uint32_t condModelInputCount = helper.getSubgraphInputCount(condModelOperand); - const uint32_t condModelOutputCount = helper.getSubgraphOutputCount(condModelOperand); - NN_RET_CHECK_EQ(inputCount, op::kFirstInput + condModelInputCount); - NN_RET_CHECK_EQ(condModelOutputCount, 1u); - for (uint32_t i = 0; i < condModelInputCount; ++i) { - const Operand& innerOperand = *helper.getSubgraphInputOperand(condModelOperand, i); - const Operand& outerOperand = operands[inputs[op::kFirstInput + i]]; - NN_RET_CHECK(compatible(innerOperand, outerOperand)); - NN_RET_CHECK(validateControlFlowOperandUnknownSize(helper, innerOperand)); - NN_RET_CHECK(validateControlFlowOperandUnknownSize(helper, outerOperand)); - } - NN_RET_CHECK( - validateConditionOperand(*helper.getSubgraphOutputOperand(condModelOperand, 0))); - return true; - }; - auto validateBodyOperand = [&](const Operand& bodyModelOperand) -> bool { - NN_RET_CHECK(helper.isValidSubgraphReference(bodyModelOperand)) - << "Operand is not a valid subgraph reference"; - const uint32_t bodyModelInputCount = helper.getSubgraphInputCount(bodyModelOperand); - const uint32_t bodyModelOutputCount = helper.getSubgraphOutputCount(bodyModelOperand); - NN_RET_CHECK_EQ(inputCount, op::kFirstInput + bodyModelInputCount); - NN_RET_CHECK_GE(bodyModelOutputCount, outputCount); - NN_RET_CHECK_GE(bodyModelInputCount, bodyModelOutputCount); - const uint32_t inputOutputCount = outputCount; - const uint32_t stateOnlyCount = bodyModelOutputCount - inputOutputCount; - const uint32_t inputOnlyCount = bodyModelInputCount - bodyModelOutputCount; - for (uint32_t i = 0, n = inputOutputCount + stateOnlyCount + inputOnlyCount; i < n; ++i) { - const Operand& innerOperand = *helper.getSubgraphInputOperand(bodyModelOperand, i); - const Operand& outerOperand = operands[inputs[op::kFirstInput + i]]; - NN_RET_CHECK(compatible(innerOperand, outerOperand)); - NN_RET_CHECK(validateControlFlowOperandUnknownSize(helper, innerOperand)); - NN_RET_CHECK(validateControlFlowOperandUnknownSize(helper, outerOperand)); - } - for (uint32_t i = 0; i < inputOutputCount; ++i) { - const Operand& innerOperand = *helper.getSubgraphOutputOperand(bodyModelOperand, i); - const Operand& outerOperand = operands[outputs[i]]; - NN_RET_CHECK(compatible(innerOperand, outerOperand)); - NN_RET_CHECK(validateControlFlowOperandUnknownSize(helper, outerOperand)); - } - for (uint32_t i = 0, n = inputOutputCount + 
stateOnlyCount; i < n; ++i) { - const Operand& inputOperand = *helper.getSubgraphInputOperand(bodyModelOperand, i); - const Operand& outputOperand = *helper.getSubgraphOutputOperand(bodyModelOperand, i); - NN_RET_CHECK(compatible(inputOperand, outputOperand)); - NN_RET_CHECK(validateControlFlowOperandUnknownSize(helper, outputOperand)); - } - return true; - }; - NN_RET_CHECK(validateCondOperand(operands[inputs[op::kCondModelOperand]])) - << "Validation failed for WHILE condition model"; - NN_RET_CHECK(validateBodyOperand(operands[inputs[op::kBodyModelOperand]])) - << "Validation failed for WHILE body model"; - return true; -} - -static inline int validateOperation(ANeuralNetworksOperationType opType, uint32_t inputCount, - const uint32_t* inputIndexes, uint32_t outputCount, - const uint32_t* outputIndexes, - const std::vector& operands, HalVersion halVersion) { - if (opType == ANEURALNETWORKS_IF || opType == ANEURALNETWORKS_WHILE) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3)); - LOG(ERROR) << "This validateOperation() overload does not support control flow"; - return ANEURALNETWORKS_BAD_DATA; - } - return validateOperation(opType, inputCount, inputIndexes, outputCount, outputIndexes, operands, - halVersion, {}); -} - -int validateOperation(ANeuralNetworksOperationType opType, uint32_t inputCount, - const uint32_t* inputIndexes, uint32_t outputCount, - const uint32_t* outputIndexes, const std::vector& operands, - HalVersion halVersion, const SubgraphValidationHelper& helper) { - NN_RETURN_IF_ERROR(validateOperandList(inputCount, inputIndexes, - static_cast(operands.size()), - "ANeuralNetworksModel_addOperation inputs")); - NN_RETURN_IF_ERROR(validateOperandList(outputCount, outputIndexes, - static_cast(operands.size()), - "ANeuralNetworksModel_addOperation outputs")); - - if (isExtensionOperationType(opType)) { - if (halVersion < HalVersion::V1_2) { - LOG(ERROR) - << "Extension operations are supported since HAL version 1.2, validating using " - << halVersion; - return ANEURALNETWORKS_BAD_DATA; - } - // There is no other validation we can do for an extension operation. 
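Editor's note: the comment above and the return that follows are the crux of extension handling: the generic validator can only range-check the operand indexes and gate on the HAL version, because it cannot know an extension operation's signature. A sketch of that gating logic in isolation (names and result codes are illustrative stand-ins, not from this patch):

    enum class HalVersion { V1_0, V1_1, V1_2, V1_3 };

    // For extension operations, only the HAL-version gate can be checked
    // generically; the extension itself owns all further semantics.
    int validateExtensionOperation(HalVersion halVersion) {
        constexpr int kNoError = 0;
        constexpr int kBadData = 1;
        if (halVersion < HalVersion::V1_2) {
            return kBadData;  // extensions require HAL 1.2 or later
        }
        return kNoError;  // nothing else can be checked generically
    }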
- return ANEURALNETWORKS_NO_ERROR; - } - - auto logInvalidInOutNumber = [opType, inputCount, outputCount](int expIn, int expOut) { - LOG(ERROR) << "Invalid number of input operands (" << inputCount << ", expected " << expIn - << ") or output operands (" << outputCount << ", expected " << expOut - << ") for operation " << opType; - }; - - switch (opType) { - case ANEURALNETWORKS_OEM_OPERATION: { - return ANEURALNETWORKS_NO_ERROR; - } - case ANEURALNETWORKS_RESHAPE: { - if (inputCount != 2 || outputCount != 1) { - logInvalidInOutNumber(2, 1); - return ANEURALNETWORKS_BAD_DATA; - } - auto inputType = operands[inputIndexes[0]].type; - std::vector inExpectedTypes; - std::vector outExpectedTypes; - if (inputType == OperandType::TENSOR_FLOAT32) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0)); - inExpectedTypes = {OperandType::TENSOR_FLOAT32, OperandType::TENSOR_INT32}; - outExpectedTypes = {OperandType::TENSOR_FLOAT32}; - } else if (inputType == OperandType::TENSOR_FLOAT16) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); - inExpectedTypes = {OperandType::TENSOR_FLOAT16, OperandType::TENSOR_INT32}; - outExpectedTypes = {OperandType::TENSOR_FLOAT16}; - } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0)); - inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM, OperandType::TENSOR_INT32}; - outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM}; - } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3)); - inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED, - OperandType::TENSOR_INT32}; - outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED}; - } else { - LOG(ERROR) << "Unsupported input tensor type for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - const auto inputRank = operands[inputIndexes[0]].dimensions.size(); - if (inputRank > 4) { - LOG(ERROR) << "Unsupported input tensor rank for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - return validateOperationOperandTypes(operands, inputCount, inputIndexes, - inExpectedTypes, outputCount, outputIndexes, - outExpectedTypes); - } - case ANEURALNETWORKS_DEPTH_TO_SPACE: { - if ((inputCount != 3 && inputCount != 2) || outputCount != 1) { - LOG(ERROR) << "Invalid number of input operands (" << inputCount - << ", expected 3 or 2) or output operands (" << outputCount - << ", expected 1) for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - auto inputType = operands[inputIndexes[0]].type; - std::vector inExpectedTypes; - std::vector outExpectedTypes; - if (inputType == OperandType::TENSOR_FLOAT32) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0)); - inExpectedTypes = {OperandType::TENSOR_FLOAT32, OperandType::INT32}; - outExpectedTypes = {OperandType::TENSOR_FLOAT32}; - } else if (inputType == OperandType::TENSOR_FLOAT16) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); - inExpectedTypes = {OperandType::TENSOR_FLOAT16, OperandType::INT32}; - outExpectedTypes = {OperandType::TENSOR_FLOAT16}; - } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0)); - inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM, OperandType::INT32}; - outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM}; - } else if (inputType == 
OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3)); - inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED, OperandType::INT32}; - outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED}; - } else { - LOG(ERROR) << "Unsupported input tensor type for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - if (inputCount == 3) { - inExpectedTypes.push_back(OperandType::BOOL); - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); - } else { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0)); - } - return validateOperationOperandTypes(operands, inputCount, inputIndexes, - inExpectedTypes, outputCount, outputIndexes, - outExpectedTypes); - } - case ANEURALNETWORKS_SPACE_TO_DEPTH: { - if ((inputCount != 3 && inputCount != 2) || outputCount != 1) { - LOG(ERROR) << "Invalid number of input operands (" << inputCount - << ", expected 3 or 2) or output operands (" << outputCount - << ", expected 1) for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - auto inputType = operands[inputIndexes[0]].type; - std::vector inExpectedTypes; - std::vector outExpectedTypes; - if (inputType == OperandType::TENSOR_FLOAT32) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0)); - inExpectedTypes = {OperandType::TENSOR_FLOAT32, OperandType::INT32}; - outExpectedTypes = {OperandType::TENSOR_FLOAT32}; - } else if (inputType == OperandType::TENSOR_FLOAT16) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); - inExpectedTypes = {OperandType::TENSOR_FLOAT16, OperandType::INT32}; - outExpectedTypes = {OperandType::TENSOR_FLOAT16}; - } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0)); - inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM, OperandType::INT32}; - outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM}; - } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3)); - inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED, OperandType::INT32}; - outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED}; - } else { - LOG(ERROR) << "Unsupported input tensor type for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - if (inputCount == 3) { - inExpectedTypes.push_back(OperandType::BOOL); - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); - } else { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0)); - } - return validateOperationOperandTypes(operands, inputCount, inputIndexes, - inExpectedTypes, outputCount, outputIndexes, - outExpectedTypes); - } - case ANEURALNETWORKS_EMBEDDING_LOOKUP: { - if (inputCount != 2 || outputCount != 1) { - logInvalidInOutNumber(2, 1); - return ANEURALNETWORKS_BAD_DATA; - } - auto inputType = operands[inputIndexes[1]].type; - if (inputType != OperandType::TENSOR_FLOAT16 && - inputType != OperandType::TENSOR_FLOAT32 && - inputType != OperandType::TENSOR_INT32 && - inputType != OperandType::TENSOR_QUANT8_ASYMM && - inputType != OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - LOG(ERROR) << "Unsupported input tensor type for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - std::vector inExpectedTypes = {OperandType::TENSOR_INT32, inputType}; - std::vector outExpectedTypes = {inputType}; - if (inputType == 
OperandType::TENSOR_FLOAT16 || - inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3)); - } else if (inputType == OperandType::TENSOR_INT32 || - inputType == OperandType::TENSOR_QUANT8_ASYMM) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); - } else { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0)); - } - return validateOperationOperandTypes(operands, inputCount, inputIndexes, - inExpectedTypes, outputCount, outputIndexes, - outExpectedTypes); - } - case ANEURALNETWORKS_HASHTABLE_LOOKUP: { - if (inputCount != 3 || outputCount != 2) { - logInvalidInOutNumber(3, 2); - return ANEURALNETWORKS_BAD_DATA; - } - auto inputType = operands[inputIndexes[2]].type; - if (inputType != OperandType::TENSOR_FLOAT32 && - inputType != OperandType::TENSOR_INT32 && - inputType != OperandType::TENSOR_QUANT8_ASYMM) { - LOG(ERROR) << "Unsupported input tensor type for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - std::vector inExpectedTypes = {OperandType::TENSOR_INT32, - OperandType::TENSOR_INT32, inputType}; - std::vector outExpectedTypes = {inputType, - OperandType::TENSOR_QUANT8_ASYMM}; - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0)); - return validateOperationOperandTypes(operands, inputCount, inputIndexes, - inExpectedTypes, outputCount, outputIndexes, - outExpectedTypes); - } - case ANEURALNETWORKS_LSH_PROJECTION: { - if (inputCount != 4 || outputCount != 1) { - logInvalidInOutNumber(4, 1); - return ANEURALNETWORKS_BAD_DATA; - } - auto inputType = operands[inputIndexes[1]].type; - if (inputType != OperandType::TENSOR_FLOAT16 && - inputType != OperandType::TENSOR_FLOAT32 && - inputType != OperandType::TENSOR_INT32 && - inputType != OperandType::TENSOR_QUANT8_ASYMM) { - LOG(ERROR) << "Unsupported input tensor type for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - auto hashType = operands[inputIndexes[0]].type; - std::vector inExpectedTypes; - if (hashType == OperandType::TENSOR_FLOAT16) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); - inExpectedTypes = { - OperandType::TENSOR_FLOAT16, - inputType, - OperandType::TENSOR_FLOAT16, - OperandType::INT32, - }; - } else if (hashType == OperandType::TENSOR_FLOAT32) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0)); - inExpectedTypes = { - OperandType::TENSOR_FLOAT32, - inputType, - OperandType::TENSOR_FLOAT32, - OperandType::INT32, - }; - } else { - LOG(ERROR) << "Unsupported hash tensor type for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - std::vector outExpectedTypes = {OperandType::TENSOR_INT32}; - return validateOperationOperandTypes(operands, inputCount, inputIndexes, - inExpectedTypes, outputCount, outputIndexes, - outExpectedTypes); - } - case ANEURALNETWORKS_BIDIRECTIONAL_SEQUENCE_LSTM: { - const uint32_t kNumOutputs = 2; - const uint32_t kNumOutputsMerged = 1; - const uint32_t kNumOutputsWithState = 6; - const uint32_t kNumOutputsMergedWithState = 5; - if (inputCount != 61 || - (outputCount != kNumOutputs && outputCount != kNumOutputsMerged && - outputCount != kNumOutputsWithState && - outputCount != kNumOutputsMergedWithState)) { - LOG(ERROR) << "Invalid number of input operands (" << inputCount - << ", expected 61) or output operands (" << outputCount - << ", expected 1, 2, 5 or 6) for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - - std::vector 
inExpectedTypes; - auto inputType = operands[inputIndexes[0]].type; - if (inputType != OperandType::TENSOR_FLOAT32 && - inputType != OperandType::TENSOR_FLOAT16) { - LOG(ERROR) << "Unsupported input tensor type for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - - inExpectedTypes = {}; - for (int i = 0; i < 48; ++i) { - inExpectedTypes.push_back(inputType); - } - inExpectedTypes.push_back(OperandType::INT32); - inExpectedTypes.push_back(inputType == OperandType::TENSOR_FLOAT32 - ? OperandType::FLOAT32 - : OperandType::FLOAT16); - inExpectedTypes.push_back(inputType == OperandType::TENSOR_FLOAT32 - ? OperandType::FLOAT32 - : OperandType::FLOAT16); - inExpectedTypes.push_back(OperandType::BOOL); - inExpectedTypes.push_back(OperandType::BOOL); - for (int i = 0; i < 8; ++i) { - inExpectedTypes.push_back(inputType); - } - - HalVersion minSupportedHalVersion = HalVersion::V1_2; - if (outputCount == kNumOutputsWithState || outputCount == kNumOutputsMergedWithState) { - minSupportedHalVersion = HalVersion::V1_3; - } - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, minSupportedHalVersion)); - std::vector outExpectedTypes(outputCount, inputType); - auto status = validateOperationOperandTypes(operands, inputCount, inputIndexes, - inExpectedTypes, outputCount, outputIndexes, - outExpectedTypes); - return status; - } - case ANEURALNETWORKS_LSTM: { - if ((inputCount != 23 && inputCount != 27) || outputCount != 4) { - LOG(ERROR) << "Invalid number of input operands (" << inputCount - << ", expected 23 or 27) or output operands (" << outputCount - << ", expected 4) for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - std::vector inExpectedTypes; - std::vector outExpectedTypes; - auto inputType = operands[inputIndexes[0]].type; - if (inputType != OperandType::TENSOR_FLOAT32 && - inputType != OperandType::TENSOR_FLOAT16) { - LOG(ERROR) << "Unsupported input tensor type for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - - inExpectedTypes = {inputType, inputType, inputType, inputType, inputType, - inputType, inputType, inputType, inputType, inputType, - inputType, inputType, inputType, inputType, inputType, - inputType, inputType, inputType, inputType, inputType, - OperandType::INT32}; - if (inputType == OperandType::TENSOR_FLOAT32) { - inExpectedTypes.push_back(OperandType::FLOAT32); - inExpectedTypes.push_back(OperandType::FLOAT32); - } else { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); - inExpectedTypes.push_back(OperandType::FLOAT16); - inExpectedTypes.push_back(OperandType::FLOAT16); - } - - outExpectedTypes = {inputType, inputType, inputType, inputType}; - if (inputCount == 23) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0)); - } else { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); - for (int i = 0; i < 4; ++i) { - inExpectedTypes.push_back(inputType); - } - } - return validateOperationOperandTypes(operands, inputCount, inputIndexes, - inExpectedTypes, outputCount, outputIndexes, - outExpectedTypes); - } - case ANEURALNETWORKS_QUANTIZED_16BIT_LSTM: { - if (inputCount != 15 || outputCount != 2) { - logInvalidInOutNumber(15, 2); - return ANEURALNETWORKS_BAD_DATA; - } - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); - std::vector inExpectedTypes = { - OperandType::TENSOR_QUANT8_ASYMM, OperandType::TENSOR_QUANT8_ASYMM, - OperandType::TENSOR_QUANT8_ASYMM, OperandType::TENSOR_QUANT8_ASYMM, - 
OperandType::TENSOR_QUANT8_ASYMM, OperandType::TENSOR_QUANT8_ASYMM, - OperandType::TENSOR_QUANT8_ASYMM, OperandType::TENSOR_QUANT8_ASYMM, - OperandType::TENSOR_QUANT8_ASYMM, OperandType::TENSOR_INT32, - OperandType::TENSOR_INT32, OperandType::TENSOR_INT32, - OperandType::TENSOR_INT32, OperandType::TENSOR_QUANT16_SYMM, - OperandType::TENSOR_QUANT8_ASYMM}; - std::vector outExpectedTypes = {OperandType::TENSOR_QUANT16_SYMM, - OperandType::TENSOR_QUANT8_ASYMM}; - return validateOperationOperandTypes(operands, inputCount, inputIndexes, - inExpectedTypes, outputCount, outputIndexes, - outExpectedTypes); - } - case ANEURALNETWORKS_RANDOM_MULTINOMIAL: { - if (inputCount != 3 || outputCount != 1) { - logInvalidInOutNumber(3, 1); - return ANEURALNETWORKS_BAD_DATA; - } - OperandType inputType = operands[inputIndexes[0]].type; - std::vector inExpectedTypes; - if (inputType == OperandType::TENSOR_FLOAT32 || - inputType == OperandType::TENSOR_FLOAT16) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); - inExpectedTypes = { - inputType, - OperandType::INT32, - OperandType::TENSOR_INT32, - }; - } else { - LOG(ERROR) << "Unsupported input tensor type for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - std::vector outExpectedTypes = {OperandType::TENSOR_INT32}; - return validateOperationOperandTypes(operands, inputCount, inputIndexes, - inExpectedTypes, outputCount, outputIndexes, - outExpectedTypes); - } - case ANEURALNETWORKS_RNN: { - if (inputCount != 6 || outputCount != 2) { - logInvalidInOutNumber(6, 2); - return ANEURALNETWORKS_BAD_DATA; - } - OperandType inputType = operands[inputIndexes[0]].type; - std::vector inExpectedTypes; - std::vector outExpectedTypes; - if (inputType == OperandType::TENSOR_FLOAT32) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0)); - inExpectedTypes = { - OperandType::TENSOR_FLOAT32, OperandType::TENSOR_FLOAT32, - OperandType::TENSOR_FLOAT32, OperandType::TENSOR_FLOAT32, - OperandType::TENSOR_FLOAT32, OperandType::INT32, - }; - outExpectedTypes = { - OperandType::TENSOR_FLOAT32, - OperandType::TENSOR_FLOAT32, - }; - } else if (inputType == OperandType::TENSOR_FLOAT16) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); - inExpectedTypes = { - OperandType::TENSOR_FLOAT16, OperandType::TENSOR_FLOAT16, - OperandType::TENSOR_FLOAT16, OperandType::TENSOR_FLOAT16, - OperandType::TENSOR_FLOAT16, OperandType::INT32, - }; - outExpectedTypes = { - OperandType::TENSOR_FLOAT16, - OperandType::TENSOR_FLOAT16, - }; - } else { - LOG(ERROR) << "Unsupported input tensor type for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - return validateOperationOperandTypes(operands, inputCount, inputIndexes, - inExpectedTypes, outputCount, outputIndexes, - outExpectedTypes); - } - case ANEURALNETWORKS_SVDF: { - if (inputCount != 7 || outputCount != 2) { - logInvalidInOutNumber(7, 2); - return ANEURALNETWORKS_BAD_DATA; - } - OperandType inputType = operands[inputIndexes[0]].type; - if (inputType == OperandType::TENSOR_FLOAT32) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0)); - - } else if (inputType == OperandType::TENSOR_FLOAT16) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); - } else { - LOG(ERROR) << "Unsupported input tensor type for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - std::vector inExpectedTypes = { - inputType, inputType, inputType, inputType, - inputType, OperandType::INT32, 
OperandType::INT32, - }; - std::vector outExpectedTypes = {inputType, inputType}; - return validateOperationOperandTypes(operands, inputCount, inputIndexes, - inExpectedTypes, outputCount, outputIndexes, - outExpectedTypes); - } - case ANEURALNETWORKS_BATCH_TO_SPACE_ND: { - if ((inputCount != 3 && inputCount != 2) || outputCount != 1) { - LOG(ERROR) << "Invalid number of input operands (" << inputCount - << ", expected 3 or 2) or output operands (" << outputCount - << ", expected 1) for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - auto inputType = operands[inputIndexes[0]].type; - std::vector inExpectedTypes; - std::vector outExpectedTypes; - if (inputType == OperandType::TENSOR_FLOAT32) { - inExpectedTypes = { - OperandType::TENSOR_FLOAT32, - OperandType::TENSOR_INT32, - }; - outExpectedTypes = {OperandType::TENSOR_FLOAT32}; - } else if (inputType == OperandType::TENSOR_FLOAT16) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); - inExpectedTypes = { - OperandType::TENSOR_FLOAT16, - OperandType::TENSOR_INT32, - }; - outExpectedTypes = {OperandType::TENSOR_FLOAT16}; - } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) { - inExpectedTypes = { - OperandType::TENSOR_QUANT8_ASYMM, - OperandType::TENSOR_INT32, - }; - outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM}; - } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3)); - inExpectedTypes = { - OperandType::TENSOR_QUANT8_ASYMM_SIGNED, - OperandType::TENSOR_INT32, - }; - outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED}; - } else { - LOG(ERROR) << "Unsupported input tensor type for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - if (inputCount == 3) { - inExpectedTypes.push_back(OperandType::BOOL); - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); - } else { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_1)); - } - return validateOperationOperandTypes(operands, inputCount, inputIndexes, - inExpectedTypes, outputCount, outputIndexes, - outExpectedTypes); - } - case ANEURALNETWORKS_SPACE_TO_BATCH_ND: { - if ((inputCount != 4 && inputCount != 3) || outputCount != 1) { - LOG(ERROR) << "Invalid number of input operands (" << inputCount - << ", expected 4 or 3) or output operands (" << outputCount - << ", expected 1) for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - auto inputType = operands[inputIndexes[0]].type; - std::vector inExpectedTypes; - std::vector outExpectedTypes; - if (inputType == OperandType::TENSOR_FLOAT32) { - inExpectedTypes = { - OperandType::TENSOR_FLOAT32, - OperandType::TENSOR_INT32, - OperandType::TENSOR_INT32, - }; - outExpectedTypes = {OperandType::TENSOR_FLOAT32}; - } else if (inputType == OperandType::TENSOR_FLOAT16) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); - inExpectedTypes = { - OperandType::TENSOR_FLOAT16, - OperandType::TENSOR_INT32, - OperandType::TENSOR_INT32, - }; - outExpectedTypes = {OperandType::TENSOR_FLOAT16}; - } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) { - if (operands[inputIndexes[0]].zeroPoint != 0) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); - } - inExpectedTypes = { - OperandType::TENSOR_QUANT8_ASYMM, - OperandType::TENSOR_INT32, - OperandType::TENSOR_INT32, - }; - outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM}; - } else if (inputType == 
OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3)); - inExpectedTypes = { - OperandType::TENSOR_QUANT8_ASYMM_SIGNED, - OperandType::TENSOR_INT32, - OperandType::TENSOR_INT32, - }; - outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED}; - } else { - LOG(ERROR) << "Unsupported input tensor type for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - if (inputCount == 4) { - inExpectedTypes.push_back(OperandType::BOOL); - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); - } else { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_1)); - } - return validateOperationOperandTypes(operands, inputCount, inputIndexes, - inExpectedTypes, outputCount, outputIndexes, - outExpectedTypes); - } - case ANEURALNETWORKS_PAD: { - if (inputCount != 2 || outputCount != 1) { - logInvalidInOutNumber(2, 1); - return ANEURALNETWORKS_BAD_DATA; - } - auto inputType = operands[inputIndexes[0]].type; - std::vector inExpectedTypes; - std::vector outExpectedTypes; - if (inputType == OperandType::TENSOR_FLOAT32) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_1)); - inExpectedTypes = { - OperandType::TENSOR_FLOAT32, - OperandType::TENSOR_INT32, - }; - outExpectedTypes = {OperandType::TENSOR_FLOAT32}; - } else if (inputType == OperandType::TENSOR_FLOAT16) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); - inExpectedTypes = { - OperandType::TENSOR_FLOAT16, - OperandType::TENSOR_INT32, - }; - outExpectedTypes = {OperandType::TENSOR_FLOAT16}; - } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM || - inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3)); - } else { - if (operands[inputIndexes[0]].zeroPoint == 0) { - NN_RETURN_IF_ERROR( - validateHalVersion(opType, halVersion, HalVersion::V1_1)); - } else { - NN_RETURN_IF_ERROR( - validateHalVersion(opType, halVersion, HalVersion::V1_2)); - } - } - inExpectedTypes = { - inputType, - OperandType::TENSOR_INT32, - }; - outExpectedTypes = {inputType}; - } else { - LOG(ERROR) << "Unsupported input tensor type for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - const auto inputRank = operands[inputIndexes[0]].dimensions.size(); - if (inputRank > 4) { - LOG(ERROR) << "Unsupported input tensor rank for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - return validateOperationOperandTypes(operands, inputCount, inputIndexes, - inExpectedTypes, outputCount, outputIndexes, - outExpectedTypes); - } - case ANEURALNETWORKS_PAD_V2: { - if (inputCount != 3 || outputCount != 1) { - logInvalidInOutNumber(3, 1); - return ANEURALNETWORKS_BAD_DATA; - } - auto inputType = operands[inputIndexes[0]].type; - std::vector inExpectedTypes; - std::vector outExpectedTypes; - if (inputType == OperandType::TENSOR_FLOAT32) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); - inExpectedTypes = { - OperandType::TENSOR_FLOAT32, - OperandType::TENSOR_INT32, - OperandType::FLOAT32, - }; - outExpectedTypes = {OperandType::TENSOR_FLOAT32}; - } else if (inputType == OperandType::TENSOR_FLOAT16) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); - inExpectedTypes = { - OperandType::TENSOR_FLOAT16, - OperandType::TENSOR_INT32, - OperandType::FLOAT16, - }; - outExpectedTypes = 
{OperandType::TENSOR_FLOAT16}; - } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM || - inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3)); - } else { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); - } - inExpectedTypes = { - inputType, - OperandType::TENSOR_INT32, - OperandType::INT32, - }; // TODO(b/116699425): Make it UINT8. - outExpectedTypes = {inputType}; - } else { - LOG(ERROR) << "Unsupported input tensor type for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - const auto inputRank = operands[inputIndexes[0]].dimensions.size(); - if (inputRank > 4) { - LOG(ERROR) << "Unsupported input tensor rank for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - return validateOperationOperandTypes(operands, inputCount, inputIndexes, - inExpectedTypes, outputCount, outputIndexes, - outExpectedTypes); - } - case ANEURALNETWORKS_CAST: { - if (inputCount != 1 || outputCount != 1) { - logInvalidInOutNumber(1, 1); - return ANEURALNETWORKS_BAD_DATA; - } - auto inputOperand = operands[inputIndexes[0]]; - auto outputOperand = operands[outputIndexes[0]]; - auto inputType = inputOperand.type; - auto outputType = outputOperand.type; - std::vector inExpectedTypes; - std::vector outExpectedTypes; - if ((inputType == OperandType::TENSOR_FLOAT16 || - inputType == OperandType::TENSOR_FLOAT32 || - inputType == OperandType::TENSOR_INT32 || - inputType == OperandType::TENSOR_QUANT8_ASYMM) && - (outputType == OperandType::TENSOR_FLOAT16 || - outputType == OperandType::TENSOR_FLOAT32 || - outputType == OperandType::TENSOR_INT32 || - outputType == OperandType::TENSOR_QUANT8_ASYMM)) { - inExpectedTypes = {inputType}; - outExpectedTypes = {outputType}; - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); - } else if (inputType == OperandType::TENSOR_BOOL8 || - inputType == OperandType::TENSOR_QUANT16_ASYMM || - inputType == OperandType::TENSOR_QUANT16_SYMM || - inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED || - inputType == OperandType::TENSOR_QUANT8_SYMM) { - inExpectedTypes = {inputType}; - outExpectedTypes = {inputType}; // Only identity CAST is supported. - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3)); - } else { - LOG(ERROR) << "Unsupported data type for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - // Validate that output shape is equal to input shape if dimensions - // are already known. 
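Editor's note: a standalone restatement of the CAST shape rule implemented by the lambda that follows, for readers skimming the diff. Shapes must match once both are fully known; unknown rank (empty dimensions) or a zero element count means "not yet known" and defers the check (hypothetical helper name, plain std::vector):

    #include <cstdint>
    #include <functional>
    #include <numeric>
    #include <vector>

    bool castShapesConsistent(const std::vector<uint32_t>& in,
                              const std::vector<uint32_t>& out) {
        const auto numElements = [](const std::vector<uint32_t>& dims) -> uint64_t {
            if (dims.empty()) return 0;
            return std::accumulate(dims.begin(), dims.end(), uint64_t{1},
                                   std::multiplies<>());
        };
        if (in.empty() || numElements(out) == 0) return true;  // defer until known
        return in == out;  // both fully known: must be identical
    }

    int main() {
        return (castShapesConsistent({2, 3}, {}) && castShapesConsistent({2, 3}, {2, 3})) ? 0 : 1;
    }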
- auto getNumberOfElements = [](const hardware::hidl_vec& dims) { - if (dims.size() == 0) { - return 0; - } - return std::accumulate(dims.begin(), dims.end(), 1, std::multiplies<>()); - }; - if (inputOperand.dimensions.size() != 0 && outputOperand.dimensions.size() != 0 && - getNumberOfElements(outputOperand.dimensions) != 0 && - inputOperand.dimensions != outputOperand.dimensions) { - return ANEURALNETWORKS_BAD_DATA; - } - return validateOperationOperandTypes(operands, inputCount, inputIndexes, - inExpectedTypes, outputCount, outputIndexes, - outExpectedTypes); - } - case ANEURALNETWORKS_MEAN: { - if (inputCount != 3 || outputCount != 1) { - logInvalidInOutNumber(3, 1); - return ANEURALNETWORKS_BAD_DATA; - } - const auto inputRank = operands[inputIndexes[0]].dimensions.size(); - if (inputRank > 4) { - LOG(ERROR) << "Unsupported input tensor rank for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - auto inputType = operands[inputIndexes[0]].type; - if (inputType == OperandType::TENSOR_FLOAT32) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_1)); - } else if (inputType == OperandType::TENSOR_FLOAT16) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); - } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_1)); - } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3)); - } else { - LOG(ERROR) << "Unsupported input tensor type for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - std::vector inExpectedTypes = {inputType, OperandType::TENSOR_INT32, - OperandType::INT32}; - std::vector outExpectedTypes = {inputType}; - return validateOperationOperandTypes(operands, inputCount, inputIndexes, - inExpectedTypes, outputCount, outputIndexes, - outExpectedTypes); - } - case ANEURALNETWORKS_ARGMAX: - case ANEURALNETWORKS_ARGMIN: { - if (inputCount != 2 || outputCount != 1) { - logInvalidInOutNumber(2, 1); - return ANEURALNETWORKS_BAD_DATA; - } - auto inputType = operands[inputIndexes[0]].type; - std::vector inExpectedTypes; - std::vector outExpectedTypes; - if (inputType == OperandType::TENSOR_FLOAT16 || - inputType == OperandType::TENSOR_FLOAT32 || - inputType == OperandType::TENSOR_INT32 || - inputType == OperandType::TENSOR_QUANT8_ASYMM || - inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - inExpectedTypes = {inputType, OperandType::INT32}; - outExpectedTypes = {OperandType::TENSOR_INT32}; - } else { - LOG(ERROR) << "Unsupported input tensor type for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); - return validateOperationOperandTypes(operands, inputCount, inputIndexes, - inExpectedTypes, outputCount, outputIndexes, - outExpectedTypes); - } - case ANEURALNETWORKS_EXPAND_DIMS: { - if (inputCount != 2 || outputCount != 1) { - logInvalidInOutNumber(2, 1); - return ANEURALNETWORKS_BAD_DATA; - } - auto inputType = operands[inputIndexes[0]].type; - std::vector inExpectedTypes; - std::vector outExpectedTypes; - if (inputType == OperandType::TENSOR_FLOAT16 || - inputType == OperandType::TENSOR_FLOAT32 || - inputType == OperandType::TENSOR_INT32 || - inputType == OperandType::TENSOR_QUANT8_ASYMM || - inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - inExpectedTypes = {inputType, OperandType::INT32}; - outExpectedTypes = {inputType}; - } else { 
- LOG(ERROR) << "Unsupported input tensor type for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3)); - } else { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); - } - return validateOperationOperandTypes(operands, inputCount, inputIndexes, - inExpectedTypes, outputCount, outputIndexes, - outExpectedTypes); - } - case ANEURALNETWORKS_SPLIT: { - if (inputCount != 3) { - LOG(ERROR) << "Invalid number of input operands (" << inputCount << ", expected 3)" - << opType; - return ANEURALNETWORKS_BAD_DATA; - } - auto inputType = operands[inputIndexes[0]].type; - if (inputType != OperandType::TENSOR_FLOAT16 && - inputType != OperandType::TENSOR_FLOAT32 && - inputType != OperandType::TENSOR_INT32 && - inputType != OperandType::TENSOR_QUANT8_ASYMM && - inputType != OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - LOG(ERROR) << "Unsupported input tensor type for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3)); - } else { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); - } - std::vector inExpectedTypes = {inputType, OperandType::INT32, - OperandType::INT32}; - std::vector outExpectedTypes(outputCount, inputType); - return validateOperationOperandTypes(operands, inputCount, inputIndexes, - inExpectedTypes, outputCount, outputIndexes, - outExpectedTypes); - } - case ANEURALNETWORKS_MAXIMUM: - case ANEURALNETWORKS_MINIMUM: { - if (inputCount != 2 || outputCount != 1) { - logInvalidInOutNumber(2, 1); - return ANEURALNETWORKS_BAD_DATA; - } - std::vector inExpectedTypes; - std::vector outExpectedTypes; - OperandType inputType = operands[inputIndexes[0]].type; - if (inputType == OperandType::TENSOR_FLOAT16 || - inputType == OperandType::TENSOR_FLOAT32 || - inputType == OperandType::TENSOR_INT32 || - inputType == OperandType::TENSOR_QUANT8_ASYMM || - inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - inExpectedTypes = {inputType, inputType}; - outExpectedTypes = {inputType}; - } else { - LOG(ERROR) << "Unsupported input tensor type for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3)); - } else { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); - } - return validateOperationOperandTypes(operands, inputCount, inputIndexes, - inExpectedTypes, outputCount, outputIndexes, - outExpectedTypes); - } - case ANEURALNETWORKS_GROUPED_CONV_2D: { - if ((inputCount != 12 && inputCount != 9) || outputCount != 1) { - LOG(ERROR) << "Invalid number of input operands (" << inputCount - << ", expected 12 or 9) or output operands (" << outputCount - << ", expected 1) for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - auto inputType = operands[inputIndexes[0]].type; - auto filterType = operands[inputIndexes[1]].type; - std::vector inExpectedTypes; - std::vector outExpectedTypes; - if (inputType == OperandType::TENSOR_FLOAT32) { - inExpectedTypes = {OperandType::TENSOR_FLOAT32, OperandType::TENSOR_FLOAT32, - OperandType::TENSOR_FLOAT32, OperandType::INT32, - OperandType::INT32, OperandType::INT32, - OperandType::INT32, OperandType::INT32}; - outExpectedTypes = 
{OperandType::TENSOR_FLOAT32}; - } else if (inputType == OperandType::TENSOR_FLOAT16) { - inExpectedTypes = {OperandType::TENSOR_FLOAT16, OperandType::TENSOR_FLOAT16, - OperandType::TENSOR_FLOAT16, OperandType::INT32, - OperandType::INT32, OperandType::INT32, - OperandType::INT32, OperandType::INT32}; - outExpectedTypes = {OperandType::TENSOR_FLOAT16}; - } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM || - inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - if (filterType != inputType && - filterType != OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL) { - LOG(ERROR) << "Unsupported filter tensor type for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - - if (filterType == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL && - std::get( - operands[inputIndexes[1]].extraParams) - .channelDim != 0) { - LOG(ERROR) << "Unsupported filter tensor channel dimension for operation " - << opType; - return ANEURALNETWORKS_BAD_DATA; - } - - inExpectedTypes = { - inputType, filterType, OperandType::TENSOR_INT32, - OperandType::INT32, OperandType::INT32, OperandType::INT32, - OperandType::INT32, OperandType::INT32}; - outExpectedTypes = {inputType}; - } else { - LOG(ERROR) << "Unsupported input tensor type for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - - if (inputCount == 12) { - std::vector explicitScalarTypes(3, OperandType::INT32); - inExpectedTypes.insert(inExpectedTypes.end(), explicitScalarTypes.begin(), - explicitScalarTypes.end()); - } - inExpectedTypes.push_back(OperandType::BOOL); - if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3)); - } else { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); - } - return validateOperationOperandTypes(operands, inputCount, inputIndexes, - inExpectedTypes, outputCount, outputIndexes, - outExpectedTypes); - } - case ANEURALNETWORKS_TILE: { - if (inputCount != 2 || outputCount != 1) { - logInvalidInOutNumber(2, 1); - return ANEURALNETWORKS_BAD_DATA; - } - auto inputType = operands[inputIndexes[0]].type; - std::vector inExpectedTypes; - std::vector outExpectedTypes; - if (inputType == OperandType::TENSOR_FLOAT16 || - inputType == OperandType::TENSOR_FLOAT32 || - inputType == OperandType::TENSOR_INT32 || - inputType == OperandType::TENSOR_QUANT8_ASYMM || - inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - inExpectedTypes = {inputType, OperandType::TENSOR_INT32}; - outExpectedTypes = {inputType}; - } else { - LOG(ERROR) << "Unsupported input tensor type for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3)); - } else { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); - } - return validateOperationOperandTypes(operands, inputCount, inputIndexes, - inExpectedTypes, outputCount, outputIndexes, - outExpectedTypes); - } - case ANEURALNETWORKS_POW: { - if (inputCount != 2 || outputCount != 1) { - logInvalidInOutNumber(2, 1); - return ANEURALNETWORKS_BAD_DATA; - } - auto inputType = operands[inputIndexes[0]].type; - std::vector inExpectedTypes; - std::vector outExpectedTypes; - if (inputType == OperandType::TENSOR_FLOAT16 || - inputType == OperandType::TENSOR_FLOAT32) { - inExpectedTypes = {inputType, inputType}; - outExpectedTypes = {inputType}; - } else { - LOG(ERROR) << "Unsupported input tensor type for operation " << opType; 
- return ANEURALNETWORKS_BAD_DATA; - } - if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3)); - } else { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); - } - return validateOperationOperandTypes(operands, inputCount, inputIndexes, - inExpectedTypes, outputCount, outputIndexes, - outExpectedTypes); - } - case ANEURALNETWORKS_IF: { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3)); - return validateIfOperation(inputCount, inputIndexes, outputCount, outputIndexes, - operands, helper) - ? ANEURALNETWORKS_NO_ERROR - : ANEURALNETWORKS_BAD_DATA; - } - case ANEURALNETWORKS_WHILE: { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3)); - return validateWhileOperation(inputCount, inputIndexes, outputCount, outputIndexes, - operands, helper) - ? ANEURALNETWORKS_NO_ERROR - : ANEURALNETWORKS_BAD_DATA; - } - default: { - const OperationRegistration* operationRegistration = - BuiltinOperationResolver::get()->findOperation( - static_cast(opType)); - if (operationRegistration == nullptr) { - if (0 <= opType && opType < kNumberOfOperationTypes) { - LOG(ERROR) << opType << " not registered"; - } else { - LOG(ERROR) << "Operation type " << opType << " out of the range [0, " - << kNumberOfOperationTypes << ")"; - } - return ANEURALNETWORKS_UNEXPECTED_NULL; - } - if (operationRegistration->validate == nullptr) { - LOG(ERROR) << "Incomplete operation registration: " << opType; - return ANEURALNETWORKS_UNEXPECTED_NULL; - } - OperationValidationContext context(operationRegistration->name, inputCount, - inputIndexes, outputCount, outputIndexes, - operands.data()); - const auto maybeVersion = operationRegistration->validate(&context); - if (!maybeVersion.has_value()) { - LOG(ERROR) << "Validation failed for operation " << opType << ": " - << maybeVersion.error(); - return ANEURALNETWORKS_BAD_DATA; - } - if (!validateVersion(&context, convert(halVersion), maybeVersion.value())) { - LOG(ERROR) << "Validation failed for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - return ANEURALNETWORKS_NO_ERROR; - } - } -} - -ErrorStatus convertResultCodeToErrorStatus(int resultCode) { - switch (resultCode) { - case ANEURALNETWORKS_NO_ERROR: - return ErrorStatus::NONE; - - case ANEURALNETWORKS_BAD_DATA: - case ANEURALNETWORKS_UNEXPECTED_NULL: - return ErrorStatus::INVALID_ARGUMENT; - - case ANEURALNETWORKS_OUTPUT_INSUFFICIENT_SIZE: - return ErrorStatus::OUTPUT_INSUFFICIENT_SIZE; - - case ANEURALNETWORKS_UNAVAILABLE_DEVICE: - return ErrorStatus::DEVICE_UNAVAILABLE; - - case ANEURALNETWORKS_BAD_STATE: - case ANEURALNETWORKS_INCOMPLETE: - case ANEURALNETWORKS_OP_FAILED: - case ANEURALNETWORKS_OUT_OF_MEMORY: - case ANEURALNETWORKS_UNMAPPABLE: - case ANEURALNETWORKS_DEAD_OBJECT: - return ErrorStatus::GENERAL_FAILURE; - - case ANEURALNETWORKS_MISSED_DEADLINE_TRANSIENT: - return ErrorStatus::MISSED_DEADLINE_TRANSIENT; - case ANEURALNETWORKS_MISSED_DEADLINE_PERSISTENT: - return ErrorStatus::MISSED_DEADLINE_PERSISTENT; - case ANEURALNETWORKS_RESOURCE_EXHAUSTED_TRANSIENT: - return ErrorStatus::RESOURCE_EXHAUSTED_TRANSIENT; - case ANEURALNETWORKS_RESOURCE_EXHAUSTED_PERSISTENT: - return ErrorStatus::RESOURCE_EXHAUSTED_PERSISTENT; - } - LOG(ERROR) << "Unknown result code " << resultCode << " mapped to ErrorStatus::GENERAL_FAILURE"; - return ErrorStatus::GENERAL_FAILURE; -} - -int convertErrorStatusToResultCode(ErrorStatus status) { - switch (status) { - 
-int convertErrorStatusToResultCode(ErrorStatus status) {
-    switch (status) {
-        case ErrorStatus::NONE:
-            return ANEURALNETWORKS_NO_ERROR;
-        case ErrorStatus::DEVICE_UNAVAILABLE:
-            return ANEURALNETWORKS_UNAVAILABLE_DEVICE;
-        case ErrorStatus::GENERAL_FAILURE:
-            return ANEURALNETWORKS_OP_FAILED;
-        case ErrorStatus::OUTPUT_INSUFFICIENT_SIZE:
-            return ANEURALNETWORKS_OUTPUT_INSUFFICIENT_SIZE;
-        case ErrorStatus::INVALID_ARGUMENT:
-            return ANEURALNETWORKS_BAD_DATA;
-        case ErrorStatus::MISSED_DEADLINE_TRANSIENT:
-            return ANEURALNETWORKS_MISSED_DEADLINE_TRANSIENT;
-        case ErrorStatus::MISSED_DEADLINE_PERSISTENT:
-            return ANEURALNETWORKS_MISSED_DEADLINE_PERSISTENT;
-        case ErrorStatus::RESOURCE_EXHAUSTED_TRANSIENT:
-            return ANEURALNETWORKS_RESOURCE_EXHAUSTED_TRANSIENT;
-        case ErrorStatus::RESOURCE_EXHAUSTED_PERSISTENT:
-            return ANEURALNETWORKS_RESOURCE_EXHAUSTED_PERSISTENT;
-        case ErrorStatus::DEAD_OBJECT:
-            return ANEURALNETWORKS_DEAD_OBJECT;
-    }
-    LOG(ERROR) << "Unknown ErrorStatus " << status << " mapped to ANEURALNETWORKS_OP_FAILED";
-    return ANEURALNETWORKS_OP_FAILED;
-}
-
-V1_3::ErrorStatus convertResultCodeToHalErrorStatus(int resultCode) {
-    return convertToV1_3(convertResultCodeToErrorStatus(resultCode));
-}
-
-int convertErrorStatusToResultCode(V1_3::ErrorStatus status) {
-    return convertErrorStatusToResultCode(uncheckedConvert(status));
-}
-
-std::tuple<int, std::vector<OutputShape>, Timing> getExecutionResult(
-        V1_3::ErrorStatus status, const hardware::hidl_vec<V1_2::OutputShape>& outputShapes,
-        const V1_2::Timing& timing) {
-    return getExecutionResult(uncheckedConvert(status), uncheckedConvert(outputShapes),
-                              uncheckedConvert(timing));
-}
-
-std::tuple<int, std::vector<OutputShape>, Timing> getExecutionResult(
-        ErrorStatus status, std::vector<OutputShape> outputShapes, Timing timing) {
-    constexpr Timing kNoTiming = {std::numeric_limits<uint64_t>::max(),
-                                  std::numeric_limits<uint64_t>::max()};
-    const int n = convertErrorStatusToResultCode(status);
-    if (status != ErrorStatus::NONE && status != ErrorStatus::OUTPUT_INSUFFICIENT_SIZE &&
-        !outputShapes.empty()) {
-        LOG(ERROR) << "The driver returned OutputShapes when it shouldn't.";
-        outputShapes.clear();
-    }
-    if (status != ErrorStatus::NONE && timing != kNoTiming) {
-        LOG(ERROR) << "The driver returned Timing when it shouldn't.";
-        timing = kNoTiming;
-    }
-    return {n, std::move(outputShapes), timing};
-}
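
A caller typically unpacks the sanitized triple with structured bindings. A small sketch, where halStatus, halOutputShapes, and halTiming are hypothetical values captured from a V1_3 driver's execution callback:

    const auto [n, outputShapes, timing] =
            getExecutionResult(halStatus, halOutputShapes, halTiming);
    if (n != ANEURALNETWORKS_NO_ERROR && n != ANEURALNETWORKS_OUTPUT_INSUFFICIENT_SIZE) {
        // outputShapes has been cleared and timing reset to kNoTiming by the
        // sanitization above, so stale driver output cannot leak through.
    }
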
-// Capabilities::operandPerformance utilities.
-// The field Capabilities::operandPerformance is a vector sorted by the field
-// Capabilities::OperandPerformance::type.
-
-template <HalVersion version>
-hardware::hidl_vec<VersionedOperandPerformance<version>> nonExtensionOperandPerformance(
-        V1_0::PerformanceInfo perf) {
-    using OpPerf = VersionedOperandPerformance<version>;
-
-    // Note: range presents enumerators in declaration order, not in numerical order.
-    static constexpr hardware::hidl_enum_range<VersionedOperandType<version>> kOperandTypeRange;
-
-    std::vector<OpPerf> ret;
-    ret.reserve(kOperandTypeRange.end() - kOperandTypeRange.begin());
-    for (VersionedOperandType<version> type : kOperandTypeRange) {
-        if (static_cast<V1_3::OperandType>(type) != V1_3::OperandType::SUBGRAPH) {
-            ret.push_back(OpPerf{type, perf});
-        }
-    }
-    std::sort(ret.begin(), ret.end(),
-              [](const OpPerf& a, const OpPerf& b) { return a.type < b.type; });
-
-    return ret;
-}
-
-template hardware::hidl_vec<V1_2::Capabilities::OperandPerformance>
-nonExtensionOperandPerformance<HalVersion::V1_2>(V1_0::PerformanceInfo perf);
-template hardware::hidl_vec<V1_3::Capabilities::OperandPerformance>
-nonExtensionOperandPerformance<HalVersion::V1_3>(V1_0::PerformanceInfo perf);
-
-template <HalVersion version>
-void update(hardware::hidl_vec<VersionedOperandPerformance<version>>* operandPerformance,
-            VersionedOperandType<version> type, V1_0::PerformanceInfo perf) {
-    CHECK(operandPerformance != nullptr);
-    const auto it =
-            std::lower_bound(operandPerformance->begin(), operandPerformance->end(), type,
-                             [](const VersionedOperandPerformance<version>& perf,
-                                VersionedOperandType<version> type) { return perf.type < type; });
-    CHECK(it != operandPerformance->end())
-            << toString(type) << " not in " << toString(*operandPerformance);
-    it->info = perf;
-}
-
-void update(hardware::hidl_vec<V1_2::Capabilities::OperandPerformance>* operandPerformance,
-            V1_2::OperandType type, V1_0::PerformanceInfo perf) {
-    update<HalVersion::V1_2>(operandPerformance, type, perf);
-}
-void update(hardware::hidl_vec<V1_3::Capabilities::OperandPerformance>* operandPerformance,
-            V1_3::OperandType type, V1_0::PerformanceInfo perf) {
-    update<HalVersion::V1_3>(operandPerformance, type, perf);
-}
-
-template <HalVersion version>
-V1_0::PerformanceInfo lookup(
-        const hardware::hidl_vec<VersionedOperandPerformance<version>>& operandPerformance,
-        VersionedOperandType<version> type) {
-    const auto it = std::lower_bound(operandPerformance.begin(), operandPerformance.end(), type,
-                                     [](const VersionedOperandPerformance<version>& perf,
-                                        VersionedOperandType<version> type) {
-                                         return static_cast<V1_3::OperandType>(perf.type) <
-                                                static_cast<V1_3::OperandType>(type);
-                                     });
-    if (it == operandPerformance.end()) {
-        LOG(WARNING) << "No PerformanceInfo for " << toString(type);
-        return kNoPerformanceInfo;
-    } else {
-        return it->info;
-    }
-}
-
-V1_0::PerformanceInfo lookup(
-        const hardware::hidl_vec<V1_2::Capabilities::OperandPerformance>& operandPerformance,
-        V1_2::OperandType type) {
-    return lookup<HalVersion::V1_2>(operandPerformance, type);
-}
-V1_0::PerformanceInfo lookup(
-        const hardware::hidl_vec<V1_3::Capabilities::OperandPerformance>& operandPerformance,
-        V1_3::OperandType type) {
-    CHECK(type != V1_3::OperandType::SUBGRAPH)
-            << "Use Capabilities::ifPerformance or Capabilities::whilePerformance";
-    return lookup<HalVersion::V1_3>(operandPerformance, type);
-}
-
-// Versioning
-
-// In Android P, most data types are treated as having the same performance as
-// TENSOR_QUANT8_ASYMM. This array must be in sorted order.
-static const V1_3::OperandType kQuantized8PerformanceConsistentWithP[] = {
-        V1_3::OperandType::INT32, V1_3::OperandType::UINT32, V1_3::OperandType::TENSOR_INT32,
-        V1_3::OperandType::OEM, V1_3::OperandType::TENSOR_OEM_BYTE};
-
-static bool isQuantized8PerformanceConsistentWithP(const V1_2::Capabilities& capabilities) {
-    const V1_0::PerformanceInfo quantized8Performance =
-            lookup(capabilities.operandPerformance, V1_2::OperandType::TENSOR_QUANT8_ASYMM);
-    return std::all_of(std::begin(kQuantized8PerformanceConsistentWithP),
-                       std::end(kQuantized8PerformanceConsistentWithP),
-                       [quantized8Performance, &capabilities](V1_3::OperandType type) {
-                           return quantized8Performance ==
-                                  lookup(capabilities.operandPerformance,
-                                         static_cast<V1_2::OperandType>(type));
-                       });
-}
-
-static bool isQuantized8PerformanceConsistentWithP(const V1_3::Capabilities& capabilities) {
-    const V1_0::PerformanceInfo quantized8Performance =
-            lookup(capabilities.operandPerformance, V1_3::OperandType::TENSOR_QUANT8_ASYMM);
-    return std::all_of(std::begin(kQuantized8PerformanceConsistentWithP),
-                       std::end(kQuantized8PerformanceConsistentWithP),
-                       [quantized8Performance, &capabilities](V1_3::OperandType type) {
-                           return quantized8Performance ==
-                                  lookup(capabilities.operandPerformance, type);
-                       });
-}
-
-static hardware::hidl_vec<V1_2::Capabilities::OperandPerformance>
-makeQuantized8PerformanceConsistentWithP(V1_0::PerformanceInfo quantized8Performance) {
-    hardware::hidl_vec<V1_2::Capabilities::OperandPerformance> ret(
-            std::size(kQuantized8PerformanceConsistentWithP));
-    std::transform(std::begin(kQuantized8PerformanceConsistentWithP),
-                   std::end(kQuantized8PerformanceConsistentWithP), ret.begin(),
-                   [quantized8Performance](
-                           V1_3::OperandType type) -> V1_2::Capabilities::OperandPerformance {
-                       return {static_cast<V1_2::OperandType>(type), quantized8Performance};
-                   });
-    return ret;
-}
-
-bool compliantWithV1_0(const V1_0::Capabilities&) {
-    return true;
-}
-
-bool compliantWithV1_0(const V1_1::Capabilities& capabilities) {
-    return capabilities.relaxedFloat32toFloat16Performance == capabilities.float32Performance;
-}
-
-bool compliantWithV1_0(const V1_2::Capabilities& capabilities) {
-    const V1_0::PerformanceInfo perfTensorFloat32 =
-            lookup(capabilities.operandPerformance, V1_2::OperandType::TENSOR_FLOAT32);
-    const V1_0::PerformanceInfo perfFloat32 =
-            lookup(capabilities.operandPerformance, V1_2::OperandType::FLOAT32);
-    if (perfTensorFloat32 != perfFloat32 ||
-        perfTensorFloat32 != capabilities.relaxedFloat32toFloat16PerformanceTensor ||
-        perfFloat32 != capabilities.relaxedFloat32toFloat16PerformanceScalar) {
-        return false;
-    }
-
-    return isQuantized8PerformanceConsistentWithP(capabilities);
-}
-
-bool compliantWithV1_0(const V1_3::Capabilities& capabilities) {
-    const V1_0::PerformanceInfo perfTensorFloat32 =
-            lookup(capabilities.operandPerformance, V1_3::OperandType::TENSOR_FLOAT32);
-    const V1_0::PerformanceInfo perfFloat32 =
-            lookup(capabilities.operandPerformance, V1_3::OperandType::FLOAT32);
-    if (perfTensorFloat32 != perfFloat32 ||
-        perfTensorFloat32 != capabilities.relaxedFloat32toFloat16PerformanceTensor ||
-        perfFloat32 != capabilities.relaxedFloat32toFloat16PerformanceScalar) {
-        return false;
-    }
-
-    return isQuantized8PerformanceConsistentWithP(capabilities);
-}
-
-bool compliantWithV1_1(const V1_0::Capabilities&) {
-    return true;
-}
-
-bool compliantWithV1_1(const V1_1::Capabilities&) {
-    return true;
-}
-
-bool compliantWithV1_1(const V1_2::Capabilities& capabilities) {
-    if ((capabilities.relaxedFloat32toFloat16PerformanceTensor !=
-         capabilities.relaxedFloat32toFloat16PerformanceScalar) ||
-        (lookup(capabilities.operandPerformance, V1_2::OperandType::TENSOR_FLOAT32) !=
-         lookup(capabilities.operandPerformance, V1_2::OperandType::FLOAT32))) {
-        return false;
-    }
-
-    return isQuantized8PerformanceConsistentWithP(capabilities);
-}
-
-bool compliantWithV1_1(const V1_3::Capabilities& capabilities) {
-    if ((capabilities.relaxedFloat32toFloat16PerformanceTensor !=
-         capabilities.relaxedFloat32toFloat16PerformanceScalar) ||
-        (lookup(capabilities.operandPerformance, V1_3::OperandType::TENSOR_FLOAT32) !=
-         lookup(capabilities.operandPerformance, V1_3::OperandType::FLOAT32))) {
-        return false;
-    }
-
-    return isQuantized8PerformanceConsistentWithP(capabilities);
-}
-
-bool compliantWithV1_2(const V1_0::Capabilities&) {
-    return true;
-}
-
-bool compliantWithV1_2(const V1_1::Capabilities&) {
-    return true;
-}
-
-bool compliantWithV1_2(const V1_2::Capabilities&) {
-    return true;
-}
-
-bool compliantWithV1_2(const V1_3::Capabilities&) {
-    return true;
-}
-
-bool compliantWithV1_3(const V1_0::Capabilities&) {
-    return true;
-}
-
-bool compliantWithV1_3(const V1_1::Capabilities&) {
-    return true;
-}
-
-bool compliantWithV1_3(const V1_2::Capabilities&) {
-    return true;
-}
-
-bool compliantWithV1_3(const V1_3::Capabilities&) {
-    return true;
-}
-
-V1_0::ErrorStatus convertToV1_0(V1_0::ErrorStatus status) {
-    return status;
-}
-
-V1_0::ErrorStatus convertToV1_0(V1_3::ErrorStatus status) {
-    switch (status) {
-        case V1_3::ErrorStatus::NONE:
-            return V1_0::ErrorStatus::NONE;
-        case V1_3::ErrorStatus::DEVICE_UNAVAILABLE:
-            return V1_0::ErrorStatus::DEVICE_UNAVAILABLE;
-        case V1_3::ErrorStatus::GENERAL_FAILURE:
-            return V1_0::ErrorStatus::GENERAL_FAILURE;
-        case V1_3::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE:
-            return V1_0::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE;
-        case V1_3::ErrorStatus::INVALID_ARGUMENT:
-            return V1_0::ErrorStatus::INVALID_ARGUMENT;
-        case V1_3::ErrorStatus::MISSED_DEADLINE_TRANSIENT:
-            return V1_0::ErrorStatus::GENERAL_FAILURE;
-        case V1_3::ErrorStatus::MISSED_DEADLINE_PERSISTENT:
-            return V1_0::ErrorStatus::GENERAL_FAILURE;
-        case V1_3::ErrorStatus::RESOURCE_EXHAUSTED_TRANSIENT:
-            return V1_0::ErrorStatus::GENERAL_FAILURE;
-        case V1_3::ErrorStatus::RESOURCE_EXHAUSTED_PERSISTENT:
-            return V1_0::ErrorStatus::GENERAL_FAILURE;
-    }
-    LOG(ERROR) << "Unknown ErrorStatus: " << toString(status) << " mapped to GENERAL_FAILURE";
-    return V1_0::ErrorStatus::GENERAL_FAILURE;
-}
-
-V1_3::ErrorStatus convertToV1_3(V1_0::ErrorStatus status) {
-    return static_cast<V1_3::ErrorStatus>(status);
-}
-
-V1_3::ErrorStatus convertToV1_3(V1_3::ErrorStatus status) {
-    return status;
-}
-
-static V1_0::OperationType uncheckedConvertToV1_0(V1_1::OperationType type) {
-    return static_cast<V1_0::OperationType>(type);
-}
-
-static V1_0::OperationType uncheckedConvertToV1_0(V1_2::OperationType type) {
-    return static_cast<V1_0::OperationType>(type);
-}
-
-V1_0::OperationType uncheckedConvertToV1_0(V1_3::OperationType type) {
-    return static_cast<V1_0::OperationType>(type);
-}
-
-static V1_1::OperationType convertToV1_1(V1_0::OperationType type) {
-    return static_cast<V1_1::OperationType>(type);
-}
-
-static V1_1::OperationType uncheckedConvertToV1_1(V1_2::OperationType type) {
-    return static_cast<V1_1::OperationType>(type);
-}
-
-V1_1::OperationType uncheckedConvertToV1_1(V1_3::OperationType type) {
-    return static_cast<V1_1::OperationType>(type);
-}
-
-static V1_2::OperationType convertToV1_2(V1_0::OperationType type) {
-    return static_cast<V1_2::OperationType>(type);
-}
-
-static V1_2::OperationType convertToV1_2(V1_1::OperationType type) {
-    return static_cast<V1_2::OperationType>(type);
-}
-
-V1_2::OperationType uncheckedConvertToV1_2(V1_3::OperationType type) {
-    return static_cast<V1_2::OperationType>(type);
-}
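
Note that the ErrorStatus downconversion above is one-way for statuses introduced after HAL 1.0; a minimal sketch:

    // V1_3-only statuses have no 1.0 equivalent and collapse to GENERAL_FAILURE.
    const V1_0::ErrorStatus legacy =
            convertToV1_0(V1_3::ErrorStatus::MISSED_DEADLINE_TRANSIENT);
    CHECK(legacy == V1_0::ErrorStatus::GENERAL_FAILURE);
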
- -static V1_3::OperationType convertToV1_3(V1_0::OperationType type) { - return static_cast(type); -} - -static V1_3::OperationType convertToV1_3(V1_1::OperationType type) { - return static_cast(type); -} - -static V1_3::OperationType convertToV1_3(V1_2::OperationType type) { - return static_cast(type); -} - -V1_0::Capabilities convertToV1_0(const V1_0::Capabilities& capabilities) { - return capabilities; -} - -V1_0::Capabilities convertToV1_0(const V1_1::Capabilities& capabilities) { - if (!compliantWithV1_0(capabilities)) { - LOG(ERROR) << "Upcasting non-compliant capabilities " << toString(capabilities) - << " from V1_1::Capabilities to V1_0::Capabilities"; - } - return {.float32Performance = capabilities.float32Performance, - .quantized8Performance = capabilities.quantized8Performance}; -} - -V1_0::Capabilities convertToV1_0(const V1_2::Capabilities& capabilities) { - if (!compliantWithV1_0(capabilities)) { - LOG(ERROR) << "Upcasting non-compliant capabilities " << toString(capabilities) - << " from V1_2::Capabilities to V1_0::Capabilities"; - } - return {.float32Performance = - lookup(capabilities.operandPerformance, V1_2::OperandType::TENSOR_FLOAT32), - .quantized8Performance = lookup(capabilities.operandPerformance, - V1_2::OperandType::TENSOR_QUANT8_ASYMM)}; -} - -V1_0::Capabilities convertToV1_0(const V1_3::Capabilities& capabilities) { - if (!compliantWithV1_0(capabilities)) { - LOG(ERROR) << "Upcasting non-compliant capabilities " << toString(capabilities) - << " from V1_3::Capabilities to V1_0::Capabilities"; - } - return {.float32Performance = - lookup(capabilities.operandPerformance, V1_3::OperandType::TENSOR_FLOAT32), - .quantized8Performance = lookup(capabilities.operandPerformance, - V1_3::OperandType::TENSOR_QUANT8_ASYMM)}; -} - -V1_1::Capabilities convertToV1_1(const V1_0::Capabilities& capabilities) { - return {.float32Performance = capabilities.float32Performance, - .quantized8Performance = capabilities.quantized8Performance, - .relaxedFloat32toFloat16Performance = capabilities.float32Performance}; -} - -V1_1::Capabilities convertToV1_1(const V1_1::Capabilities& capabilities) { - return capabilities; -} - -V1_1::Capabilities convertToV1_1(const V1_2::Capabilities& capabilities) { - if (!compliantWithV1_1(capabilities)) { - LOG(ERROR) << "Upcasting non-compliant capabilities " << toString(capabilities) - << " from V1_2::Capabilities to V1_1::Capabilities"; - } - return {.float32Performance = - lookup(capabilities.operandPerformance, V1_2::OperandType::TENSOR_FLOAT32), - .quantized8Performance = - lookup(capabilities.operandPerformance, V1_2::OperandType::TENSOR_QUANT8_ASYMM), - .relaxedFloat32toFloat16Performance = - capabilities.relaxedFloat32toFloat16PerformanceTensor}; -} - -V1_1::Capabilities convertToV1_1(const V1_3::Capabilities& capabilities) { - if (!compliantWithV1_1(capabilities)) { - LOG(ERROR) << "Upcasting non-compliant capabilities " << toString(capabilities) - << " from V1_3::Capabilities to V1_1::Capabilities"; - } - return {.float32Performance = - lookup(capabilities.operandPerformance, V1_3::OperandType::TENSOR_FLOAT32), - .quantized8Performance = - lookup(capabilities.operandPerformance, V1_3::OperandType::TENSOR_QUANT8_ASYMM), - .relaxedFloat32toFloat16Performance = - capabilities.relaxedFloat32toFloat16PerformanceTensor}; -} - -V1_2::Capabilities convertToV1_2(const V1_0::Capabilities& capabilities) { - V1_2::Capabilities ret = { - .relaxedFloat32toFloat16PerformanceScalar = capabilities.float32Performance, - 
.relaxedFloat32toFloat16PerformanceTensor = capabilities.float32Performance, - .operandPerformance = - makeQuantized8PerformanceConsistentWithP(capabilities.quantized8Performance)}; - auto& opPerf = ret.operandPerformance; - opPerf.resize(opPerf.size() + 2); - opPerf[opPerf.size() - 2] = {V1_2::OperandType::TENSOR_FLOAT32, - capabilities.float32Performance}; - opPerf[opPerf.size() - 1] = {V1_2::OperandType::FLOAT32, capabilities.float32Performance}; - using OperandPerformance = V1_2::Capabilities::OperandPerformance; - std::sort(opPerf.begin(), opPerf.end(), - [](const OperandPerformance& a, const OperandPerformance& b) { - return a.type < b.type; - }); - return ret; -} - -V1_2::Capabilities convertToV1_2(const V1_1::Capabilities& capabilities) { - V1_2::Capabilities ret = {.relaxedFloat32toFloat16PerformanceScalar = - capabilities.relaxedFloat32toFloat16Performance, - .relaxedFloat32toFloat16PerformanceTensor = - capabilities.relaxedFloat32toFloat16Performance, - .operandPerformance = makeQuantized8PerformanceConsistentWithP( - capabilities.quantized8Performance)}; - auto& opPerf = ret.operandPerformance; - opPerf.resize(opPerf.size() + 2); - opPerf[opPerf.size() - 2] = {V1_2::OperandType::TENSOR_FLOAT32, - capabilities.float32Performance}; - opPerf[opPerf.size() - 1] = {V1_2::OperandType::FLOAT32, capabilities.float32Performance}; - using OperandPerformance = V1_2::Capabilities::OperandPerformance; - std::sort(opPerf.begin(), opPerf.end(), - [](const OperandPerformance& a, const OperandPerformance& b) { - return a.type < b.type; - }); - return ret; -} - -V1_2::Capabilities convertToV1_2(const V1_2::Capabilities& capabilities) { - return capabilities; -} - -V1_2::Capabilities convertToV1_2(const V1_3::Capabilities& capabilities) { - V1_2::Capabilities ret = { - .relaxedFloat32toFloat16PerformanceScalar = - capabilities.relaxedFloat32toFloat16PerformanceScalar, - .relaxedFloat32toFloat16PerformanceTensor = - capabilities.relaxedFloat32toFloat16PerformanceTensor, - }; - const auto& inputOpPerf = capabilities.operandPerformance; - hardware::hidl_vec opPerfSupported; - opPerfSupported.resize(inputOpPerf.size()); - auto last = - std::copy_if(inputOpPerf.begin(), inputOpPerf.end(), opPerfSupported.begin(), - [](V1_3::Capabilities::OperandPerformance opPerf) { - return validOperandType(static_cast(opPerf.type)); - }); - opPerfSupported.resize(std::distance(opPerfSupported.begin(), last)); - - auto& convertedOpPerf = ret.operandPerformance; - convertedOpPerf.resize(opPerfSupported.size()); - std::transform(opPerfSupported.begin(), opPerfSupported.end(), convertedOpPerf.begin(), - [](V1_3::Capabilities::OperandPerformance opPerf) { - return V1_2::Capabilities::OperandPerformance{ - static_cast(opPerf.type), opPerf.info}; - }); - return ret; -} - -V1_3::Capabilities convertToV1_3(const V1_0::Capabilities& capabilities) { - return convertToV1_3(convertToV1_2(capabilities)); -} - -V1_3::Capabilities convertToV1_3(const V1_1::Capabilities& capabilities) { - return convertToV1_3(convertToV1_2(capabilities)); -} - -V1_3::Capabilities convertToV1_3(const V1_2::Capabilities& capabilities) { - V1_3::Capabilities ret = { - .relaxedFloat32toFloat16PerformanceScalar = - capabilities.relaxedFloat32toFloat16PerformanceScalar, - .relaxedFloat32toFloat16PerformanceTensor = - capabilities.relaxedFloat32toFloat16PerformanceTensor, - .ifPerformance = kNoPerformanceInfo, - .whilePerformance = kNoPerformanceInfo, - }; - auto& opPerf = ret.operandPerformance; - opPerf.resize(capabilities.operandPerformance.size()); - 
std::transform(capabilities.operandPerformance.begin(), capabilities.operandPerformance.end(), - opPerf.begin(), [](V1_2::Capabilities::OperandPerformance opPerf) { - return V1_3::Capabilities::OperandPerformance{ - static_cast(opPerf.type), opPerf.info}; - }); - return ret; -} - -V1_3::Capabilities convertToV1_3(const V1_3::Capabilities& capabilities) { - return capabilities; -} - -static V1_0::Operation uncheckedConvertToV1_0(const V1_1::Operation& operation) { - return {.type = uncheckedConvertToV1_0(operation.type), - .inputs = operation.inputs, - .outputs = operation.outputs}; -} - -static V1_1::Operation convertToV1_1(const V1_0::Operation& operation) { - return {.type = convertToV1_1(operation.type), - .inputs = operation.inputs, - .outputs = operation.outputs}; -} - -static hardware::hidl_vec uncheckedConvertToV1_0( - const hardware::hidl_vec& operations) { - hardware::hidl_vec result(operations.size()); - std::transform( - operations.begin(), operations.end(), result.begin(), - [](const V1_1::Operation& operation) { return uncheckedConvertToV1_0(operation); }); - return result; -} - -static hardware::hidl_vec convertToV1_1( - const hardware::hidl_vec& operations) { - hardware::hidl_vec result(operations.size()); - std::transform(operations.begin(), operations.end(), result.begin(), - [](const V1_0::Operation& operation) { return convertToV1_1(operation); }); - return result; -} - -bool compliantWithV1_0(const V1_3::Operand& operand) { - return validOperandType(static_cast(operand.type)) && - (nonExtensionOperandTypeIsScalar(static_cast(operand.type)) || - operand.dimensions.size() != 0) && - compliantWithV1_0(operand.lifetime); -} - -bool compliantWithV1_2(const V1_3::Operand& operand) { - return validOperandType(static_cast(operand.type)) && - compliantWithV1_0(operand.lifetime); -} - -bool compliantWithV1_3(const V1_3::Operand& operand) { - return true; -} - -static bool compliantWith(HalVersion version, const V1_3::Model& model, - std::set* noncompliantOperations) { - // A boolean vector indicating whether each pool is compliant with the target HAL version. - std::vector isPoolCompliant(model.pools.size(), false); - std::transform( - model.pools.begin(), model.pools.end(), isPoolCompliant.begin(), - [version](const hardware::hidl_memory& pool) { return validatePool(pool, version); }); - - // A boolean vector indicating whether each operand is compliant with the target HAL version. - std::vector isOperandCompliant(model.main.operands.size(), false); - std::transform(model.main.operands.begin(), model.main.operands.end(), - isOperandCompliant.begin(), - [&isPoolCompliant, version](const V1_3::Operand& op) { - bool is_operand_compliant = false; - switch (version) { - case HalVersion::UNKNOWN: - is_operand_compliant = false; - break; - case HalVersion::V1_0: - is_operand_compliant = compliantWithV1_0(op); - break; - case HalVersion::V1_1: - // There is no V1_1::Operand -- both V1_0::Model - // and V1_1::Model use V1_0::Operand. 
- is_operand_compliant = compliantWithV1_0(op); - break; - case HalVersion::V1_2: - is_operand_compliant = compliantWithV1_2(op); - break; - case HalVersion::V1_3: - is_operand_compliant = compliantWithV1_3(op); - break; - } - return is_operand_compliant && - !(op.lifetime == V1_3::OperandLifeTime::CONSTANT_REFERENCE && - !isPoolCompliant[op.location.poolIndex]); - }); - - auto allOperandsCompliant = [&isOperandCompliant](const hardware::hidl_vec& indices) { - return std::all_of( - indices.begin(), indices.end(), - [&isOperandCompliant](const uint32_t ind) { return isOperandCompliant[ind]; }); - }; - - auto localValidateOperation = [&model, version, - &allOperandsCompliant](const V1_3::Operation& op) { - if (!allOperandsCompliant(op.inputs) || !allOperandsCompliant(op.outputs)) return false; - int error = validateOperation(static_cast(op.type), op.inputs.size(), - op.inputs.size() > 0 ? op.inputs.data() : nullptr, - op.outputs.size(), - op.outputs.size() > 0 ? op.outputs.data() : nullptr, - uncheckedConvert(model.main.operands), version); - return error == ANEURALNETWORKS_NO_ERROR; - }; - - if (noncompliantOperations) { - CHECK(noncompliantOperations->empty()); - for (uint32_t idx = 0; idx < model.main.operations.size(); ++idx) { - if (!localValidateOperation(model.main.operations[idx])) { - noncompliantOperations->insert(idx); - } - } - return noncompliantOperations->empty(); - } else { - return std::all_of(model.main.operations.begin(), model.main.operations.end(), - localValidateOperation); - } -} - -bool compliantWithV1_0(const V1_0::Model& model) { - return true; -} - -bool compliantWithV1_0(const V1_1::Model& model) { - // In addition to new enumeration values being introduced in V1_1::Model, a - // new flag was introduced to indicate whether or not float32 data can be - // calculated using float16 units. This 'relaxComputationFloat32toFloat16' - // flag is not relevant in whether a V1_1::Model is compliant with a - // V1_0::Model because all 1.0 drivers require strict calculation by default - // in the P NN runtime. Even if fp16 calculations are allowed, they can - // still be computed by a strict fp32 driver. - auto operands = uncheckedConvert(convertToV1_3(model.operands)); - return std::all_of(model.operations.begin(), model.operations.end(), - [&operands](const V1_1::Operation& op) { - int error = validateOperation( - static_cast(op.type), op.inputs.size(), - op.inputs.size() > 0 ? op.inputs.data() : nullptr, - op.outputs.size(), - op.outputs.size() > 0 ? 
op.outputs.data() : nullptr, operands, - HalVersion::V1_0); - return error == ANEURALNETWORKS_NO_ERROR; - }); -} - -bool compliantWithV1_0(const V1_2::Model& model, std::set* noncompliantOperations) { - return compliantWith(HalVersion::V1_0, convertToV1_3(model), noncompliantOperations); -} - -bool compliantWithV1_0(const V1_3::Model& model, std::set* noncompliantOperations) { - return compliantWith(HalVersion::V1_0, model, noncompliantOperations); -} - -bool compliantWithV1_1(const V1_0::Model&) { - return true; -} - -bool compliantWithV1_1(const V1_1::Model&) { - return true; -} - -bool compliantWithV1_1(const V1_2::Model& model, std::set* noncompliantOperations) { - return compliantWith(HalVersion::V1_1, convertToV1_3(model), noncompliantOperations); -} - -bool compliantWithV1_1(const V1_3::Model& model, std::set* noncompliantOperations) { - return compliantWith(HalVersion::V1_1, model, noncompliantOperations); -} - -bool compliantWithV1_2(const V1_0::Model&) { - return true; -} - -bool compliantWithV1_2(const V1_1::Model&) { - return true; -} - -bool compliantWithV1_2(const V1_2::Model&, std::set* noncompliantOperations) { - return true; -} - -bool compliantWithV1_2(const V1_3::Model& model, std::set* noncompliantOperations) { - return compliantWith(HalVersion::V1_2, model, noncompliantOperations); -} - -static V1_0::Operation uncheckedConvertToV1_0(const V1_2::Operation& operation) { - return {.type = uncheckedConvertToV1_0(operation.type), - .inputs = operation.inputs, - .outputs = operation.outputs}; -} - -static V1_0::Operation uncheckedConvertToV1_0(const V1_3::Operation& operation) { - return {.type = uncheckedConvertToV1_0(operation.type), - .inputs = operation.inputs, - .outputs = operation.outputs}; -} - -static V1_1::Operation uncheckedConvertToV1_1(const V1_2::Operation& operation) { - return {.type = uncheckedConvertToV1_1(operation.type), - .inputs = operation.inputs, - .outputs = operation.outputs}; -} - -static V1_1::Operation uncheckedConvertToV1_1(const V1_3::Operation& operation) { - return {.type = uncheckedConvertToV1_1(operation.type), - .inputs = operation.inputs, - .outputs = operation.outputs}; -} - -static V1_2::Operation convertToV1_2(const V1_0::Operation& operation) { - return {.type = convertToV1_2(operation.type), - .inputs = operation.inputs, - .outputs = operation.outputs}; -} - -static V1_2::Operation convertToV1_2(const V1_1::Operation& operation) { - return {.type = convertToV1_2(operation.type), - .inputs = operation.inputs, - .outputs = operation.outputs}; -} - -static V1_2::Operation uncheckedConvertToV1_2(const V1_3::Operation& operation) { - return {.type = uncheckedConvertToV1_2(operation.type), - .inputs = operation.inputs, - .outputs = operation.outputs}; -} - -static V1_3::Operation convertToV1_3(const V1_0::Operation& operation) { - return {.type = convertToV1_3(operation.type), - .inputs = operation.inputs, - .outputs = operation.outputs}; -} - -static V1_3::Operation convertToV1_3(const V1_1::Operation& operation) { - return {.type = convertToV1_3(operation.type), - .inputs = operation.inputs, - .outputs = operation.outputs}; -} - -static V1_3::Operation convertToV1_3(const V1_2::Operation& operation) { - return {.type = convertToV1_3(operation.type), - .inputs = operation.inputs, - .outputs = operation.outputs}; -} - -static hardware::hidl_vec uncheckedConvertToV1_0( - const hardware::hidl_vec& operations) { - hardware::hidl_vec result(operations.size()); - std::transform( - operations.begin(), operations.end(), result.begin(), - [](const 
V1_3::Operation& operation) { return uncheckedConvertToV1_0(operation); }); - return result; -} - -static hardware::hidl_vec uncheckedConvertToV1_0( - const hardware::hidl_vec& operations) { - hardware::hidl_vec result(operations.size()); - std::transform( - operations.begin(), operations.end(), result.begin(), - [](const V1_2::Operation& operation) { return uncheckedConvertToV1_0(operation); }); - return result; -} - -static hardware::hidl_vec uncheckedConvertToV1_2( - const hardware::hidl_vec& operations) { - hardware::hidl_vec result(operations.size()); - std::transform( - operations.begin(), operations.end(), result.begin(), - [](const V1_3::Operation& operation) { return uncheckedConvertToV1_2(operation); }); - return result; -} - -static hardware::hidl_vec uncheckedConvertToV1_1( - const hardware::hidl_vec& operations) { - hardware::hidl_vec result(operations.size()); - std::transform( - operations.begin(), operations.end(), result.begin(), - [](const V1_2::Operation& operation) { return uncheckedConvertToV1_1(operation); }); - return result; -} - -static hardware::hidl_vec uncheckedConvertToV1_1( - const hardware::hidl_vec& operations) { - hardware::hidl_vec result(operations.size()); - std::transform( - operations.begin(), operations.end(), result.begin(), - [](const V1_3::Operation& operation) { return uncheckedConvertToV1_1(operation); }); - return result; -} - -static hardware::hidl_vec convertToV1_2( - const hardware::hidl_vec& operations) { - hardware::hidl_vec result(operations.size()); - std::transform(operations.begin(), operations.end(), result.begin(), - [](const V1_0::Operation& operation) { return convertToV1_2(operation); }); - return result; -} - -static hardware::hidl_vec convertToV1_2( - const hardware::hidl_vec& operations) { - hardware::hidl_vec result(operations.size()); - std::transform(operations.begin(), operations.end(), result.begin(), - [](const V1_1::Operation& operation) { return convertToV1_2(operation); }); - return result; -} - -static hardware::hidl_vec convertToV1_3( - const hardware::hidl_vec& operations) { - hardware::hidl_vec result(operations.size()); - std::transform(operations.begin(), operations.end(), result.begin(), - [](const V1_0::Operation& operation) { return convertToV1_3(operation); }); - return result; -} - -static hardware::hidl_vec convertToV1_3( - const hardware::hidl_vec& operations) { - hardware::hidl_vec result(operations.size()); - std::transform(operations.begin(), operations.end(), result.begin(), - [](const V1_1::Operation& operation) { return convertToV1_3(operation); }); - return result; -} - -static hardware::hidl_vec convertToV1_3( - const hardware::hidl_vec& operations) { - hardware::hidl_vec result(operations.size()); - std::transform(operations.begin(), operations.end(), result.begin(), - [](const V1_2::Operation& operation) { return convertToV1_3(operation); }); - return result; -} - -static bool compliantWithV1_0(const V1_2::OperandType& operandType) { - return validOperandType(static_cast(operandType)); -} - -static bool compliantWithV1_0(const V1_3::OperandType& operandType) { - return validOperandType(static_cast(operandType)); -} - -static bool compliantWithV1_2(const V1_3::OperandType& operandType) { - return validOperandType(static_cast(operandType)); -} - -V1_0::OperandType convertToV1_0(const V1_2::OperandType& operandType) { - if (!compliantWithV1_0(operandType)) { - LOG(ERROR) << "Upcasting non-compliant operand type " << toString(operandType) - << " from V1_2::OperandType to V1_0::OperandType"; - } - 
return static_cast(operandType); -} - -V1_2::OperandType convertToV1_2(const V1_0::OperandType& operandType) { - return static_cast(operandType); -} - -V1_2::OperandType convertToV1_2(const V1_3::OperandType& operandType) { - if (!compliantWithV1_2(operandType)) { - LOG(ERROR) << "Upcasting non-compliant operand type " << toString(operandType) - << " from V1_3::OperandType to V1_2::OperandType"; - } - return static_cast(operandType); -} - -V1_0::OperandType convertToV1_0(const V1_3::OperandType& operandType) { - if (!compliantWithV1_0(operandType)) { - LOG(ERROR) << "Upcasting non-compliant operand type " << toString(operandType) - << " from V1_3::Operand to V1_0::Operand"; - } - return static_cast(operandType); -} - -bool compliantWithV1_0(V1_0::OperandLifeTime lifetime) { - return true; -} - -bool compliantWithV1_0(V1_3::OperandLifeTime lifetime) { - return lifetime != V1_3::OperandLifeTime::SUBGRAPH; -} - -bool compliantWithV1_3(V1_0::OperandLifeTime lifetime) { - return true; -} - -bool compliantWithV1_3(V1_3::OperandLifeTime lifetime) { - return true; -} - -V1_0::OperandLifeTime convertToV1_0(V1_0::OperandLifeTime lifetime) { - return lifetime; -} - -V1_0::OperandLifeTime convertToV1_0(V1_3::OperandLifeTime lifetime) { - if (!compliantWithV1_0(lifetime)) { - LOG(ERROR) << "Upcasting non-compliant lifetime " << toString(lifetime) - << " from V1_3 to V1_0"; - } - return static_cast(lifetime); -} - -V1_3::OperandLifeTime convertToV1_3(V1_0::OperandLifeTime lifetime) { - return static_cast(lifetime); -} - -V1_3::OperandLifeTime convertToV1_3(V1_3::OperandLifeTime lifetime) { - return lifetime; -} - -V1_0::Operand convertToV1_0(const V1_2::Operand& operand) { - return {.type = convertToV1_0(operand.type), - .dimensions = operand.dimensions, - .numberOfConsumers = operand.numberOfConsumers, - .scale = operand.scale, - .zeroPoint = operand.zeroPoint, - .lifetime = convertToV1_0(operand.lifetime), - .location = operand.location}; -} - -V1_0::Operand convertToV1_0(const V1_3::Operand& operand) { - return {.type = convertToV1_0(operand.type), - .dimensions = operand.dimensions, - .numberOfConsumers = operand.numberOfConsumers, - .scale = operand.scale, - .zeroPoint = operand.zeroPoint, - .lifetime = convertToV1_0(operand.lifetime), - .location = operand.location}; -} - -V1_2::Operand convertToV1_2(const V1_0::Operand& operand) { - return {.type = convertToV1_2(operand.type), - .dimensions = operand.dimensions, - .numberOfConsumers = operand.numberOfConsumers, - .scale = operand.scale, - .zeroPoint = operand.zeroPoint, - .lifetime = operand.lifetime, - .location = operand.location}; -} - -V1_2::Operand convertToV1_2(const V1_3::Operand& operand) { - return {.type = convertToV1_2(operand.type), - .dimensions = operand.dimensions, - .numberOfConsumers = operand.numberOfConsumers, - .scale = operand.scale, - .zeroPoint = operand.zeroPoint, - .lifetime = static_cast(operand.lifetime), - .location = operand.location, - .extraParams = operand.extraParams}; -} - -V1_3::Operand convertToV1_3(const V1_0::Operand& operand) { - return {.type = static_cast(operand.type), - .dimensions = operand.dimensions, - .numberOfConsumers = operand.numberOfConsumers, - .scale = operand.scale, - .zeroPoint = operand.zeroPoint, - .lifetime = convertToV1_3(operand.lifetime), - .location = operand.location}; -} - -V1_3::Operand convertToV1_3(const V1_2::Operand& operand) { - return {.type = static_cast(operand.type), - .dimensions = operand.dimensions, - .numberOfConsumers = operand.numberOfConsumers, - .scale = 
operand.scale, - .zeroPoint = operand.zeroPoint, - .lifetime = convertToV1_3(operand.lifetime), - .location = operand.location, - .extraParams = operand.extraParams}; -} - -V1_3::Operand convertToV1_3(const V1_3::Operand& operand) { - return operand; -} - -hardware::hidl_vec convertToV1_0(const hardware::hidl_vec& operands) { - return operands; -} - -hardware::hidl_vec convertToV1_0(const hardware::hidl_vec& operands) { - hardware::hidl_vec result(operands.size()); - std::transform(operands.begin(), operands.end(), result.begin(), - [](const V1_2::Operand& operand) { return convertToV1_0(operand); }); - return result; -} - -hardware::hidl_vec convertToV1_0(const hardware::hidl_vec& operands) { - hardware::hidl_vec result(operands.size()); - std::transform(operands.begin(), operands.end(), result.begin(), - [](const V1_3::Operand& operand) { return convertToV1_0(operand); }); - return result; -} - -hardware::hidl_vec convertToV1_2(const hardware::hidl_vec& operands) { - hardware::hidl_vec result(operands.size()); - std::transform(operands.begin(), operands.end(), result.begin(), - [](const V1_0::Operand& operand) { return convertToV1_2(operand); }); - return result; -} - -hardware::hidl_vec convertToV1_2(const hardware::hidl_vec& operands) { - return operands; -} - -hardware::hidl_vec convertToV1_2(const hardware::hidl_vec& operands) { - hardware::hidl_vec result(operands.size()); - std::transform(operands.begin(), operands.end(), result.begin(), - [](const V1_3::Operand& operand) { return convertToV1_2(operand); }); - return result; -} - -hardware::hidl_vec convertToV1_3(const hardware::hidl_vec& operands) { - hardware::hidl_vec result(operands.size()); - std::transform(operands.begin(), operands.end(), result.begin(), - [](const V1_0::Operand& operand) { return convertToV1_3(operand); }); - return result; -} - -hardware::hidl_vec convertToV1_3(const hardware::hidl_vec& operands) { - hardware::hidl_vec result(operands.size()); - std::transform(operands.begin(), operands.end(), result.begin(), - [](const V1_2::Operand& operand) { return convertToV1_3(operand); }); - return result; -} - -hardware::hidl_vec convertToV1_3(const hardware::hidl_vec& operands) { - return operands; -} - -V1_0::Model convertToV1_0(const V1_0::Model& model) { - return model; -} - -V1_0::Model convertToV1_0(const V1_1::Model& model) { - if (!compliantWithV1_0(model)) { - LOG(ERROR) << "Upcasting non-compliant model " << SHOW_IF_DEBUG(toString(model)) - << " from V1_1::Model to V1_0::Model"; - } - return {.operands = model.operands, - .operations = uncheckedConvertToV1_0(model.operations), - .inputIndexes = model.inputIndexes, - .outputIndexes = model.outputIndexes, - .operandValues = model.operandValues, - .pools = model.pools}; -} - -V1_0::Model convertToV1_0(const V1_2::Model& model) { - if (!compliantWithV1_0(model)) { - LOG(ERROR) << "Upcasting non-compliant model " << SHOW_IF_DEBUG(toString(model)) - << " from V1_2::Model to V1_0::Model"; - } - return {.operands = convertToV1_0(model.operands), - .operations = uncheckedConvertToV1_0(model.operations), - .inputIndexes = model.inputIndexes, - .outputIndexes = model.outputIndexes, - .operandValues = model.operandValues, - .pools = model.pools}; -} - -V1_0::Model convertToV1_0(const V1_3::Model& model) { - if (!compliantWithV1_0(model)) { - LOG(ERROR) << "Upcasting non-compliant model " << SHOW_IF_DEBUG(toString(model)) - << " from V1_3::Model to V1_0::Model"; - } - return {.operands = convertToV1_0(model.main.operands), - .operations = 
uncheckedConvertToV1_0(model.main.operations), - .inputIndexes = model.main.inputIndexes, - .outputIndexes = model.main.outputIndexes, - .operandValues = model.operandValues, - .pools = model.pools}; -} - -V1_1::Model convertToV1_1(const V1_0::Model& model) { - return {.operands = model.operands, - .operations = convertToV1_1(model.operations), - .inputIndexes = model.inputIndexes, - .outputIndexes = model.outputIndexes, - .operandValues = model.operandValues, - .pools = model.pools, - .relaxComputationFloat32toFloat16 = false}; -} - -V1_1::Model convertToV1_1(const V1_1::Model& model) { - return model; -} - -V1_1::Model convertToV1_1(const V1_2::Model& model) { - if (!compliantWithV1_1(model)) { - LOG(ERROR) << "Upcasting non-compliant model " << SHOW_IF_DEBUG(toString(model)) - << " from V1_2::Model to V1_1::Model"; - } - return {.operands = convertToV1_0(model.operands), // Operands in 1.1 and 1.0 are identical. - .operations = uncheckedConvertToV1_1(model.operations), - .inputIndexes = model.inputIndexes, - .outputIndexes = model.outputIndexes, - .operandValues = model.operandValues, - .pools = model.pools, - .relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16}; -} - -V1_1::Model convertToV1_1(const V1_3::Model& model) { - if (!compliantWithV1_1(model)) { - LOG(ERROR) << "Upcasting non-compliant model " << SHOW_IF_DEBUG(toString(model)) - << " from V1_3::Model to V1_1::Model"; - } - return {// Operands in 1.1 and 1.0 are identical. - .operands = convertToV1_0(model.main.operands), - .operations = uncheckedConvertToV1_1(model.main.operations), - .inputIndexes = model.main.inputIndexes, - .outputIndexes = model.main.outputIndexes, - .operandValues = model.operandValues, - .pools = model.pools, - .relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16}; -} - -V1_2::Model convertToV1_2(const V1_0::Model& model) { - return {.operands = convertToV1_2(model.operands), - .operations = convertToV1_2(model.operations), - .inputIndexes = model.inputIndexes, - .outputIndexes = model.outputIndexes, - .operandValues = model.operandValues, - .pools = model.pools, - .relaxComputationFloat32toFloat16 = false}; -} - -V1_2::Model convertToV1_2(const V1_1::Model& model) { - return {.operands = convertToV1_2(model.operands), - .operations = convertToV1_2(model.operations), - .inputIndexes = model.inputIndexes, - .outputIndexes = model.outputIndexes, - .operandValues = model.operandValues, - .pools = model.pools, - .relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16}; -} - -V1_2::Model convertToV1_2(const V1_2::Model& model) { - return model; -} - -V1_2::Model convertToV1_2(const V1_3::Model& model) { - if (!compliantWithV1_2(model)) { - LOG(ERROR) << "Upcasting non-compliant model " << SHOW_IF_DEBUG(toString(model)) - << " from V1_3::Model to V1_2::Model"; - } - return {.operands = convertToV1_2(model.main.operands), - .operations = uncheckedConvertToV1_2(model.main.operations), - .inputIndexes = model.main.inputIndexes, - .outputIndexes = model.main.outputIndexes, - .operandValues = model.operandValues, - .pools = model.pools, - .relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16, - .extensionNameToPrefix = model.extensionNameToPrefix}; -} - -V1_3::Model convertToV1_3(const V1_0::Model& model) { - return {.main = {.operands = convertToV1_3(model.operands), - .operations = convertToV1_3(model.operations), - .inputIndexes = model.inputIndexes, - .outputIndexes = model.outputIndexes}, - .operandValues = 
model.operandValues, - .pools = model.pools, - .relaxComputationFloat32toFloat16 = false}; -} - -V1_3::Model convertToV1_3(const V1_1::Model& model) { - return {.main = {.operands = convertToV1_3(model.operands), - .operations = convertToV1_3(model.operations), - .inputIndexes = model.inputIndexes, - .outputIndexes = model.outputIndexes}, - .operandValues = model.operandValues, - .pools = model.pools, - .relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16}; -} - -V1_3::Model convertToV1_3(const V1_2::Model& model) { - return {.main = {.operands = convertToV1_3(model.operands), - .operations = convertToV1_3(model.operations), - .inputIndexes = model.inputIndexes, - .outputIndexes = model.outputIndexes}, - .operandValues = model.operandValues, - .pools = model.pools, - .relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16, - .extensionNameToPrefix = model.extensionNameToPrefix}; -} - -V1_3::Model convertToV1_3(const V1_3::Model& model) { - return model; -} - -bool compliantWithV1_0(const V1_0::Request& request) { - return true; -} - -bool compliantWithV1_0(const V1_3::Request& request) { - return std::all_of(request.pools.begin(), request.pools.end(), [](const auto& pool) { - if (pool.getDiscriminator() != V1_3::Request::MemoryPool::hidl_discriminator::hidlMemory) { - return false; - } - const auto& name = pool.hidlMemory().name(); - return name == "ashmem" || name == "mmap_fd"; - }); -} - -bool compliantWithV1_2(const V1_3::Request& request) { - return std::all_of(request.pools.begin(), request.pools.end(), [](const auto& pool) { - if (pool.getDiscriminator() != V1_3::Request::MemoryPool::hidl_discriminator::hidlMemory) { - return false; - } - const auto& name = pool.hidlMemory().name(); - return name == "ashmem" || name == "mmap_fd" || name == "hardware_buffer_blob" || - name == "hardware_buffer"; - }); -} - -static hardware::hidl_memory convertToV1_0(const V1_3::Request::MemoryPool& pool) { - switch (pool.getDiscriminator()) { - case V1_3::Request::MemoryPool::hidl_discriminator::hidlMemory: - return pool.hidlMemory(); - case V1_3::Request::MemoryPool::hidl_discriminator::token: - return hardware::hidl_memory{}; - } -} - -static V1_3::Request::MemoryPool convertToV1_3(const hardware::hidl_memory& pool) { - V1_3::Request::MemoryPool ret; - ret.hidlMemory(pool); - return ret; -} - -V1_0::Request convertToV1_0(const V1_0::Request& request) { - return request; -} - -static V1_0::Request uncheckedConvertToV1_0(const V1_3::Request& request) { - hardware::hidl_vec pools(request.pools.size()); - std::transform(request.pools.begin(), request.pools.end(), pools.begin(), - [](const auto& pool) { return convertToV1_0(pool); }); - return {.inputs = request.inputs, .outputs = request.outputs, .pools = std::move(pools)}; -} - -V1_0::Request convertToV1_0(const V1_3::Request& request) { - if (!compliantWithV1_0(request)) { - LOG(ERROR) << "Upcasting non-compliant request " << SHOW_IF_DEBUG(toString(request)) - << " from V1_3::Request to V1_0::Request of version 1.0"; - } - return uncheckedConvertToV1_0(request); -} - -V1_0::Request convertToV1_2(const V1_3::Request& request) { - if (!compliantWithV1_2(request)) { - LOG(ERROR) << "Upcasting non-compliant request " << SHOW_IF_DEBUG(toString(request)) - << " from V1_3::Request to V1_0::Request of version 1.2"; - } - return uncheckedConvertToV1_0(request); -} - -V1_3::Request convertToV1_3(const V1_0::Request& request) { - hardware::hidl_vec pools(request.pools.size()); - std::transform(request.pools.begin(), 
request.pools.end(), pools.begin(), - [](const auto& pool) { return convertToV1_3(pool); }); - return {.inputs = request.inputs, .outputs = request.outputs, .pools = std::move(pools)}; -} - -V1_3::Request convertToV1_3(const V1_3::Request& request) { - return request; -} - -FenceState syncWait(int fd, int timeout) { - // This implementation is directly based on the ::sync_wait() implementation. - - struct pollfd fds; - int ret; - - if (fd < 0) { - errno = EINVAL; - return FenceState::UNKNOWN; - } - - fds.fd = fd; - fds.events = POLLIN; - - do { - ret = poll(&fds, 1, timeout); - if (ret > 0) { - if (fds.revents & POLLNVAL) { - errno = EINVAL; - return FenceState::UNKNOWN; - } - if (fds.revents & POLLERR) { - errno = EINVAL; - return FenceState::ERROR; - } - return FenceState::SIGNALED; - } else if (ret == 0) { - errno = ETIME; - return FenceState::ACTIVE; - } - } while (ret == -1 && (errno == EINTR || errno == EAGAIN)); - - return FenceState::UNKNOWN; -} - -#ifdef NN_DEBUGGABLE -uint32_t getProp(const char* str, uint32_t defaultValue) { - const std::string propStr = android::base::GetProperty(str, ""); - if (propStr.size() > 0) { - return std::stoi(propStr); - } else { - return defaultValue; - } -} -#endif // NN_DEBUGGABLE - -ErrorStatus uncheckedConvert(V1_0::ErrorStatus status) { - return nnTryGetValue(convert(status)); -} - -ErrorStatus uncheckedConvert(V1_3::ErrorStatus status) { - return nnTryGetValue(convert(status)); -} - -OperandType uncheckedConvert(V1_3::OperandType operandType) { - return nnTryGetValue(convert(operandType)); -} - -OperationType uncheckedConvert(V1_3::OperationType operandType) { - return nnTryGetValue(convert(operandType)); -} - -Operand::LifeTime uncheckedConvert(V1_3::OperandLifeTime lifetime) { - return nnTryGetValue(convert(lifetime)); -} - -MeasureTiming uncheckedConvert(V1_2::MeasureTiming measure) { - return nnTryGetValue(convert(measure)); -} - -DataLocation uncheckedConvert(const V1_0::DataLocation& location) { - return nnTryGetValue(convert(location)); -} - -Operand uncheckedConvert(const V1_3::Operand& operand) { - return nnTryGetValue(convert(operand)); -} - -Operand::ExtraParams uncheckedConvert(const V1_2::Operand::ExtraParams& params) { - return nnTryGetValue(convert(params)); -} - -Operand::SymmPerChannelQuantParams uncheckedConvert(const V1_2::SymmPerChannelQuantParams& params) { - return nnTryGetValue(convert(params)); -} - -Operand::ExtensionParams uncheckedConvert(const hardware::hidl_vec& params) { - return params; -} - -Operation uncheckedConvert(const V1_3::Operation& operation) { - return nnTryGetValue(convert(operation)); -} - -template -static std::vector convertVec(const hardware::hidl_vec& items) { - std::vector result(items.size()); - std::transform(items.begin(), items.end(), result.begin(), - [](const HalType& item) { return uncheckedConvert(item); }); - return result; -} - -Model uncheckedConvert(const V1_3::Model& model) { - return nnTryGetValue(convert(model)); -} - -Model::Subgraph uncheckedConvert(const V1_3::Subgraph& subgraph) { - return nnTryGetValue(convert(subgraph)); -} - -Model::ExtensionNameAndPrefix uncheckedConvert(const V1_2::Model::ExtensionNameAndPrefix& x) { - return nnTryGetValue(convert(x)); -} - -Request uncheckedConvert(const V1_3::Request& request) { - return nnTryGetValue(convert(request)); -} - -Request::Argument uncheckedConvert(const V1_0::RequestArgument& requestArgument) { - return nnTryGetValue(convert(requestArgument)); -} - -Request::MemoryPool uncheckedConvert(const V1_3::Request::MemoryPool& 
memoryPool) { - return nnTryGetValue(convert(memoryPool)); -} - -OutputShape uncheckedConvert(const V1_2::OutputShape& outputShape) { - return nnTryGetValue(convert(outputShape)); -} - -std::vector uncheckedConvert( - const hardware::hidl_vec& outputShapes) { - return convertVec(outputShapes); -} - -Capabilities uncheckedConvert(const V1_3::Capabilities& capabilities) { - return nnTryGetValue(convert(capabilities)); -} - -Capabilities::OperandPerformance uncheckedConvert( - const V1_3::Capabilities::OperandPerformance& operandPerformance) { - return nnTryGetValue(convert(operandPerformance)); -} - -Capabilities::PerformanceInfo uncheckedConvert(const V1_0::PerformanceInfo& performanceInfo) { - return nnTryGetValue(convert(performanceInfo)); -} - -Extension uncheckedConvert(const V1_2::Extension& extension) { - return nnTryGetValue(convert(extension)); -} - -std::vector uncheckedConvert(const hardware::hidl_vec& extensions) { - return convertVec(extensions); -} - -Extension::OperandTypeInformation uncheckedConvert( - const V1_2::Extension::OperandTypeInformation& info) { - return nnTryGetValue(convert(info)); -} - -OptionalTimeoutDuration uncheckedConvert(const V1_3::OptionalTimeoutDuration& timeoutDuration) { - return nnTryGetValue(convert(timeoutDuration)); -} - -Timing uncheckedConvert(const V1_2::Timing& timing) { - return nnTryGetValue(convert(timing)); -} - -V1_0::ErrorStatus convertToV1_0(ErrorStatus status) { - return static_cast(static_cast(status)); -} - -V1_3::ErrorStatus convertToV1_3(ErrorStatus status) { - return nnTryGetValue(V1_3::utils::convert(status)); -} - -V1_3::OperandType convertToV1_3(OperandType operandType) { - return nnTryGetValue(V1_3::utils::convert(operandType)); -} - -V1_3::OperationType convertToV1_3(OperationType operandType) { - return nnTryGetValue(V1_3::utils::convert(operandType)); -} - -V1_3::OperandLifeTime convertToV1_3(Operand::LifeTime lifetime) { - return nnTryGetValue(V1_3::utils::convert(lifetime)); -} - -V1_1::ExecutionPreference convertToV1_1(ExecutionPreference preference) { - return nnTryGetValue(V1_1::utils::convert(preference)); -} - -V1_3::Priority convertToV1_3(Priority priority) { - return nnTryGetValue(V1_3::utils::convert(priority)); -} - -V1_2::MeasureTiming convertToV1_2(MeasureTiming measure) { - return nnTryGetValue(V1_2::utils::convert(measure)); -} - -V1_0::DataLocation convertToV1_0(const DataLocation& location) { - return nnTryGetValue(V1_0::utils::convert(location)); -} - -V1_3::Operand convertToV1_3(const Operand& operand) { - return nnTryGetValue(V1_3::utils::convert(operand)); -} - -V1_2::Operand::ExtraParams convertToV1_2(const Operand::ExtraParams& params) { - return nnTryGetValue(V1_2::utils::convert(params)); -} - -V1_2::SymmPerChannelQuantParams convertToV1_2(const Operand::SymmPerChannelQuantParams& params) { - return nnTryGetValue(V1_2::utils::convert(params)); -} - -hardware::hidl_vec uncheckedConvert(const Operand::ExtensionParams& params) { - return params; -} - -V1_3::Operation convertToV1_3(const Operation& operation) { - return nnTryGetValue(V1_3::utils::convert(operation)); -} - -template -static hardware::hidl_vec convertVecToV1_0(const std::vector& items) { - hardware::hidl_vec result(items.size()); - std::transform(items.begin(), items.end(), result.begin(), - [](const CanonicalType& item) { return convertToV1_0(item); }); - return result; -} - -template -static hardware::hidl_vec convertVecToV1_2(const std::vector& items) { - hardware::hidl_vec result(items.size()); - std::transform(items.begin(), 
items.end(), result.begin(), - [](const CanonicalType& item) { return convertToV1_2(item); }); - return result; -} - -template -static hardware::hidl_vec convertVecToV1_3(const std::vector& items) { - hardware::hidl_vec result(items.size()); - std::transform(items.begin(), items.end(), result.begin(), - [](const CanonicalType& item) { return convertToV1_3(item); }); - return result; -} - -V1_2::OutputShape convertToV1_2(const OutputShape& outputShape) { - return nnTryGetValue(V1_2::utils::convert(outputShape)); -} - -hardware::hidl_vec convertToV1_2(const std::vector& outputShapes) { - return convertVecToV1_2(outputShapes); -} - -V1_3::Model convertToV1_3(const Model& model) { - return nnTryGetValue(V1_3::utils::convert(model)); -} - -V1_3::Subgraph convertToV1_3(const Model::Subgraph& subgraph) { - return nnTryGetValue(V1_3::utils::convert(subgraph)); -} - -V1_2::Model::ExtensionNameAndPrefix convertToV1_2(const Model::ExtensionNameAndPrefix& x) { - return nnTryGetValue(V1_2::utils::convert(x)); -} - -V1_3::Request convertToV1_3(const Request& request) { - return nnTryGetValue(V1_3::utils::convert(request)); -} - -V1_0::RequestArgument convertToV1_0(const Request::Argument& requestArgument) { - return nnTryGetValue(V1_0::utils::convert(requestArgument)); -} - -V1_3::Request::MemoryPool convertToV1_3(const Request::MemoryPool& memoryPool) { - return nnTryGetValue(V1_3::utils::convert(memoryPool)); -} - -std::vector uncheckedConvert( - const hardware::hidl_vec& memoryPools) { - return convertVec(memoryPools); -} - -V1_3::OptionalTimePoint convertToV1_3(const OptionalTimePoint& timePoint) { - return nnTryGetValue(V1_3::utils::convert(timePoint)); -} - -V1_3::OptionalTimeoutDuration convertToV1_3(const OptionalTimeoutDuration& timeoutDuration) { - return nnTryGetValue(V1_3::utils::convert(timeoutDuration)); -} - -V1_2::Timing convertToV1_2(const Timing& timing) { - return nnTryGetValue(V1_2::utils::convert(timing)); -} - -V1_3::BufferRole convertToV1_3(const BufferRole& bufferRole) { - return nnTryGetValue(V1_3::utils::convert(bufferRole)); -} - -hardware::hidl_vec convertToV1_3(const std::vector& bufferRoles) { - return convertVecToV1_3(bufferRoles); -} - -hardware::hidl_vec convertToV1_0(const Model::OperandValues& operandValues) { - return nnTryGetValue(V1_0::utils::convert(operandValues)); -} - -hardware::hidl_memory convertToV1_0(const Memory& memory) { - return nnTryGetValue(V1_0::utils::convert(memory)); -} - -Memory uncheckedConvert(const hardware::hidl_memory& memory) { - return nnTryGetValue(convert(memory)); -} - -hardware::hidl_vec convertToV1_0(const std::vector& memories) { - return convertVecToV1_0(memories); -} - -std::vector uncheckedConvert(const hardware::hidl_vec& memories) { - return convertVec(memories); -} - -std::vector uncheckedConvert(const hardware::hidl_vec& subgraphs) { - return convertVec(subgraphs); -} - -std::vector uncheckedConvert(const hardware::hidl_vec& operands) { - return convertVec(operands); -} - -} // namespace nn -} // namespace android diff --git a/nn/common/include/LegacyHalUtils.h b/nn/common/include/LegacyHalUtils.h new file mode 100644 index 000000000..cdaf91172 --- /dev/null +++ b/nn/common/include/LegacyHalUtils.h @@ -0,0 +1,611 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_FRAMEWORKS_ML_NN_COMMON_UTILS_H
+#define ANDROID_FRAMEWORKS_ML_NN_COMMON_UTILS_H
+
+#include <android-base/logging.h>
+
+#include <set>
+#include <string>
+#include <tuple>
+#include <utility>
+#include <vector>
+
+#include "HalInterfaces.h"
+#include "NeuralNetworks.h"
+#include "OperationResolver.h"
+#include "ValidateHal.h"
+#include "nnapi/TypeUtils.h"
+#include "nnapi/Types.h"
+
+namespace android {
+namespace nn {
+
+// The number of data types (OperandCode) defined in NeuralNetworks.h.
+const int kNumberOfDataTypes = 16;
+
+// The number of operation types (OperationCode) defined in NeuralNetworks.h.
+const int kNumberOfOperationTypes = 102;
+static_assert(kNumberOfOperationTypes == BuiltinOperationResolver::kNumberOfOperationTypes);
+
+// The number of execution preferences defined in NeuralNetworks.h.
+const int kNumberOfPreferences = 3;
+
+// The number of data types (OperandCode) defined in NeuralNetworksOEM.h.
+const int kNumberOfDataTypesOEM = 2;
+
+// The number of operation types (OperationCode) defined in NeuralNetworksOEM.h.
+const int kNumberOfOperationTypesOEM = 1;
+
+// The lowest number assigned to any OEM Code in NeuralNetworksOEM.h.
+const int kOEMCodeBase = 10000;
+
+/* IMPORTANT: if you change the following list, don't
+ * forget to update the corresponding 'tags' table in
+ * the initVlogMask() function implemented in Utils.cpp.
+ */
+enum VLogFlags { MODEL = 0, COMPILATION, EXECUTION, CPUEXE, MANAGER, DRIVER, MEMORY };
+
+#define VLOG_IS_ON(TAG) ((vLogMask & (1 << (TAG))) != 0)
+
+#define VLOG(TAG)                 \
+    if (LIKELY(!VLOG_IS_ON(TAG))) \
+        ;                         \
+    else                          \
+        LOG(INFO)
+
+extern int vLogMask;
+void initVLogMask();
+
+#ifdef NN_DEBUGGABLE
+#define SHOW_IF_DEBUG(msg) msg
+#else
+#define SHOW_IF_DEBUG(msg) ""
+#endif
+
+// DEPRECATED(b/118737105). Use CHECK.
+#define nnAssert(v) CHECK(v)
+
+#define NN_RETURN_IF_ERROR(expr)                      \
+    do {                                              \
+        int _errorCode = (expr);                      \
+        if (_errorCode != ANEURALNETWORKS_NO_ERROR) { \
+            return _errorCode;                        \
+        }                                             \
+    } while (0)
+
+// Make a TimeoutDuration from a duration in nanoseconds. If the value exceeds
+// the max duration, return the maximum expressible duration.
+TimeoutDuration makeTimeoutDuration(uint64_t nanoseconds);
+
+// Type to represent a deadline time point across processes.
+using Deadline = std::chrono::steady_clock::time_point;
+
+// Make a Deadline from a duration. If the sum of the current time and the
+// duration exceeds the max time, return a time point holding the maximum
+// expressible time.
+Deadline makeDeadline(TimeoutDuration duration);
+inline Deadline makeDeadline(uint64_t duration) {
+    return makeDeadline(makeTimeoutDuration(duration));
+}
+
+// Convenience function. If the duration is provided, this function creates a
+// Deadline using makeDeadline. If the duration is not provided, this function
+// returns std::nullopt.
+inline std::optional<Deadline> makeDeadline(OptionalTimeoutDuration duration) {
+    return duration.has_value() ? makeDeadline(*duration) : std::optional<Deadline>{};
+}
+inline std::optional<Deadline> makeDeadline(std::optional<uint64_t> duration) {
+    return duration.has_value() ? makeDeadline(*duration) : std::optional<Deadline>{};
+}
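
For example, a caller holding a nanosecond budget can chain these helpers and later poll the result with hasDeadlinePassed, declared just below; the one-second value is illustrative:

    const Deadline deadline = makeDeadline(uint64_t{1'000'000'000});  // 1 s in ns
    // ... perform work ...
    if (hasDeadlinePassed(deadline)) {
        // Abort and report the appropriate MISSED_DEADLINE_* status.
    }
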
+
+// Make a TimeoutDuration from a duration in nanoseconds. If the value exceeds
+// the max duration, return the maximum expressible duration.
+TimeoutDuration makeTimeoutDuration(uint64_t nanoseconds);
+
+// Type to represent a deadline time point across processes.
+using Deadline = std::chrono::steady_clock::time_point;
+
+// Make a Deadline from a duration. If the sum of the current time and the
+// duration exceeds the max time, return a time point holding the maximum
+// expressible time.
+Deadline makeDeadline(TimeoutDuration duration);
+inline Deadline makeDeadline(uint64_t duration) {
+    return makeDeadline(makeTimeoutDuration(duration));
+}
+
+// Convenience function. If the duration is provided, this function creates a
+// Deadline using makeDeadline. If the duration is not provided, this function
+// returns std::nullopt.
+inline std::optional<Deadline> makeDeadline(OptionalTimeoutDuration duration) {
+    return duration.has_value() ? makeDeadline(*duration) : std::optional<Deadline>{};
+}
+inline std::optional<Deadline> makeDeadline(std::optional<uint64_t> duration) {
+    return duration.has_value() ? makeDeadline(*duration) : std::optional<Deadline>{};
+}
+
+// Make an optional Deadline from an OptionalTimePoint. If
+// timePoint.nanosecondsSinceEpoch cannot be represented in Deadline, return a
+// time point holding the maximum Deadline. If the OptionalTimePoint is none,
+// this function returns std::nullopt.
+std::optional<Deadline> makeDeadline(const V1_3::OptionalTimePoint& timePoint);
+
+// Returns true if the deadline has passed. Returns false if either the
+// deadline has not been exceeded or the deadline is not present.
+bool hasDeadlinePassed(const std::optional<Deadline>& deadline);
+
+// Make an OptionalTimePoint from an optional Deadline. If the Deadline is not
+// provided, this function returns none for OptionalTimePoint.
+OptionalTimePoint makeTimePoint(const std::optional<Deadline>& deadline);
+
+// Ensure that every user of FalseyErrorStream is linked to the
+// correct instance, using the correct LOG_TAG.
+namespace {
+
+template <HalVersion version>
+struct VersionedType {};
+
+template <>
+struct VersionedType<HalVersion::V1_2> {
+    using OperandPerformance = V1_2::Capabilities::OperandPerformance;
+    using OperandType = V1_2::OperandType;
+};
+
+template <>
+struct VersionedType<HalVersion::V1_3> {
+    using OperandPerformance = V1_3::Capabilities::OperandPerformance;
+    using OperandType = V1_3::OperandType;
+};
+
+template <HalVersion version>
+using VersionedOperandPerformance = typename VersionedType<version>::OperandPerformance;
+template <HalVersion version>
+using VersionedOperandType = typename VersionedType<version>::OperandType;
+
+}  // namespace
+
+// Return a vector with one entry for each non-extension OperandType except
+// SUBGRAPH, set to the specified PerformanceInfo value. The vector will be
+// sorted by OperandType.
+//
+// Control flow (OperandType::SUBGRAPH) operation performance is specified
+// separately using Capabilities::ifPerformance and
+// Capabilities::whilePerformance.
+template <HalVersion version>
+hardware::hidl_vec<VersionedOperandPerformance<version>> nonExtensionOperandPerformance(
+        V1_0::PerformanceInfo perf);
+
+// Update the vector entry corresponding to the specified OperandType with the
+// specified PerformanceInfo value. The vector must already have an entry for
+// that OperandType, and must be sorted by OperandType.
+void update(hardware::hidl_vec<V1_2::Capabilities::OperandPerformance>* operandPerformance,
+            V1_2::OperandType type, V1_0::PerformanceInfo perf);
+void update(hardware::hidl_vec<V1_3::Capabilities::OperandPerformance>* operandPerformance,
+            V1_3::OperandType type, V1_0::PerformanceInfo perf);
+
+// Look for a vector entry corresponding to the specified OperandType. If
+// found, return the associated PerformanceInfo. If not, return a pessimistic
+// PerformanceInfo (FLT_MAX). The vector must be sorted by OperandType.
+V1_0::PerformanceInfo lookup(
+        const hardware::hidl_vec<V1_2::Capabilities::OperandPerformance>& operandPerformance,
+        V1_2::OperandType type);
+V1_0::PerformanceInfo lookup(
+        const hardware::hidl_vec<V1_3::Capabilities::OperandPerformance>& operandPerformance,
+        V1_3::OperandType type);
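nonExtensionOperandPerformance, update, and lookup together implement a performance table sorted by OperandType. A sketch of the intended call sequence on the driver side, using the V1_3 overloads declared above (the perf numbers are arbitrary examples):

// Sketch: build a performance table, special-case one type, then query it.
using namespace android::nn;

void performanceTableExample() {
    const V1_0::PerformanceInfo base = {.execTime = 0.8f, .powerUsage = 0.8f};
    auto operandPerformance = nonExtensionOperandPerformance<HalVersion::V1_3>(base);
    // Advertise faster float32 tensors; the entry must already exist.
    update(&operandPerformance, V1_3::OperandType::TENSOR_FLOAT32,
           {.execTime = 0.5f, .powerUsage = 0.5f});
    // lookup() returns the updated entry; a missing type would get FLT_MAX.
    const V1_0::PerformanceInfo info =
            lookup(operandPerformance, V1_3::OperandType::TENSOR_FLOAT32);
    CHECK_EQ(info.execTime, 0.5f);
}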
+
+// Returns true if an operand type is an extension type.
+bool isExtensionOperandType(V1_3::OperandType type);
+
+// Returns true if an operation type is an extension type.
+bool isExtensionOperationType(V1_3::OperationType type);
+
+// Returns the amount of space needed to store a value of the specified
+// dimensions and type. For a tensor with unspecified rank or at least one
+// unspecified dimension, returns zero.
+//
+// Aborts if the specified type is an extension type.
+// Aborts if the size would overflow the return type.
+//
+// See also TypeManager::getSizeOfData(OperandType, const std::vector<uint32_t>&).
+uint32_t nonExtensionOperandSizeOfData(V1_3::OperandType type,
+                                       const std::vector<uint32_t>& dimensions);
+uint32_t nonExtensionOperandSizeOfData(OperandType type, const std::vector<uint32_t>& dimensions);
+
+// Returns the amount of space needed to store a value of the dimensions and
+// type of this operand. For a tensor with unspecified rank or at least one
+// unspecified dimension, returns zero.
+//
+// Aborts if the specified type is an extension type.
+// Aborts if the size would overflow the return type.
+//
+// See also TypeManager::getSizeOfData(const Operand&).
+inline uint32_t nonExtensionOperandSizeOfData(const Operand& operand) {
+    return nonExtensionOperandSizeOfData(operand.type, operand.dimensions);
+}
+inline uint32_t nonExtensionOperandSizeOfData(const V1_3::Operand& operand) {
+    return nonExtensionOperandSizeOfData(operand.type, operand.dimensions);
+}
+
+// Returns the amount of space needed to store a value of the specified
+// dimensions and element size. For a tensor with unspecified rank or at least
+// one unspecified dimension, returns zero.
+//
+// Aborts if the size would overflow the return type.
+//
+// See also TypeManager::getSizeOfData(const Operand&).
+uint32_t sizeOfTensorData(uint32_t sizeOfElement, const std::vector<uint32_t>& dimensions);
+
+// Returns true if the amount of space needed to store a value of the specified
+// dimensions and element size overflows the uint32_t type.
+//
+// Aborts if the specified type is an extension type.
+//
+// See also TypeManager::sizeOfDataOverflowsUInt32(OperandType, const std::vector<uint32_t>&).
+bool nonExtensionOperandSizeOfDataOverflowsUInt32(OperandType type,
+                                                  const std::vector<uint32_t>& dimensions);
+bool nonExtensionOperandSizeOfDataOverflowsUInt32(V1_3::OperandType type,
+                                                  const std::vector<uint32_t>& dimensions);
+
+// Returns true if the amount of space needed to store a value of the specified
+// dimensions and element size overflows the uint32_t type.
+//
+// See also TypeManager::sizeOfDataOverflowsUInt32(OperandType, const std::vector<uint32_t>&).
+bool sizeOfTensorDataOverflowsUInt32(uint32_t elementSize, const std::vector<uint32_t>& dimensions);
+
+// Returns true if a non-extension operand type is a scalar type.
+//
+// Aborts if the specified type is an extension type.
+//
+// See also TypeManager::isTensorType(OperandType).
+bool nonExtensionOperandTypeIsScalar(int type);
+
+// Returns the name of the operation type in ASCII.
+std::string getOperationName(V1_3::OperationType opCode);
+
+// Returns the name of the operand type in ASCII.
+std::string getOperandTypeName(V1_3::OperandType type);
+
+// Whether an operand of tensor type has unspecified dimensions.
+//
+// Undefined behavior if the operand type is a scalar type.
+bool tensorHasUnspecifiedDimensions(int type, const uint32_t* dim, uint32_t dimCount);
+bool tensorHasUnspecifiedDimensions(V1_3::OperandType type,
+                                    const std::vector<uint32_t>& dimensions);
+bool tensorHasUnspecifiedDimensions(OperandType type, const std::vector<uint32_t>& dimensions);
+bool tensorHasUnspecifiedDimensions(OperandType type, const Dimensions& dimensions);
+bool tensorHasUnspecifiedDimensions(const Operand& operand);
+bool tensorHasUnspecifiedDimensions(const V1_3::Operand& operand);
+bool tensorHasUnspecifiedDimensions(const ANeuralNetworksOperandType* type);
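Per the comments above, sizeOfTensorData is the element size times the product of the dimensions, with zero reserved for tensors whose rank or some dimension is still unspecified. A self-contained sketch of that contract (this mirrors the documented behavior, not the production implementation, which additionally aborts on uint32_t overflow):

#include <cstdint>
#include <vector>

// Sketch of the documented sizeOfTensorData contract.
uint32_t sizeOfTensorDataSketch(uint32_t sizeOfElement,
                                const std::vector<uint32_t>& dimensions) {
    if (dimensions.empty()) return 0;  // unspecified rank => size unknown
    uint32_t size = sizeOfElement;
    for (uint32_t d : dimensions) {
        if (d == 0) return 0;  // unspecified dimension => size unknown
        size *= d;
    }
    return size;
}

// E.g. a [2, 3] tensor of 4-byte elements occupies 2 * 3 * 4 = 24 bytes,
// while a [2, 0] tensor reports 0.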
+
+// Returns the number of padding bytes needed to align data of the
+// specified length. It aligns an object of length:
+//     2, 3 on a 2 byte boundary,
+//     4+  on a 4 byte boundary.
+// We may want to have different alignments for tensors.
+// TODO: This is arbitrary, more a proof of concept. We need
+// to determine what this should be.
+uint32_t alignBytesNeeded(uint32_t index, size_t length);
+
+// Does a detailed LOG(INFO) of the model.
+void logModelToInfo(const V1_0::Model& model);
+void logModelToInfo(const V1_1::Model& model);
+void logModelToInfo(const V1_2::Model& model);
+void logModelToInfo(const V1_3::Model& model);
+void logModelToInfo(const Model& model);
+
+inline std::string toString(uint32_t obj) {
+    return std::to_string(obj);
+}
+
+template <typename Type>
+std::string toString(const std::vector<Type>& range) {
+    std::string os = "[";
+    for (size_t i = 0; i < range.size(); ++i) {
+        os += (i == 0 ? "" : ", ") + toString(range[i]);
+    }
+    return os += "]";
+}
+
+template <typename A, typename B>
+std::string toString(const std::pair<A, B>& pair) {
+    std::ostringstream oss;
+    oss << "(" << pair.first << ", " << pair.second << ")";
+    return oss.str();
+}
+
+inline bool validCode(uint32_t codeCount, uint32_t codeCountOEM, uint32_t code) {
+    return (code < codeCount) || (code >= kOEMCodeBase && (code - kOEMCodeBase) < codeCountOEM);
+}
+
+bool validateOperandSymmPerChannelQuantParams(
+        const V1_3::Operand& halOperand,
+        const ANeuralNetworksSymmPerChannelQuantParams& channelQuant, const char* tag);
+
+// Validates an operand type.
+//
+// extensionOperandTypeInfo must be nullptr iff the type is not an extension type.
+//
+// If allowPartial is true, the dimensions may be underspecified.
+int validateOperandType(const ANeuralNetworksOperandType& type,
+                        const Extension::OperandTypeInformation* const extensionOperandTypeInfo,
+                        const char* tag, bool allowPartial);
+int validateOperandList(uint32_t count, const uint32_t* list, uint32_t operandCount,
+                        const char* tag);
+
+// A set of functions to help validate models containing IF or WHILE operations.
+struct SubgraphValidationHelper {
+    // Checks if a given operand is a SUBGRAPH operand with a valid offset.
+    std::function<bool(const Operand&)> isValidSubgraphReference;
+    // Gets the input count of a subgraph referenced by a given operand.
+    std::function<uint32_t(const Operand&)> getSubgraphInputCount;
+    // Gets the output count of a subgraph referenced by a given operand.
+    std::function<uint32_t(const Operand&)> getSubgraphOutputCount;
+    // Gets the specified input operand of a subgraph referenced by a given operand.
+    std::function<const Operand*(const Operand&, uint32_t)> getSubgraphInputOperand;
+    // Gets the specified output operand of a subgraph referenced by a given operand.
+    std::function<const Operand*(const Operand&, uint32_t)> getSubgraphOutputOperand;
+    // Whether control flow operations with inner or outer input or output
+    // operands of unknown size are allowed.
+    bool allowControlFlowOperationWithOperandOfUnknownSize;
+};
+
+// Returns ANEURALNETWORKS_NO_ERROR if the corresponding operation is defined and can handle the
+// provided operand types in the given HAL version, otherwise returns ANEURALNETWORKS_BAD_DATA.
+// The last argument is only used for validating IF and WHILE operations.
+int validateOperation(ANeuralNetworksOperationType opType, uint32_t inputCount,
+                      const uint32_t* inputIndexes, uint32_t outputCount,
+                      const uint32_t* outputIndexes, const std::vector<Operand>& operands,
+                      HalVersion halVersion, const SubgraphValidationHelper& helper);
+
+inline size_t getSizeFromInts(int lower, int higher) {
+    return (uint32_t)(lower) + ((uint64_t)(uint32_t)(higher) << 32);
+}
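The alignBytesNeeded comment pins down the rule: lengths 2 and 3 align to a 2-byte boundary, lengths 4 and up to a 4-byte boundary, and length 1 needs no alignment. A worked sketch of that rule (this follows the documented behavior; the production definition lives in Utils.cpp):

#include <cstddef>
#include <cstdint>

// Sketch: pick the alignment from the length, then return how many bytes
// 'index' must advance to reach that alignment.
uint32_t alignBytesNeededSketch(uint32_t index, size_t length) {
    uint32_t alignment = 1;
    if (length >= 4) {
        alignment = 4;
    } else if (length >= 2) {
        alignment = 2;
    }
    // E.g. index 6 with length 8: alignment 4, so 2 padding bytes are needed.
    return (alignment - index % alignment) % alignment;
}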
+
+// Convert ANEURALNETWORKS_* result code to ErrorStatus.
+// Not guaranteed to be a 1-to-1 mapping.
+ErrorStatus convertResultCodeToErrorStatus(int resultCode);
+V1_3::ErrorStatus convertResultCodeToHalErrorStatus(int resultCode);
+
+// Convert ErrorStatus to ANEURALNETWORKS_* result code.
+// Not guaranteed to be a 1-to-1 mapping.
+int convertErrorStatusToResultCode(ErrorStatus status);
+int convertErrorStatusToResultCode(V1_3::ErrorStatus status);
+
+// Convert execution results to runtime format. Additionally checks that the
+// returned results abide by the HAL specification, and logs an error if the
+// result violates the specification.
+std::tuple<int, std::vector<OutputShape>, Timing> getExecutionResult(
+        V1_3::ErrorStatus status, const hardware::hidl_vec<V1_2::OutputShape>& outputShapes,
+        const V1_2::Timing& timing);
+std::tuple<int, std::vector<OutputShape>, Timing> getExecutionResult(
+        ErrorStatus status, std::vector<OutputShape> outputShapes, Timing timing);
+
+// Versioning
+
+bool compliantWithV1_0(const V1_0::Capabilities& capabilities);
+bool compliantWithV1_0(const V1_1::Capabilities& capabilities);
+bool compliantWithV1_0(const V1_2::Capabilities& capabilities);
+bool compliantWithV1_0(const V1_3::Capabilities& capabilities);
+bool compliantWithV1_1(const V1_0::Capabilities& capabilities);
+bool compliantWithV1_1(const V1_1::Capabilities& capabilities);
+bool compliantWithV1_1(const V1_2::Capabilities& capabilities);
+bool compliantWithV1_1(const V1_3::Capabilities& capabilities);
+bool compliantWithV1_2(const V1_0::Capabilities& capabilities);
+bool compliantWithV1_2(const V1_1::Capabilities& capabilities);
+bool compliantWithV1_2(const V1_2::Capabilities& capabilities);
+bool compliantWithV1_2(const V1_3::Capabilities& capabilities);
+bool compliantWithV1_3(const V1_0::Capabilities& capabilities);
+bool compliantWithV1_3(const V1_1::Capabilities& capabilities);
+bool compliantWithV1_3(const V1_2::Capabilities& capabilities);
+bool compliantWithV1_3(const V1_3::Capabilities& capabilities);
+
+// If noncompliantOperations != nullptr, then
+//     precondition: noncompliantOperations->empty()
+//     postcondition: *noncompliantOperations consists of the indices of the noncompliant
+//                    operations; if the compliance check fails for some reason
+//                    other than a noncompliant operation,
+//                    *noncompliantOperations consists of the indices of all operations
+bool compliantWithV1_0(const V1_0::Model& model);
+bool compliantWithV1_0(const V1_1::Model& model);
+bool compliantWithV1_0(const V1_2::Model& model,
+                       std::set<uint32_t>* noncompliantOperations = nullptr);
+bool compliantWithV1_0(const V1_3::Model& model,
+                       std::set<uint32_t>* noncompliantOperations = nullptr);
+bool compliantWithV1_1(const V1_0::Model& model);
+bool compliantWithV1_1(const V1_1::Model& model);
+bool compliantWithV1_1(const V1_2::Model& model,
+                       std::set<uint32_t>* noncompliantOperations = nullptr);
+bool compliantWithV1_1(const V1_3::Model& model,
+                       std::set<uint32_t>* noncompliantOperations = nullptr);
+bool compliantWithV1_2(const V1_0::Model& model);
+bool compliantWithV1_2(const V1_1::Model& model);
+bool compliantWithV1_2(const V1_2::Model& model,
+                       std::set<uint32_t>* noncompliantOperations = nullptr);
+bool compliantWithV1_2(const V1_3::Model& model,
+                       std::set<uint32_t>* noncompliantOperations = nullptr);
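The model-compliance overloads let the runtime ask whether a newer-HAL model can be handed to an older driver and, if not, which operations block the downgrade. A usage sketch (the V1_3 model argument is assumed to exist; describeCompliance is a hypothetical caller):

// Sketch: check whether a V1_3 model can be lowered to the V1_0 HAL.
void describeCompliance(const V1_3::Model& model) {
    std::set<uint32_t> noncompliantOperations;
    if (compliantWithV1_0(model, &noncompliantOperations)) {
        VLOG(DRIVER) << "model can be expressed in the V1_0 HAL";
    } else {
        // Per the comment above, on failure the set names the operations
        // (by index) that prevent the downgrade.
        VLOG(DRIVER) << "blocked by " << noncompliantOperations.size()
                     << " operation(s)";
    }
}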
+
+V1_0::ErrorStatus convertToV1_0(V1_0::ErrorStatus status);
+V1_0::ErrorStatus convertToV1_0(V1_3::ErrorStatus status);
+V1_3::ErrorStatus convertToV1_3(V1_0::ErrorStatus status);
+V1_3::ErrorStatus convertToV1_3(V1_3::ErrorStatus status);
+
+V1_0::Capabilities convertToV1_0(const V1_0::Capabilities& capabilities);
+V1_0::Capabilities convertToV1_0(const V1_1::Capabilities& capabilities);
+V1_0::Capabilities convertToV1_0(const V1_2::Capabilities& capabilities);
+V1_0::Capabilities convertToV1_0(const V1_3::Capabilities& capabilities);
+V1_1::Capabilities convertToV1_1(const V1_0::Capabilities& capabilities);
+V1_1::Capabilities convertToV1_1(const V1_1::Capabilities& capabilities);
+V1_1::Capabilities convertToV1_1(const V1_2::Capabilities& capabilities);
+V1_1::Capabilities convertToV1_1(const V1_3::Capabilities& capabilities);
+V1_2::Capabilities convertToV1_2(const V1_0::Capabilities& capabilities);
+V1_2::Capabilities convertToV1_2(const V1_1::Capabilities& capabilities);
+V1_2::Capabilities convertToV1_2(const V1_2::Capabilities& capabilities);
+V1_2::Capabilities convertToV1_2(const V1_3::Capabilities& capabilities);
+V1_3::Capabilities convertToV1_3(const V1_0::Capabilities& capabilities);
+V1_3::Capabilities convertToV1_3(const V1_1::Capabilities& capabilities);
+V1_3::Capabilities convertToV1_3(const V1_2::Capabilities& capabilities);
+V1_3::Capabilities convertToV1_3(const V1_3::Capabilities& capabilities);
+
+V1_0::Model convertToV1_0(const V1_0::Model& model);
+V1_0::Model convertToV1_0(const V1_1::Model& model);
+V1_0::Model convertToV1_0(const V1_2::Model& model);
+V1_0::Model convertToV1_0(const V1_3::Model& model);
+V1_1::Model convertToV1_1(const V1_0::Model& model);
+V1_1::Model convertToV1_1(const V1_1::Model& model);
+V1_1::Model convertToV1_1(const V1_2::Model& model);
+V1_1::Model convertToV1_1(const V1_3::Model& model);
+V1_2::Model convertToV1_2(const V1_0::Model& model);
+V1_2::Model convertToV1_2(const V1_1::Model& model);
+V1_2::Model convertToV1_2(const V1_2::Model& model);
+V1_2::Model convertToV1_2(const V1_3::Model& model);
+V1_3::Model convertToV1_3(const V1_0::Model& model);
+V1_3::Model convertToV1_3(const V1_1::Model& model);
+V1_3::Model convertToV1_3(const V1_2::Model& model);
+V1_3::Model convertToV1_3(const V1_3::Model& model);
+
+V1_0::OperationType uncheckedConvertToV1_0(V1_3::OperationType type);
+V1_1::OperationType uncheckedConvertToV1_1(V1_3::OperationType type);
+V1_2::OperationType uncheckedConvertToV1_2(V1_3::OperationType type);
+
+V1_0::Operand convertToV1_0(const V1_2::Operand& operand);
+V1_0::Operand convertToV1_0(const V1_3::Operand& operand);
+V1_2::Operand convertToV1_2(const V1_0::Operand& operand);
+V1_2::Operand convertToV1_2(const V1_3::Operand& operand);
+V1_3::Operand convertToV1_3(const V1_0::Operand& operand);
+V1_3::Operand convertToV1_3(const V1_2::Operand& operand);
+V1_3::Operand convertToV1_3(const V1_3::Operand& operand);
+
+hardware::hidl_vec<V1_0::Operand> convertToV1_0(const hardware::hidl_vec<V1_0::Operand>& operands);
+hardware::hidl_vec<V1_0::Operand> convertToV1_0(const hardware::hidl_vec<V1_2::Operand>& operands);
+hardware::hidl_vec<V1_0::Operand> convertToV1_0(const hardware::hidl_vec<V1_3::Operand>& operands);
+hardware::hidl_vec<V1_2::Operand> convertToV1_2(const hardware::hidl_vec<V1_0::Operand>& operands);
+hardware::hidl_vec<V1_2::Operand> convertToV1_2(const hardware::hidl_vec<V1_2::Operand>& operands);
+hardware::hidl_vec<V1_2::Operand> convertToV1_2(const hardware::hidl_vec<V1_3::Operand>& operands);
+hardware::hidl_vec<V1_3::Operand> convertToV1_3(const hardware::hidl_vec<V1_0::Operand>& operands);
+hardware::hidl_vec<V1_3::Operand> convertToV1_3(const hardware::hidl_vec<V1_2::Operand>& operands);
+hardware::hidl_vec<V1_3::Operand> convertToV1_3(const hardware::hidl_vec<V1_3::Operand>& operands);
+
+bool compliantWithV1_0(const V1_0::Request& request);
+bool compliantWithV1_0(const V1_3::Request& request);
+bool compliantWithV1_2(const V1_3::Request& request);
+
+V1_0::Request convertToV1_0(const V1_0::Request& request);
+V1_0::Request convertToV1_0(const V1_3::Request& request);
+V1_0::Request convertToV1_2(const V1_3::Request& request);
+V1_3::Request convertToV1_3(const V1_0::Request& request);
+V1_3::Request convertToV1_3(const V1_3::Request& request);
+
+bool compliantWithV1_0(V1_0::OperandLifeTime lifetime);
+bool compliantWithV1_0(V1_3::OperandLifeTime lifetime);
+bool compliantWithV1_3(V1_0::OperandLifeTime lifetime);
+bool compliantWithV1_3(V1_3::OperandLifeTime lifetime);
+
+V1_0::OperandLifeTime convertToV1_0(V1_0::OperandLifeTime lifetime);
+V1_0::OperandLifeTime convertToV1_0(V1_3::OperandLifeTime lifetime);
+V1_3::OperandLifeTime convertToV1_3(V1_0::OperandLifeTime lifetime);
+V1_3::OperandLifeTime convertToV1_3(V1_3::OperandLifeTime lifetime);
+
+constexpr V1_3::Priority convertToHalPriority(int32_t priority) {
+    switch (priority) {
+        case ANEURALNETWORKS_PRIORITY_LOW:
+            return V1_3::Priority::LOW;
+        case ANEURALNETWORKS_PRIORITY_MEDIUM:
+            return V1_3::Priority::MEDIUM;
+        case ANEURALNETWORKS_PRIORITY_HIGH:
+            return V1_3::Priority::HIGH;
+    }
+    LOG(FATAL) << "unrecognized priority: " << priority;
+    return {};
+}
+
+constexpr Priority convertToCanonicalPriority(int32_t priority) {
+    switch (priority) {
+        case ANEURALNETWORKS_PRIORITY_LOW:
+            return Priority::LOW;
+        case ANEURALNETWORKS_PRIORITY_MEDIUM:
+            return Priority::MEDIUM;
+        case ANEURALNETWORKS_PRIORITY_HIGH:
+            return Priority::HIGH;
+    }
+    LOG(FATAL) << "unrecognized priority: " << priority;
+    return {};
+}
+
+// The function syncWait() has the same semantics as the system function
+// ::sync_wait(), except that the syncWait() return value is semantically
+// richer. The timeout parameter is in msecs.
+enum class FenceState {
+    ACTIVE,    // fence has not been signaled
+    SIGNALED,  // fence has been signaled
+    ERROR,     // fence has been placed in the error state
+    UNKNOWN,   // either bad argument passed to syncWait(), or internal error
+};
+FenceState syncWait(int fd, int timeout);
+
+#ifdef NN_DEBUGGABLE
+uint32_t getProp(const char* str, uint32_t defaultValue = 0);
+#endif  // NN_DEBUGGABLE
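syncWait folds the several outcomes of ::sync_wait into an explicit FenceState instead of overloading an errno-style return. A caller-side sketch (waitForFence is hypothetical; the fence fd is assumed to come from a driver):

// Sketch: wait up to 100 ms on a sync fence fd obtained from a driver.
bool waitForFence(int fenceFd) {
    switch (syncWait(fenceFd, /*timeout=*/100)) {
        case FenceState::SIGNALED:
            return true;   // work completed
        case FenceState::ACTIVE:
            return false;  // timed out; fence still pending
        case FenceState::ERROR:
        case FenceState::UNKNOWN:
            return false;  // fence errored out or the call itself failed
    }
    return false;
}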
+
+// DEPRECATED. Use checked conversions from nnapi/hal/1.X/Conversions.h.
+Capabilities::OperandPerformance uncheckedConvert(
+        const V1_3::Capabilities::OperandPerformance& operandPerformance);
+Capabilities::PerformanceInfo uncheckedConvert(const V1_0::PerformanceInfo& performanceInfo);
+Capabilities uncheckedConvert(const V1_3::Capabilities& capabilities);
+DataLocation uncheckedConvert(const V1_0::DataLocation& location);
+ErrorStatus uncheckedConvert(V1_0::ErrorStatus status);
+ErrorStatus uncheckedConvert(V1_3::ErrorStatus status);
+Extension::OperandTypeInformation uncheckedConvert(const V1_2::Extension::OperandTypeInformation&);
+Extension uncheckedConvert(const V1_2::Extension& extension);
+hardware::hidl_vec<uint8_t> uncheckedConvert(const Operand::ExtensionParams& params);
+MeasureTiming uncheckedConvert(V1_2::MeasureTiming measure);
+Memory uncheckedConvert(const hardware::hidl_memory& memory);
+Model::ExtensionNameAndPrefix uncheckedConvert(const V1_2::Model::ExtensionNameAndPrefix&);
+Model::Subgraph uncheckedConvert(const V1_3::Subgraph& subgraph);
+Model uncheckedConvert(const V1_3::Model& model);
+Operand::ExtensionParams uncheckedConvert(const hardware::hidl_vec<uint8_t>& params);
+Operand::ExtraParams uncheckedConvert(const V1_2::Operand::ExtraParams& params);
+Operand::LifeTime uncheckedConvert(V1_3::OperandLifeTime lifetime);
+Operand::SymmPerChannelQuantParams uncheckedConvert(const V1_2::SymmPerChannelQuantParams& params);
+OperandType uncheckedConvert(V1_3::OperandType operandType);
+Operand uncheckedConvert(const V1_3::Operand& operand);
+OperationType uncheckedConvert(V1_3::OperationType operationType);
+Operation uncheckedConvert(const V1_3::Operation& operation);
+OptionalTimeoutDuration uncheckedConvert(const V1_3::OptionalTimeoutDuration& timeoutDuration);
+OutputShape uncheckedConvert(const V1_2::OutputShape& outputShape);
+Request::Argument uncheckedConvert(const V1_0::RequestArgument& requestArgument);
+Request::MemoryPool uncheckedConvert(const V1_3::Request::MemoryPool& memoryPool);
+Request uncheckedConvert(const V1_3::Request& request);
+std::vector<Extension> uncheckedConvert(const hardware::hidl_vec<V1_2::Extension>& extensions);
+std::vector<Memory> uncheckedConvert(const hardware::hidl_vec<hardware::hidl_memory>& memories);
+std::vector<Model::Subgraph> uncheckedConvert(const hardware::hidl_vec<V1_3::Subgraph>& subgraphs);
+std::vector<Operand> uncheckedConvert(const hardware::hidl_vec<V1_3::Operand>& operands);
+std::vector<OutputShape> uncheckedConvert(
+        const hardware::hidl_vec<V1_2::OutputShape>& outputShapes);
+std::vector<Request::MemoryPool> uncheckedConvert(
+        const hardware::hidl_vec<V1_3::Request::MemoryPool>& memoryPools);
+Timing uncheckedConvert(const V1_2::Timing& timing);
+
+// DEPRECATED. Use conversions from nnapi/hal/1.X/Conversions.h.
+hardware::hidl_memory convertToV1_0(const Memory& memory);
+hardware::hidl_vec<hardware::hidl_memory> convertToV1_0(const std::vector<Memory>& memories);
+hardware::hidl_vec<uint8_t> convertToV1_0(const Model::OperandValues& operandValues);
+hardware::hidl_vec<V1_2::OutputShape> convertToV1_2(const std::vector<OutputShape>& outputShapes);
+hardware::hidl_vec<V1_3::BufferRole> convertToV1_3(const std::vector<BufferRole>& bufferRoles);
+V1_0::DataLocation convertToV1_0(const DataLocation& location);
+V1_0::ErrorStatus convertToV1_0(ErrorStatus status);
+V1_0::RequestArgument convertToV1_0(const Request::Argument& requestArgument);
+V1_1::ExecutionPreference convertToV1_1(ExecutionPreference preference);
+V1_2::MeasureTiming convertToV1_2(MeasureTiming measure);
+V1_2::Model::ExtensionNameAndPrefix convertToV1_2(const Model::ExtensionNameAndPrefix&);
+V1_2::Operand::ExtraParams convertToV1_2(const Operand::ExtraParams& params);
+V1_2::OutputShape convertToV1_2(const OutputShape& outputShape);
+V1_2::SymmPerChannelQuantParams convertToV1_2(const Operand::SymmPerChannelQuantParams& params);
+V1_2::Timing convertToV1_2(const Timing& timing);
+V1_3::BufferRole convertToV1_3(const BufferRole& bufferRole);
+V1_3::ErrorStatus convertToV1_3(ErrorStatus status);
+V1_3::Model convertToV1_3(const Model& model);
+V1_3::Operand convertToV1_3(const Operand& operand);
+V1_3::OperandLifeTime convertToV1_3(Operand::LifeTime lifetime);
+V1_3::OperandType convertToV1_3(OperandType operandType);
+V1_3::Operation convertToV1_3(const Operation& operation);
+V1_3::OperationType convertToV1_3(OperationType operationType);
+V1_3::OptionalTimeoutDuration convertToV1_3(const OptionalTimeoutDuration& timeoutDuration);
+V1_3::OptionalTimePoint convertToV1_3(const OptionalTimePoint& timePoint);
+V1_3::Priority convertToV1_3(Priority priority);
+V1_3::Request convertToV1_3(const Request& request);
+V1_3::Request::MemoryPool convertToV1_3(const Request::MemoryPool& memoryPool);
+V1_3::Subgraph convertToV1_3(const Model::Subgraph& model);
+
+}  // namespace nn
+}  // namespace android
+
+#endif  // ANDROID_FRAMEWORKS_ML_NN_COMMON_UTILS_H
diff --git a/nn/common/include/LegacyUtils.h b/nn/common/include/LegacyUtils.h
new file mode 100644
index 000000000..cdaf91172
--- /dev/null
+++ b/nn/common/include/LegacyUtils.h
@@ -0,0 +1,611 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ANDROID_FRAMEWORKS_ML_NN_COMMON_UTILS_H +#define ANDROID_FRAMEWORKS_ML_NN_COMMON_UTILS_H + +#include + +#include +#include +#include +#include +#include + +#include "HalInterfaces.h" +#include "NeuralNetworks.h" +#include "OperationResolver.h" +#include "ValidateHal.h" +#include "nnapi/TypeUtils.h" +#include "nnapi/Types.h" + +namespace android { +namespace nn { + +// The number of data types (OperandCode) defined in NeuralNetworks.h. +const int kNumberOfDataTypes = 16; + +// The number of operation types (OperationCode) defined in NeuralNetworks.h. +const int kNumberOfOperationTypes = 102; +static_assert(kNumberOfOperationTypes == BuiltinOperationResolver::kNumberOfOperationTypes); + +// The number of execution preferences defined in NeuralNetworks.h. +const int kNumberOfPreferences = 3; + +// The number of data types (OperandCode) defined in NeuralNetworksOEM.h. +const int kNumberOfDataTypesOEM = 2; + +// The number of operation types (OperationCode) defined in NeuralNetworksOEM.h. +const int kNumberOfOperationTypesOEM = 1; + +// The lowest number assigned to any OEM Code in NeuralNetworksOEM.h. +const int kOEMCodeBase = 10000; + +/* IMPORTANT: if you change the following list, don't + * forget to update the corresponding 'tags' table in + * the initVlogMask() function implemented in Utils.cpp. + */ +enum VLogFlags { MODEL = 0, COMPILATION, EXECUTION, CPUEXE, MANAGER, DRIVER, MEMORY }; + +#define VLOG_IS_ON(TAG) ((vLogMask & (1 << (TAG))) != 0) + +#define VLOG(TAG) \ + if (LIKELY(!VLOG_IS_ON(TAG))) \ + ; \ + else \ + LOG(INFO) + +extern int vLogMask; +void initVLogMask(); + +#ifdef NN_DEBUGGABLE +#define SHOW_IF_DEBUG(msg) msg +#else +#define SHOW_IF_DEBUG(msg) "" +#endif + +// DEPRECATED(b/118737105). Use CHECK. +#define nnAssert(v) CHECK(v) + +#define NN_RETURN_IF_ERROR(expr) \ + do { \ + int _errorCode = (expr); \ + if (_errorCode != ANEURALNETWORKS_NO_ERROR) { \ + return _errorCode; \ + } \ + } while (0) + +// Make an TimeoutDuration from a duration in nanoseconds. If the value exceeds +// the max duration, return the maximum expressible duration. +TimeoutDuration makeTimeoutDuration(uint64_t nanoseconds); + +// Type to represent a deadline time point across processes. +using Deadline = std::chrono::steady_clock::time_point; + +// Make an Deadline from a duration. If the sum of the current time and the +// duration exceeds the max time, return a time point holding the maximum +// expressible time. +Deadline makeDeadline(TimeoutDuration duration); +inline Deadline makeDeadline(uint64_t duration) { + return makeDeadline(makeTimeoutDuration(duration)); +} + +// Convenience function. If the duration is provided, this function creates a +// Deadline using makeDeadline. If the duration is not provided, this function +// returns std::nullopt. +inline std::optional makeDeadline(OptionalTimeoutDuration duration) { + return duration.has_value() ? makeDeadline(*duration) : std::optional{}; +} +inline std::optional makeDeadline(std::optional duration) { + return duration.has_value() ? makeDeadline(*duration) : std::optional{}; +} + +// Make an optional Deadline from an OptionalTimePoint. If +// timePoint.nanosecondsSinceEpoch cannot be represented in Deadline, return a +// time point holding the maximum Deadline. If the OptionalTimePoint is none, +// this function returns std::nullopt. 
+std::optional makeDeadline(const V1_3::OptionalTimePoint& timePoint); + +// Returns true if the deadline has passed. Returns false if either the deadline +// has not been exceeded or if the deadline is not present. +bool hasDeadlinePassed(const std::optional& deadline); + +// Make an OptionalTimePoint from an optional Deadline. If the Deadline is not +// provided, this function returns none for OptionalTimePoint. +OptionalTimePoint makeTimePoint(const std::optional& deadline); + +// Ensure that every user of FalseyErrorStream is linked to the +// correct instance, using the correct LOG_TAG +namespace { + +template +struct VersionedType {}; + +template <> +struct VersionedType { + using OperandPerformance = V1_2::Capabilities::OperandPerformance; + using OperandType = V1_2::OperandType; +}; + +template <> +struct VersionedType { + using OperandPerformance = V1_3::Capabilities::OperandPerformance; + using OperandType = V1_3::OperandType; +}; + +template +using VersionedOperandPerformance = typename VersionedType::OperandPerformance; +template +using VersionedOperandType = typename VersionedType::OperandType; + +} // namespace + +// Return a vector with one entry for each non-extension OperandType except +// SUBGRAPH, set to the specified PerformanceInfo value. The vector will be +// sorted by OperandType. +// +// Control flow (OperandType::SUBGRAPH) operation performance is specified +// separately using Capabilities::ifPerformance and +// Capabilities::whilePerformance. +template +hardware::hidl_vec> nonExtensionOperandPerformance( + V1_0::PerformanceInfo perf); + +// Update the vector entry corresponding to the specified OperandType with the +// specified PerformanceInfo value. The vector must already have an entry for +// that OperandType, and must be sorted by OperandType. +void update(hardware::hidl_vec* operandPerformance, + V1_2::OperandType type, V1_0::PerformanceInfo perf); +void update(hardware::hidl_vec* operandPerformance, + V1_3::OperandType type, V1_0::PerformanceInfo perf); + +// Look for a vector entry corresponding to the specified OperandType. If +// found, return the associated PerformanceInfo. If not, return a pessimistic +// PerformanceInfo (FLT_MAX). The vector must be sorted by OperandType. +V1_0::PerformanceInfo lookup( + const hardware::hidl_vec& operandPerformance, + V1_2::OperandType type); +V1_0::PerformanceInfo lookup( + const hardware::hidl_vec& operandPerformance, + V1_3::OperandType type); + +// Returns true if an operand type is an extension type. +bool isExtensionOperandType(V1_3::OperandType type); + +// Returns true if an operation type is an extension type. +bool isExtensionOperationType(V1_3::OperationType type); + +// Returns the amount of space needed to store a value of the specified +// dimensions and type. For a tensor with unspecified rank or at least one +// unspecified dimension, returns zero. +// +// Aborts if the specified type is an extension type. +// Aborts if the size would overflow the return type. +// +// See also TypeManager::getSizeOfData(OperandType, const std::vector&). +uint32_t nonExtensionOperandSizeOfData(V1_3::OperandType type, + const std::vector& dimensions); +uint32_t nonExtensionOperandSizeOfData(OperandType type, const std::vector& dimensions); + +// Returns the amount of space needed to store a value of the dimensions and +// type of this operand. For a tensor with unspecified rank or at least one +// unspecified dimension, returns zero. +// +// Aborts if the specified type is an extension type. 
+// Aborts if the size would overflow the return type. +// +// See also TypeManager::getSizeOfData(const Operand&). +inline uint32_t nonExtensionOperandSizeOfData(const Operand& operand) { + return nonExtensionOperandSizeOfData(operand.type, operand.dimensions); +} +inline uint32_t nonExtensionOperandSizeOfData(const V1_3::Operand& operand) { + return nonExtensionOperandSizeOfData(operand.type, operand.dimensions); +} + +// Returns the amount of space needed to store a value of the specified +// dimensions and element size. For a tensor with unspecified rank or at least +// one unspecified dimension, returns zero. +// +// Aborts if the size would overflow the return type. +// +// See also TypeManager::getSizeOfData(const Operand&). +uint32_t sizeOfTensorData(uint32_t sizeOfElement, const std::vector& dimensions); + +// Returns true if the amount of space needed to store a value of the specified +// dimensions and element size overflows the uint32_t type. +// +// Aborts if the specified type is an extension type. +// +// See also TypeManager::sizeOfDataOverflowsUInt32(OperandType, const std::vector&). +bool nonExtensionOperandSizeOfDataOverflowsUInt32(OperandType type, + const std::vector& dimensions); +bool nonExtensionOperandSizeOfDataOverflowsUInt32(V1_3::OperandType type, + const std::vector& dimensions); + +// Returns true if the amount of space needed to store a value of the specified +// dimensions and element size overflows the uint32_t type. +// +// See also TypeManager::sizeOfDataOverflowsUInt32(OperandType, const std::vector&). +bool sizeOfTensorDataOverflowsUInt32(uint32_t elementSize, const std::vector& dimensions); + +// Returns true if a non-extension operand type is a scalar type. +// +// Aborts if the specified type is an extension type. +// +// See also TypeManager::isTensorType(OperandType). +bool nonExtensionOperandTypeIsScalar(int type); + +// Returns the name of the operation type in ASCII. +std::string getOperationName(V1_3::OperationType opCode); + +// Returns the name of the operand type in ASCII. +std::string getOperandTypeName(V1_3::OperandType type); + +// Whether an operand of tensor type has unspecified dimensions. +// +// Undefined behavior if the operand type is a scalar type. +bool tensorHasUnspecifiedDimensions(int type, const uint32_t* dim, uint32_t dimCount); +bool tensorHasUnspecifiedDimensions(V1_3::OperandType type, + const std::vector& dimensions); +bool tensorHasUnspecifiedDimensions(OperandType type, const std::vector& dimensions); +bool tensorHasUnspecifiedDimensions(OperandType type, const Dimensions& dimensions); +bool tensorHasUnspecifiedDimensions(const Operand& operand); +bool tensorHasUnspecifiedDimensions(const V1_3::Operand& operand); +bool tensorHasUnspecifiedDimensions(const ANeuralNetworksOperandType* type); + +// Returns the number of padding bytes needed to align data of the +// specified length. It aligns object of length: +// 2, 3 on a 2 byte boundary, +// 4+ on a 4 byte boundary. +// We may want to have different alignments for tensors. +// TODO: This is arbitrary, more a proof of concept. We need +// to determine what this should be. 
+uint32_t alignBytesNeeded(uint32_t index, size_t length); + +// Does a detailed LOG(INFO) of the model +void logModelToInfo(const V1_0::Model& model); +void logModelToInfo(const V1_1::Model& model); +void logModelToInfo(const V1_2::Model& model); +void logModelToInfo(const V1_3::Model& model); +void logModelToInfo(const Model& model); + +inline std::string toString(uint32_t obj) { + return std::to_string(obj); +} + +template +std::string toString(const std::vector& range) { + std::string os = "["; + for (size_t i = 0; i < range.size(); ++i) { + os += (i == 0 ? "" : ", ") + toString(range[i]); + } + return os += "]"; +} + +template +std::string toString(const std::pair& pair) { + std::ostringstream oss; + oss << "(" << pair.first << ", " << pair.second << ")"; + return oss.str(); +} + +inline bool validCode(uint32_t codeCount, uint32_t codeCountOEM, uint32_t code) { + return (code < codeCount) || (code >= kOEMCodeBase && (code - kOEMCodeBase) < codeCountOEM); +} + +bool validateOperandSymmPerChannelQuantParams( + const V1_3::Operand& halOperand, + const ANeuralNetworksSymmPerChannelQuantParams& channelQuant, const char* tag); + +// Validates an operand type. +// +// extensionOperandTypeInfo must be nullptr iff the type is not an extension type. +// +// If allowPartial is true, the dimensions may be underspecified. +int validateOperandType(const ANeuralNetworksOperandType& type, + const Extension::OperandTypeInformation* const extensionOperandTypeInfo, + const char* tag, bool allowPartial); +int validateOperandList(uint32_t count, const uint32_t* list, uint32_t operandCount, + const char* tag); + +// A set of functions to help validate models containing IF or WHILE operations. +struct SubgraphValidationHelper { + // Checks if a given operand is a SUBGRAPH operand with a valid offset. + std::function isValidSubgraphReference; + // Gets the input count of a subgraph referenced by a given operand. + std::function getSubgraphInputCount; + // Gets the output count of a subgraph referenced by a given operand. + std::function getSubgraphOutputCount; + // Gets the specified input operand of a subgraph referenced by a given operand. + std::function getSubgraphInputOperand; + // Gets the specified output operand of a subgraph referenced by a given operand. + std::function getSubgraphOutputOperand; + // Whether control flow operations with inner or outer input or output + // operands of unknown size are allowed. + bool allowControlFlowOperationWithOperandOfUnknownSize; +}; + +// Returns ANEURALNETWORKS_NO_ERROR if the corresponding operation is defined and can handle the +// provided operand types in the given HAL version, otherwise returns ANEURALNETWORKS_BAD_DATA. +// The last argument is only used for validating IF and WHILE operations. +int validateOperation(ANeuralNetworksOperationType opType, uint32_t inputCount, + const uint32_t* inputIndexes, uint32_t outputCount, + const uint32_t* outputIndexes, const std::vector& operands, + HalVersion halVersion, const SubgraphValidationHelper& helper); + +inline size_t getSizeFromInts(int lower, int higher) { + return (uint32_t)(lower) + ((uint64_t)(uint32_t)(higher) << 32); +} + +// Convert ANEURALNETWORKS_* result code to ErrorStatus. +// Not guaranteed to be a 1-to-1 mapping. +ErrorStatus convertResultCodeToErrorStatus(int resultCode); +V1_3::ErrorStatus convertResultCodeToHalErrorStatus(int resultCode); + +// Convert ErrorStatus to ANEURALNETWORKS_* result code. +// Not guaranteed to be a 1-to-1 mapping. 
+int convertErrorStatusToResultCode(ErrorStatus status); +int convertErrorStatusToResultCode(V1_3::ErrorStatus status); + +// Convert execution results to runtime format. Additionally checks that the +// returned results abide by the HAL specification, and logs an error if the +// result violates the specification. +std::tuple, Timing> getExecutionResult( + V1_3::ErrorStatus status, const hardware::hidl_vec& outputShapes, + const V1_2::Timing& timing); +std::tuple, Timing> getExecutionResult( + ErrorStatus status, std::vector outputShapes, Timing timing); + +// Versioning + +bool compliantWithV1_0(const V1_0::Capabilities& capabilities); +bool compliantWithV1_0(const V1_1::Capabilities& capabilities); +bool compliantWithV1_0(const V1_2::Capabilities& capabilities); +bool compliantWithV1_0(const V1_3::Capabilities& capabilities); +bool compliantWithV1_1(const V1_0::Capabilities& capabilities); +bool compliantWithV1_1(const V1_1::Capabilities& capabilities); +bool compliantWithV1_1(const V1_2::Capabilities& capabilities); +bool compliantWithV1_1(const V1_3::Capabilities& capabilities); +bool compliantWithV1_2(const V1_0::Capabilities& capabilities); +bool compliantWithV1_2(const V1_1::Capabilities& capabilities); +bool compliantWithV1_2(const V1_2::Capabilities& capabilities); +bool compliantWithV1_2(const V1_3::Capabilities& capabilities); +bool compliantWithV1_3(const V1_0::Capabilities& capabilities); +bool compliantWithV1_3(const V1_1::Capabilities& capabilities); +bool compliantWithV1_3(const V1_2::Capabilities& capabilities); +bool compliantWithV1_3(const V1_3::Capabilities& capabilities); + +// If noncompliantOperations != nullptr, then +// precondition: noncompliantOperations->empty() +// postcondition: *noncompliantOperations consists of the indices of the noncompliant +// operations; if the compliance check fails for some reason +// other than a noncompliant operation, +// *noncompliantOperations consists of the indices of all operations +bool compliantWithV1_0(const V1_0::Model& model); +bool compliantWithV1_0(const V1_1::Model& model); +bool compliantWithV1_0(const V1_2::Model& model, + std::set* noncompliantOperations = nullptr); +bool compliantWithV1_0(const V1_3::Model& model, + std::set* noncompliantOperations = nullptr); +bool compliantWithV1_1(const V1_0::Model& model); +bool compliantWithV1_1(const V1_1::Model& model); +bool compliantWithV1_1(const V1_2::Model& model, + std::set* noncompliantOperations = nullptr); +bool compliantWithV1_1(const V1_3::Model& model, + std::set* noncompliantOperations = nullptr); +bool compliantWithV1_2(const V1_0::Model& model); +bool compliantWithV1_2(const V1_1::Model& model); +bool compliantWithV1_2(const V1_2::Model& model, + std::set* noncompliantOperations = nullptr); +bool compliantWithV1_2(const V1_3::Model& model, + std::set* noncompliantOperations = nullptr); + +V1_0::ErrorStatus convertToV1_0(V1_0::ErrorStatus status); +V1_0::ErrorStatus convertToV1_0(V1_3::ErrorStatus status); +V1_3::ErrorStatus convertToV1_3(V1_0::ErrorStatus status); +V1_3::ErrorStatus convertToV1_3(V1_3::ErrorStatus status); + +V1_0::Capabilities convertToV1_0(const V1_0::Capabilities& capabilities); +V1_0::Capabilities convertToV1_0(const V1_1::Capabilities& capabilities); +V1_0::Capabilities convertToV1_0(const V1_2::Capabilities& capabilities); +V1_0::Capabilities convertToV1_0(const V1_3::Capabilities& capabilities); +V1_1::Capabilities convertToV1_1(const V1_0::Capabilities& capabilities); +V1_1::Capabilities convertToV1_1(const V1_1::Capabilities& 
capabilities); +V1_1::Capabilities convertToV1_1(const V1_2::Capabilities& capabilities); +V1_1::Capabilities convertToV1_1(const V1_3::Capabilities& capabilities); +V1_2::Capabilities convertToV1_2(const V1_0::Capabilities& capabilities); +V1_2::Capabilities convertToV1_2(const V1_1::Capabilities& capabilities); +V1_2::Capabilities convertToV1_2(const V1_2::Capabilities& capabilities); +V1_2::Capabilities convertToV1_2(const V1_3::Capabilities& capabilities); +V1_3::Capabilities convertToV1_3(const V1_0::Capabilities& capabilities); +V1_3::Capabilities convertToV1_3(const V1_1::Capabilities& capabilities); +V1_3::Capabilities convertToV1_3(const V1_2::Capabilities& capabilities); +V1_3::Capabilities convertToV1_3(const V1_3::Capabilities& capabilities); + +V1_0::Model convertToV1_0(const V1_0::Model& model); +V1_0::Model convertToV1_0(const V1_1::Model& model); +V1_0::Model convertToV1_0(const V1_2::Model& model); +V1_0::Model convertToV1_0(const V1_3::Model& model); +V1_1::Model convertToV1_1(const V1_0::Model& model); +V1_1::Model convertToV1_1(const V1_1::Model& model); +V1_1::Model convertToV1_1(const V1_2::Model& model); +V1_1::Model convertToV1_1(const V1_3::Model& model); +V1_2::Model convertToV1_2(const V1_0::Model& model); +V1_2::Model convertToV1_2(const V1_1::Model& model); +V1_2::Model convertToV1_2(const V1_2::Model& model); +V1_2::Model convertToV1_2(const V1_3::Model& model); +V1_3::Model convertToV1_3(const V1_0::Model& model); +V1_3::Model convertToV1_3(const V1_1::Model& model); +V1_3::Model convertToV1_3(const V1_2::Model& model); +V1_3::Model convertToV1_3(const V1_3::Model& model); + +V1_0::OperationType uncheckedConvertToV1_0(V1_3::OperationType type); +V1_1::OperationType uncheckedConvertToV1_1(V1_3::OperationType type); +V1_2::OperationType uncheckedConvertToV1_2(V1_3::OperationType type); + +V1_0::Operand convertToV1_0(const V1_2::Operand& operand); +V1_0::Operand convertToV1_0(const V1_3::Operand& operand); +V1_2::Operand convertToV1_2(const V1_0::Operand& operand); +V1_2::Operand convertToV1_2(const V1_3::Operand& operand); +V1_3::Operand convertToV1_3(const V1_0::Operand& operand); +V1_3::Operand convertToV1_3(const V1_2::Operand& operand); +V1_3::Operand convertToV1_3(const V1_3::Operand& operand); + +hardware::hidl_vec convertToV1_0(const hardware::hidl_vec& operands); +hardware::hidl_vec convertToV1_0(const hardware::hidl_vec& operands); +hardware::hidl_vec convertToV1_0(const hardware::hidl_vec& operands); +hardware::hidl_vec convertToV1_2(const hardware::hidl_vec& operands); +hardware::hidl_vec convertToV1_2(const hardware::hidl_vec& operands); +hardware::hidl_vec convertToV1_2(const hardware::hidl_vec& operands); +hardware::hidl_vec convertToV1_3(const hardware::hidl_vec& operands); +hardware::hidl_vec convertToV1_3(const hardware::hidl_vec& operands); +hardware::hidl_vec convertToV1_3(const hardware::hidl_vec& operands); + +bool compliantWithV1_0(const V1_0::Request& request); +bool compliantWithV1_0(const V1_3::Request& request); +bool compliantWithV1_2(const V1_3::Request& request); + +V1_0::Request convertToV1_0(const V1_0::Request& request); +V1_0::Request convertToV1_0(const V1_3::Request& request); +V1_0::Request convertToV1_2(const V1_3::Request& request); +V1_3::Request convertToV1_3(const V1_0::Request& request); +V1_3::Request convertToV1_3(const V1_3::Request& request); + +bool compliantWithV1_0(V1_0::OperandLifeTime lifetime); +bool compliantWithV1_0(V1_3::OperandLifeTime lifetime); +bool compliantWithV1_3(V1_0::OperandLifeTime lifetime); 
+bool compliantWithV1_3(V1_3::OperandLifeTime lifetime); + +V1_0::OperandLifeTime convertToV1_0(V1_0::OperandLifeTime lifetime); +V1_0::OperandLifeTime convertToV1_0(V1_3::OperandLifeTime lifetime); +V1_3::OperandLifeTime convertToV1_3(V1_0::OperandLifeTime lifetime); +V1_3::OperandLifeTime convertToV1_3(V1_3::OperandLifeTime lifetime); + +constexpr V1_3::Priority convertToHalPriority(int32_t priority) { + switch (priority) { + case ANEURALNETWORKS_PRIORITY_LOW: + return V1_3::Priority::LOW; + case ANEURALNETWORKS_PRIORITY_MEDIUM: + return V1_3::Priority::MEDIUM; + case ANEURALNETWORKS_PRIORITY_HIGH: + return V1_3::Priority::HIGH; + } + LOG(FATAL) << "unrecognized priority: " << priority; + return {}; +} + +constexpr Priority convertToCanonicalPriority(int32_t priority) { + switch (priority) { + case ANEURALNETWORKS_PRIORITY_LOW: + return Priority::LOW; + case ANEURALNETWORKS_PRIORITY_MEDIUM: + return Priority::MEDIUM; + case ANEURALNETWORKS_PRIORITY_HIGH: + return Priority::HIGH; + } + LOG(FATAL) << "unrecognized priority: " << priority; + return {}; +} + +// The function syncWait() has the same semantics as the system function +// ::sync_wait(), except that the syncWait() return value is semantically +// richer. The timeout parameter is in msecs. +enum class FenceState { + ACTIVE, // fence has not been signaled + SIGNALED, // fence has been signaled + ERROR, // fence has been placed in the error state + UNKNOWN, // either bad argument passed to syncWait(), or internal error +}; +FenceState syncWait(int fd, int timeout); + +#ifdef NN_DEBUGGABLE +uint32_t getProp(const char* str, uint32_t defaultValue = 0); +#endif // NN_DEBUGGABLE + +// DEPRECATED. Use checked conversions from nnapi/hal/1.X/Conversions.h. +Capabilities::OperandPerformance uncheckedConvert( + const V1_3::Capabilities::OperandPerformance& operandPerformance); +Capabilities::PerformanceInfo uncheckedConvert(const V1_0::PerformanceInfo& performanceInfo); +Capabilities uncheckedConvert(const V1_3::Capabilities& capabilities); +DataLocation uncheckedConvert(const V1_0::DataLocation& location); +ErrorStatus uncheckedConvert(V1_0::ErrorStatus status); +ErrorStatus uncheckedConvert(V1_3::ErrorStatus status); +Extension::OperandTypeInformation uncheckedConvert(const V1_2::Extension::OperandTypeInformation&); +Extension uncheckedConvert(const V1_2::Extension& extension); +hardware::hidl_vec uncheckedConvert(const Operand::ExtensionParams& params); +MeasureTiming uncheckedConvert(V1_2::MeasureTiming measure); +Memory uncheckedConvert(const hardware::hidl_memory& memory); +Model::ExtensionNameAndPrefix uncheckedConvert(const V1_2::Model::ExtensionNameAndPrefix&); +Model::Subgraph uncheckedConvert(const V1_3::Subgraph& subgraph); +Model uncheckedConvert(const V1_3::Model& model); +Operand::ExtensionParams uncheckedConvert(const hardware::hidl_vec& params); +Operand::ExtraParams uncheckedConvert(const V1_2::Operand::ExtraParams& params); +Operand::LifeTime uncheckedConvert(V1_3::OperandLifeTime lifetime); +Operand::SymmPerChannelQuantParams uncheckedConvert(const V1_2::SymmPerChannelQuantParams& params); +OperandType uncheckedConvert(V1_3::OperandType operandType); +Operand uncheckedConvert(const V1_3::Operand& operand); +OperationType uncheckedConvert(V1_3::OperationType operationType); +Operation uncheckedConvert(const V1_3::Operation& operation); +OptionalTimeoutDuration uncheckedConvert(const V1_3::OptionalTimeoutDuration& timeoutDuration); +OutputShape uncheckedConvert(const V1_2::OutputShape& outputShape); +Request::Argument 
uncheckedConvert(const V1_0::RequestArgument& requestArgument); +Request::MemoryPool uncheckedConvert(const V1_3::Request::MemoryPool& memoryPool); +Request uncheckedConvert(const V1_3::Request& request); +std::vector uncheckedConvert(const hardware::hidl_vec& extensions); +std::vector uncheckedConvert(const hardware::hidl_vec& memories); +std::vector uncheckedConvert(const hardware::hidl_vec& subgraphs); +std::vector uncheckedConvert(const hardware::hidl_vec& operands); +std::vector uncheckedConvert( + const hardware::hidl_vec& outputShapes); +std::vector uncheckedConvert( + const hardware::hidl_vec& memoryPools); +Timing uncheckedConvert(const V1_2::Timing& timing); + +// DEPRECATED. Use conversions from nnapi/hal/1.X/Conversions.h. +hardware::hidl_memory convertToV1_0(const Memory& memory); +hardware::hidl_vec convertToV1_0(const std::vector& memories); +hardware::hidl_vec convertToV1_0(const Model::OperandValues& operandValues); +hardware::hidl_vec convertToV1_2(const std::vector& outputShapes); +hardware::hidl_vec convertToV1_3(const std::vector& bufferRoles); +V1_0::DataLocation convertToV1_0(const DataLocation& location); +V1_0::ErrorStatus convertToV1_0(ErrorStatus status); +V1_0::RequestArgument convertToV1_0(const Request::Argument& requestArgument); +V1_1::ExecutionPreference convertToV1_1(ExecutionPreference preference); +V1_2::MeasureTiming convertToV1_2(MeasureTiming measure); +V1_2::Model::ExtensionNameAndPrefix convertToV1_2(const Model::ExtensionNameAndPrefix&); +V1_2::Operand::ExtraParams convertToV1_2(const Operand::ExtraParams& params); +V1_2::OutputShape convertToV1_2(const OutputShape& outputShape); +V1_2::SymmPerChannelQuantParams convertToV1_2(const Operand::SymmPerChannelQuantParams& params); +V1_2::Timing convertToV1_2(const Timing& timing); +V1_3::BufferRole convertToV1_3(const BufferRole& bufferRole); +V1_3::ErrorStatus convertToV1_3(ErrorStatus status); +V1_3::Model convertToV1_3(const Model& model); +V1_3::Operand convertToV1_3(const Operand& operand); +V1_3::OperandLifeTime convertToV1_3(Operand::LifeTime lifetime); +V1_3::OperandType convertToV1_3(OperandType operandType); +V1_3::Operation convertToV1_3(const Operation& operation); +V1_3::OperationType convertToV1_3(OperationType operationType); +V1_3::OptionalTimeoutDuration convertToV1_3(const OptionalTimeoutDuration& timeoutDuration); +V1_3::OptionalTimePoint convertToV1_3(const OptionalTimePoint& timePoint); +V1_3::Priority convertToV1_3(Priority priority); +V1_3::Request convertToV1_3(const Request& request); +V1_3::Request::MemoryPool convertToV1_3(const Request::MemoryPool& memoryPool); +V1_3::Subgraph convertToV1_3(const Model::Subgraph& model); + +} // namespace nn +} // namespace android + +#endif // ANDROID_FRAMEWORKS_ML_NN_COMMON_UTILS_H diff --git a/nn/common/include/Utils.h b/nn/common/include/Utils.h deleted file mode 100644 index cdaf91172..000000000 --- a/nn/common/include/Utils.h +++ /dev/null @@ -1,611 +0,0 @@ -/* - * Copyright (C) 2017 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef ANDROID_FRAMEWORKS_ML_NN_COMMON_UTILS_H -#define ANDROID_FRAMEWORKS_ML_NN_COMMON_UTILS_H - -#include - -#include -#include -#include -#include -#include - -#include "HalInterfaces.h" -#include "NeuralNetworks.h" -#include "OperationResolver.h" -#include "ValidateHal.h" -#include "nnapi/TypeUtils.h" -#include "nnapi/Types.h" - -namespace android { -namespace nn { - -// The number of data types (OperandCode) defined in NeuralNetworks.h. -const int kNumberOfDataTypes = 16; - -// The number of operation types (OperationCode) defined in NeuralNetworks.h. -const int kNumberOfOperationTypes = 102; -static_assert(kNumberOfOperationTypes == BuiltinOperationResolver::kNumberOfOperationTypes); - -// The number of execution preferences defined in NeuralNetworks.h. -const int kNumberOfPreferences = 3; - -// The number of data types (OperandCode) defined in NeuralNetworksOEM.h. -const int kNumberOfDataTypesOEM = 2; - -// The number of operation types (OperationCode) defined in NeuralNetworksOEM.h. -const int kNumberOfOperationTypesOEM = 1; - -// The lowest number assigned to any OEM Code in NeuralNetworksOEM.h. -const int kOEMCodeBase = 10000; - -/* IMPORTANT: if you change the following list, don't - * forget to update the corresponding 'tags' table in - * the initVlogMask() function implemented in Utils.cpp. - */ -enum VLogFlags { MODEL = 0, COMPILATION, EXECUTION, CPUEXE, MANAGER, DRIVER, MEMORY }; - -#define VLOG_IS_ON(TAG) ((vLogMask & (1 << (TAG))) != 0) - -#define VLOG(TAG) \ - if (LIKELY(!VLOG_IS_ON(TAG))) \ - ; \ - else \ - LOG(INFO) - -extern int vLogMask; -void initVLogMask(); - -#ifdef NN_DEBUGGABLE -#define SHOW_IF_DEBUG(msg) msg -#else -#define SHOW_IF_DEBUG(msg) "" -#endif - -// DEPRECATED(b/118737105). Use CHECK. -#define nnAssert(v) CHECK(v) - -#define NN_RETURN_IF_ERROR(expr) \ - do { \ - int _errorCode = (expr); \ - if (_errorCode != ANEURALNETWORKS_NO_ERROR) { \ - return _errorCode; \ - } \ - } while (0) - -// Make an TimeoutDuration from a duration in nanoseconds. If the value exceeds -// the max duration, return the maximum expressible duration. -TimeoutDuration makeTimeoutDuration(uint64_t nanoseconds); - -// Type to represent a deadline time point across processes. -using Deadline = std::chrono::steady_clock::time_point; - -// Make an Deadline from a duration. If the sum of the current time and the -// duration exceeds the max time, return a time point holding the maximum -// expressible time. -Deadline makeDeadline(TimeoutDuration duration); -inline Deadline makeDeadline(uint64_t duration) { - return makeDeadline(makeTimeoutDuration(duration)); -} - -// Convenience function. If the duration is provided, this function creates a -// Deadline using makeDeadline. If the duration is not provided, this function -// returns std::nullopt. -inline std::optional makeDeadline(OptionalTimeoutDuration duration) { - return duration.has_value() ? makeDeadline(*duration) : std::optional{}; -} -inline std::optional makeDeadline(std::optional duration) { - return duration.has_value() ? makeDeadline(*duration) : std::optional{}; -} - -// Make an optional Deadline from an OptionalTimePoint. If -// timePoint.nanosecondsSinceEpoch cannot be represented in Deadline, return a -// time point holding the maximum Deadline. If the OptionalTimePoint is none, -// this function returns std::nullopt. 
-std::optional makeDeadline(const V1_3::OptionalTimePoint& timePoint); - -// Returns true if the deadline has passed. Returns false if either the deadline -// has not been exceeded or if the deadline is not present. -bool hasDeadlinePassed(const std::optional& deadline); - -// Make an OptionalTimePoint from an optional Deadline. If the Deadline is not -// provided, this function returns none for OptionalTimePoint. -OptionalTimePoint makeTimePoint(const std::optional& deadline); - -// Ensure that every user of FalseyErrorStream is linked to the -// correct instance, using the correct LOG_TAG -namespace { - -template -struct VersionedType {}; - -template <> -struct VersionedType { - using OperandPerformance = V1_2::Capabilities::OperandPerformance; - using OperandType = V1_2::OperandType; -}; - -template <> -struct VersionedType { - using OperandPerformance = V1_3::Capabilities::OperandPerformance; - using OperandType = V1_3::OperandType; -}; - -template -using VersionedOperandPerformance = typename VersionedType::OperandPerformance; -template -using VersionedOperandType = typename VersionedType::OperandType; - -} // namespace - -// Return a vector with one entry for each non-extension OperandType except -// SUBGRAPH, set to the specified PerformanceInfo value. The vector will be -// sorted by OperandType. -// -// Control flow (OperandType::SUBGRAPH) operation performance is specified -// separately using Capabilities::ifPerformance and -// Capabilities::whilePerformance. -template -hardware::hidl_vec> nonExtensionOperandPerformance( - V1_0::PerformanceInfo perf); - -// Update the vector entry corresponding to the specified OperandType with the -// specified PerformanceInfo value. The vector must already have an entry for -// that OperandType, and must be sorted by OperandType. -void update(hardware::hidl_vec* operandPerformance, - V1_2::OperandType type, V1_0::PerformanceInfo perf); -void update(hardware::hidl_vec* operandPerformance, - V1_3::OperandType type, V1_0::PerformanceInfo perf); - -// Look for a vector entry corresponding to the specified OperandType. If -// found, return the associated PerformanceInfo. If not, return a pessimistic -// PerformanceInfo (FLT_MAX). The vector must be sorted by OperandType. -V1_0::PerformanceInfo lookup( - const hardware::hidl_vec& operandPerformance, - V1_2::OperandType type); -V1_0::PerformanceInfo lookup( - const hardware::hidl_vec& operandPerformance, - V1_3::OperandType type); - -// Returns true if an operand type is an extension type. -bool isExtensionOperandType(V1_3::OperandType type); - -// Returns true if an operation type is an extension type. -bool isExtensionOperationType(V1_3::OperationType type); - -// Returns the amount of space needed to store a value of the specified -// dimensions and type. For a tensor with unspecified rank or at least one -// unspecified dimension, returns zero. -// -// Aborts if the specified type is an extension type. -// Aborts if the size would overflow the return type. -// -// See also TypeManager::getSizeOfData(OperandType, const std::vector&). -uint32_t nonExtensionOperandSizeOfData(V1_3::OperandType type, - const std::vector& dimensions); -uint32_t nonExtensionOperandSizeOfData(OperandType type, const std::vector& dimensions); - -// Returns the amount of space needed to store a value of the dimensions and -// type of this operand. For a tensor with unspecified rank or at least one -// unspecified dimension, returns zero. -// -// Aborts if the specified type is an extension type. 
-// Aborts if the size would overflow the return type.
-//
-// See also TypeManager::getSizeOfData(const Operand&).
-inline uint32_t nonExtensionOperandSizeOfData(const Operand& operand) {
-    return nonExtensionOperandSizeOfData(operand.type, operand.dimensions);
-}
-inline uint32_t nonExtensionOperandSizeOfData(const V1_3::Operand& operand) {
-    return nonExtensionOperandSizeOfData(operand.type, operand.dimensions);
-}
-
-// Returns the amount of space needed to store a value of the specified
-// dimensions and element size. For a tensor with unspecified rank or at least
-// one unspecified dimension, returns zero.
-//
-// Aborts if the size would overflow the return type.
-//
-// See also TypeManager::getSizeOfData(const Operand&).
-uint32_t sizeOfTensorData(uint32_t sizeOfElement, const std::vector<uint32_t>& dimensions);
-
-// Returns true if the amount of space needed to store a value of the specified
-// dimensions and type overflows the uint32_t type.
-//
-// Aborts if the specified type is an extension type.
-//
-// See also TypeManager::sizeOfDataOverflowsUInt32(OperandType, const std::vector<uint32_t>&).
-bool nonExtensionOperandSizeOfDataOverflowsUInt32(OperandType type,
-                                                  const std::vector<uint32_t>& dimensions);
-bool nonExtensionOperandSizeOfDataOverflowsUInt32(V1_3::OperandType type,
-                                                  const std::vector<uint32_t>& dimensions);
-
-// Returns true if the amount of space needed to store a value of the specified
-// dimensions and element size overflows the uint32_t type.
-//
-// See also TypeManager::sizeOfDataOverflowsUInt32(OperandType, const std::vector<uint32_t>&).
-bool sizeOfTensorDataOverflowsUInt32(uint32_t elementSize, const std::vector<uint32_t>& dimensions);
-
-// Returns true if a non-extension operand type is a scalar type.
-//
-// Aborts if the specified type is an extension type.
-//
-// See also TypeManager::isTensorType(OperandType).
-bool nonExtensionOperandTypeIsScalar(int type);
-
-// Returns the name of the operation type in ASCII.
-std::string getOperationName(V1_3::OperationType opCode);
-
-// Returns the name of the operand type in ASCII.
-std::string getOperandTypeName(V1_3::OperandType type);
-
-// Whether an operand of tensor type has unspecified dimensions.
-//
-// Undefined behavior if the operand type is a scalar type.
-bool tensorHasUnspecifiedDimensions(int type, const uint32_t* dim, uint32_t dimCount);
-bool tensorHasUnspecifiedDimensions(V1_3::OperandType type,
-                                    const std::vector<uint32_t>& dimensions);
-bool tensorHasUnspecifiedDimensions(OperandType type, const std::vector<uint32_t>& dimensions);
-bool tensorHasUnspecifiedDimensions(OperandType type, const Dimensions& dimensions);
-bool tensorHasUnspecifiedDimensions(const Operand& operand);
-bool tensorHasUnspecifiedDimensions(const V1_3::Operand& operand);
-bool tensorHasUnspecifiedDimensions(const ANeuralNetworksOperandType* type);
-
-// Returns the number of padding bytes needed to align data of the
-// specified length. It aligns an object of length:
-//     2, 3 on a 2-byte boundary,
-//     4+ on a 4-byte boundary.
-// We may want to have different alignments for tensors.
-// TODO: This is arbitrary, more a proof of concept. We need
-// to determine what this should be.
-uint32_t alignBytesNeeded(uint32_t index, size_t length);
-
-// Logs the model in detail via LOG(INFO).
-void logModelToInfo(const V1_0::Model& model);
-void logModelToInfo(const V1_1::Model& model);
-void logModelToInfo(const V1_2::Model& model);
-void logModelToInfo(const V1_3::Model& model);
-void logModelToInfo(const Model& model);
-
-inline std::string toString(uint32_t obj) {
-    return std::to_string(obj);
-}
-
-template <typename Type>
-std::string toString(const std::vector<Type>& range) {
-    std::string os = "[";
-    for (size_t i = 0; i < range.size(); ++i) {
-        os += (i == 0 ? "" : ", ") + toString(range[i]);
-    }
-    return os += "]";
-}
-
-template <typename A, typename B>
-std::string toString(const std::pair<A, B>& pair) {
-    std::ostringstream oss;
-    oss << "(" << pair.first << ", " << pair.second << ")";
-    return oss.str();
-}
-
-inline bool validCode(uint32_t codeCount, uint32_t codeCountOEM, uint32_t code) {
-    return (code < codeCount) || (code >= kOEMCodeBase && (code - kOEMCodeBase) < codeCountOEM);
-}
-
-bool validateOperandSymmPerChannelQuantParams(
-        const V1_3::Operand& halOperand,
-        const ANeuralNetworksSymmPerChannelQuantParams& channelQuant, const char* tag);
-
-// Validates an operand type.
-//
-// extensionOperandTypeInfo must be nullptr iff the type is not an extension type.
-//
-// If allowPartial is true, the dimensions may be underspecified.
-int validateOperandType(const ANeuralNetworksOperandType& type,
-                        const Extension::OperandTypeInformation* const extensionOperandTypeInfo,
-                        const char* tag, bool allowPartial);
-int validateOperandList(uint32_t count, const uint32_t* list, uint32_t operandCount,
-                        const char* tag);
-
-// A set of functions to help validate models containing IF or WHILE operations.
-struct SubgraphValidationHelper {
-    // Checks if a given operand is a SUBGRAPH operand with a valid offset.
-    std::function<bool(const Operand&)> isValidSubgraphReference;
-    // Gets the input count of a subgraph referenced by a given operand.
-    std::function<uint32_t(const Operand&)> getSubgraphInputCount;
-    // Gets the output count of a subgraph referenced by a given operand.
-    std::function<uint32_t(const Operand&)> getSubgraphOutputCount;
-    // Gets the specified input operand of a subgraph referenced by a given operand.
-    std::function<const Operand*(const Operand&, uint32_t)> getSubgraphInputOperand;
-    // Gets the specified output operand of a subgraph referenced by a given operand.
-    std::function<const Operand*(const Operand&, uint32_t)> getSubgraphOutputOperand;
-    // Whether control flow operations with inner or outer input or output
-    // operands of unknown size are allowed.
-    bool allowControlFlowOperationWithOperandOfUnknownSize;
-};
-
-// Returns ANEURALNETWORKS_NO_ERROR if the corresponding operation is defined and can handle the
-// provided operand types in the given HAL version, otherwise returns ANEURALNETWORKS_BAD_DATA.
-// The last argument is only used for validating IF and WHILE operations.
-int validateOperation(ANeuralNetworksOperationType opType, uint32_t inputCount,
-                      const uint32_t* inputIndexes, uint32_t outputCount,
-                      const uint32_t* outputIndexes, const std::vector<Operand>& operands,
-                      HalVersion halVersion, const SubgraphValidationHelper& helper);
-
-inline size_t getSizeFromInts(int lower, int higher) {
-    return (uint32_t)(lower) + ((uint64_t)(uint32_t)(higher) << 32);
-}
-
-// Convert ANEURALNETWORKS_* result code to ErrorStatus.
-// Not guaranteed to be a 1-to-1 mapping.
-ErrorStatus convertResultCodeToErrorStatus(int resultCode);
-V1_3::ErrorStatus convertResultCodeToHalErrorStatus(int resultCode);
-
-// Convert ErrorStatus to ANEURALNETWORKS_* result code.
-// Not guaranteed to be a 1-to-1 mapping.
-int convertErrorStatusToResultCode(ErrorStatus status); -int convertErrorStatusToResultCode(V1_3::ErrorStatus status); - -// Convert execution results to runtime format. Additionally checks that the -// returned results abide by the HAL specification, and logs an error if the -// result violates the specification. -std::tuple, Timing> getExecutionResult( - V1_3::ErrorStatus status, const hardware::hidl_vec& outputShapes, - const V1_2::Timing& timing); -std::tuple, Timing> getExecutionResult( - ErrorStatus status, std::vector outputShapes, Timing timing); - -// Versioning - -bool compliantWithV1_0(const V1_0::Capabilities& capabilities); -bool compliantWithV1_0(const V1_1::Capabilities& capabilities); -bool compliantWithV1_0(const V1_2::Capabilities& capabilities); -bool compliantWithV1_0(const V1_3::Capabilities& capabilities); -bool compliantWithV1_1(const V1_0::Capabilities& capabilities); -bool compliantWithV1_1(const V1_1::Capabilities& capabilities); -bool compliantWithV1_1(const V1_2::Capabilities& capabilities); -bool compliantWithV1_1(const V1_3::Capabilities& capabilities); -bool compliantWithV1_2(const V1_0::Capabilities& capabilities); -bool compliantWithV1_2(const V1_1::Capabilities& capabilities); -bool compliantWithV1_2(const V1_2::Capabilities& capabilities); -bool compliantWithV1_2(const V1_3::Capabilities& capabilities); -bool compliantWithV1_3(const V1_0::Capabilities& capabilities); -bool compliantWithV1_3(const V1_1::Capabilities& capabilities); -bool compliantWithV1_3(const V1_2::Capabilities& capabilities); -bool compliantWithV1_3(const V1_3::Capabilities& capabilities); - -// If noncompliantOperations != nullptr, then -// precondition: noncompliantOperations->empty() -// postcondition: *noncompliantOperations consists of the indices of the noncompliant -// operations; if the compliance check fails for some reason -// other than a noncompliant operation, -// *noncompliantOperations consists of the indices of all operations -bool compliantWithV1_0(const V1_0::Model& model); -bool compliantWithV1_0(const V1_1::Model& model); -bool compliantWithV1_0(const V1_2::Model& model, - std::set* noncompliantOperations = nullptr); -bool compliantWithV1_0(const V1_3::Model& model, - std::set* noncompliantOperations = nullptr); -bool compliantWithV1_1(const V1_0::Model& model); -bool compliantWithV1_1(const V1_1::Model& model); -bool compliantWithV1_1(const V1_2::Model& model, - std::set* noncompliantOperations = nullptr); -bool compliantWithV1_1(const V1_3::Model& model, - std::set* noncompliantOperations = nullptr); -bool compliantWithV1_2(const V1_0::Model& model); -bool compliantWithV1_2(const V1_1::Model& model); -bool compliantWithV1_2(const V1_2::Model& model, - std::set* noncompliantOperations = nullptr); -bool compliantWithV1_2(const V1_3::Model& model, - std::set* noncompliantOperations = nullptr); - -V1_0::ErrorStatus convertToV1_0(V1_0::ErrorStatus status); -V1_0::ErrorStatus convertToV1_0(V1_3::ErrorStatus status); -V1_3::ErrorStatus convertToV1_3(V1_0::ErrorStatus status); -V1_3::ErrorStatus convertToV1_3(V1_3::ErrorStatus status); - -V1_0::Capabilities convertToV1_0(const V1_0::Capabilities& capabilities); -V1_0::Capabilities convertToV1_0(const V1_1::Capabilities& capabilities); -V1_0::Capabilities convertToV1_0(const V1_2::Capabilities& capabilities); -V1_0::Capabilities convertToV1_0(const V1_3::Capabilities& capabilities); -V1_1::Capabilities convertToV1_1(const V1_0::Capabilities& capabilities); -V1_1::Capabilities convertToV1_1(const V1_1::Capabilities& 
capabilities); -V1_1::Capabilities convertToV1_1(const V1_2::Capabilities& capabilities); -V1_1::Capabilities convertToV1_1(const V1_3::Capabilities& capabilities); -V1_2::Capabilities convertToV1_2(const V1_0::Capabilities& capabilities); -V1_2::Capabilities convertToV1_2(const V1_1::Capabilities& capabilities); -V1_2::Capabilities convertToV1_2(const V1_2::Capabilities& capabilities); -V1_2::Capabilities convertToV1_2(const V1_3::Capabilities& capabilities); -V1_3::Capabilities convertToV1_3(const V1_0::Capabilities& capabilities); -V1_3::Capabilities convertToV1_3(const V1_1::Capabilities& capabilities); -V1_3::Capabilities convertToV1_3(const V1_2::Capabilities& capabilities); -V1_3::Capabilities convertToV1_3(const V1_3::Capabilities& capabilities); - -V1_0::Model convertToV1_0(const V1_0::Model& model); -V1_0::Model convertToV1_0(const V1_1::Model& model); -V1_0::Model convertToV1_0(const V1_2::Model& model); -V1_0::Model convertToV1_0(const V1_3::Model& model); -V1_1::Model convertToV1_1(const V1_0::Model& model); -V1_1::Model convertToV1_1(const V1_1::Model& model); -V1_1::Model convertToV1_1(const V1_2::Model& model); -V1_1::Model convertToV1_1(const V1_3::Model& model); -V1_2::Model convertToV1_2(const V1_0::Model& model); -V1_2::Model convertToV1_2(const V1_1::Model& model); -V1_2::Model convertToV1_2(const V1_2::Model& model); -V1_2::Model convertToV1_2(const V1_3::Model& model); -V1_3::Model convertToV1_3(const V1_0::Model& model); -V1_3::Model convertToV1_3(const V1_1::Model& model); -V1_3::Model convertToV1_3(const V1_2::Model& model); -V1_3::Model convertToV1_3(const V1_3::Model& model); - -V1_0::OperationType uncheckedConvertToV1_0(V1_3::OperationType type); -V1_1::OperationType uncheckedConvertToV1_1(V1_3::OperationType type); -V1_2::OperationType uncheckedConvertToV1_2(V1_3::OperationType type); - -V1_0::Operand convertToV1_0(const V1_2::Operand& operand); -V1_0::Operand convertToV1_0(const V1_3::Operand& operand); -V1_2::Operand convertToV1_2(const V1_0::Operand& operand); -V1_2::Operand convertToV1_2(const V1_3::Operand& operand); -V1_3::Operand convertToV1_3(const V1_0::Operand& operand); -V1_3::Operand convertToV1_3(const V1_2::Operand& operand); -V1_3::Operand convertToV1_3(const V1_3::Operand& operand); - -hardware::hidl_vec convertToV1_0(const hardware::hidl_vec& operands); -hardware::hidl_vec convertToV1_0(const hardware::hidl_vec& operands); -hardware::hidl_vec convertToV1_0(const hardware::hidl_vec& operands); -hardware::hidl_vec convertToV1_2(const hardware::hidl_vec& operands); -hardware::hidl_vec convertToV1_2(const hardware::hidl_vec& operands); -hardware::hidl_vec convertToV1_2(const hardware::hidl_vec& operands); -hardware::hidl_vec convertToV1_3(const hardware::hidl_vec& operands); -hardware::hidl_vec convertToV1_3(const hardware::hidl_vec& operands); -hardware::hidl_vec convertToV1_3(const hardware::hidl_vec& operands); - -bool compliantWithV1_0(const V1_0::Request& request); -bool compliantWithV1_0(const V1_3::Request& request); -bool compliantWithV1_2(const V1_3::Request& request); - -V1_0::Request convertToV1_0(const V1_0::Request& request); -V1_0::Request convertToV1_0(const V1_3::Request& request); -V1_0::Request convertToV1_2(const V1_3::Request& request); -V1_3::Request convertToV1_3(const V1_0::Request& request); -V1_3::Request convertToV1_3(const V1_3::Request& request); - -bool compliantWithV1_0(V1_0::OperandLifeTime lifetime); -bool compliantWithV1_0(V1_3::OperandLifeTime lifetime); -bool compliantWithV1_3(V1_0::OperandLifeTime lifetime); 
-bool compliantWithV1_3(V1_3::OperandLifeTime lifetime); - -V1_0::OperandLifeTime convertToV1_0(V1_0::OperandLifeTime lifetime); -V1_0::OperandLifeTime convertToV1_0(V1_3::OperandLifeTime lifetime); -V1_3::OperandLifeTime convertToV1_3(V1_0::OperandLifeTime lifetime); -V1_3::OperandLifeTime convertToV1_3(V1_3::OperandLifeTime lifetime); - -constexpr V1_3::Priority convertToHalPriority(int32_t priority) { - switch (priority) { - case ANEURALNETWORKS_PRIORITY_LOW: - return V1_3::Priority::LOW; - case ANEURALNETWORKS_PRIORITY_MEDIUM: - return V1_3::Priority::MEDIUM; - case ANEURALNETWORKS_PRIORITY_HIGH: - return V1_3::Priority::HIGH; - } - LOG(FATAL) << "unrecognized priority: " << priority; - return {}; -} - -constexpr Priority convertToCanonicalPriority(int32_t priority) { - switch (priority) { - case ANEURALNETWORKS_PRIORITY_LOW: - return Priority::LOW; - case ANEURALNETWORKS_PRIORITY_MEDIUM: - return Priority::MEDIUM; - case ANEURALNETWORKS_PRIORITY_HIGH: - return Priority::HIGH; - } - LOG(FATAL) << "unrecognized priority: " << priority; - return {}; -} - -// The function syncWait() has the same semantics as the system function -// ::sync_wait(), except that the syncWait() return value is semantically -// richer. The timeout parameter is in msecs. -enum class FenceState { - ACTIVE, // fence has not been signaled - SIGNALED, // fence has been signaled - ERROR, // fence has been placed in the error state - UNKNOWN, // either bad argument passed to syncWait(), or internal error -}; -FenceState syncWait(int fd, int timeout); - -#ifdef NN_DEBUGGABLE -uint32_t getProp(const char* str, uint32_t defaultValue = 0); -#endif // NN_DEBUGGABLE - -// DEPRECATED. Use checked conversions from nnapi/hal/1.X/Conversions.h. -Capabilities::OperandPerformance uncheckedConvert( - const V1_3::Capabilities::OperandPerformance& operandPerformance); -Capabilities::PerformanceInfo uncheckedConvert(const V1_0::PerformanceInfo& performanceInfo); -Capabilities uncheckedConvert(const V1_3::Capabilities& capabilities); -DataLocation uncheckedConvert(const V1_0::DataLocation& location); -ErrorStatus uncheckedConvert(V1_0::ErrorStatus status); -ErrorStatus uncheckedConvert(V1_3::ErrorStatus status); -Extension::OperandTypeInformation uncheckedConvert(const V1_2::Extension::OperandTypeInformation&); -Extension uncheckedConvert(const V1_2::Extension& extension); -hardware::hidl_vec uncheckedConvert(const Operand::ExtensionParams& params); -MeasureTiming uncheckedConvert(V1_2::MeasureTiming measure); -Memory uncheckedConvert(const hardware::hidl_memory& memory); -Model::ExtensionNameAndPrefix uncheckedConvert(const V1_2::Model::ExtensionNameAndPrefix&); -Model::Subgraph uncheckedConvert(const V1_3::Subgraph& subgraph); -Model uncheckedConvert(const V1_3::Model& model); -Operand::ExtensionParams uncheckedConvert(const hardware::hidl_vec& params); -Operand::ExtraParams uncheckedConvert(const V1_2::Operand::ExtraParams& params); -Operand::LifeTime uncheckedConvert(V1_3::OperandLifeTime lifetime); -Operand::SymmPerChannelQuantParams uncheckedConvert(const V1_2::SymmPerChannelQuantParams& params); -OperandType uncheckedConvert(V1_3::OperandType operandType); -Operand uncheckedConvert(const V1_3::Operand& operand); -OperationType uncheckedConvert(V1_3::OperationType operationType); -Operation uncheckedConvert(const V1_3::Operation& operation); -OptionalTimeoutDuration uncheckedConvert(const V1_3::OptionalTimeoutDuration& timeoutDuration); -OutputShape uncheckedConvert(const V1_2::OutputShape& outputShape); -Request::Argument 
uncheckedConvert(const V1_0::RequestArgument& requestArgument); -Request::MemoryPool uncheckedConvert(const V1_3::Request::MemoryPool& memoryPool); -Request uncheckedConvert(const V1_3::Request& request); -std::vector uncheckedConvert(const hardware::hidl_vec& extensions); -std::vector uncheckedConvert(const hardware::hidl_vec& memories); -std::vector uncheckedConvert(const hardware::hidl_vec& subgraphs); -std::vector uncheckedConvert(const hardware::hidl_vec& operands); -std::vector uncheckedConvert( - const hardware::hidl_vec& outputShapes); -std::vector uncheckedConvert( - const hardware::hidl_vec& memoryPools); -Timing uncheckedConvert(const V1_2::Timing& timing); - -// DEPRECATED. Use conversions from nnapi/hal/1.X/Conversions.h. -hardware::hidl_memory convertToV1_0(const Memory& memory); -hardware::hidl_vec convertToV1_0(const std::vector& memories); -hardware::hidl_vec convertToV1_0(const Model::OperandValues& operandValues); -hardware::hidl_vec convertToV1_2(const std::vector& outputShapes); -hardware::hidl_vec convertToV1_3(const std::vector& bufferRoles); -V1_0::DataLocation convertToV1_0(const DataLocation& location); -V1_0::ErrorStatus convertToV1_0(ErrorStatus status); -V1_0::RequestArgument convertToV1_0(const Request::Argument& requestArgument); -V1_1::ExecutionPreference convertToV1_1(ExecutionPreference preference); -V1_2::MeasureTiming convertToV1_2(MeasureTiming measure); -V1_2::Model::ExtensionNameAndPrefix convertToV1_2(const Model::ExtensionNameAndPrefix&); -V1_2::Operand::ExtraParams convertToV1_2(const Operand::ExtraParams& params); -V1_2::OutputShape convertToV1_2(const OutputShape& outputShape); -V1_2::SymmPerChannelQuantParams convertToV1_2(const Operand::SymmPerChannelQuantParams& params); -V1_2::Timing convertToV1_2(const Timing& timing); -V1_3::BufferRole convertToV1_3(const BufferRole& bufferRole); -V1_3::ErrorStatus convertToV1_3(ErrorStatus status); -V1_3::Model convertToV1_3(const Model& model); -V1_3::Operand convertToV1_3(const Operand& operand); -V1_3::OperandLifeTime convertToV1_3(Operand::LifeTime lifetime); -V1_3::OperandType convertToV1_3(OperandType operandType); -V1_3::Operation convertToV1_3(const Operation& operation); -V1_3::OperationType convertToV1_3(OperationType operationType); -V1_3::OptionalTimeoutDuration convertToV1_3(const OptionalTimeoutDuration& timeoutDuration); -V1_3::OptionalTimePoint convertToV1_3(const OptionalTimePoint& timePoint); -V1_3::Priority convertToV1_3(Priority priority); -V1_3::Request convertToV1_3(const Request& request); -V1_3::Request::MemoryPool convertToV1_3(const Request::MemoryPool& memoryPool); -V1_3::Subgraph convertToV1_3(const Model::Subgraph& model); - -} // namespace nn -} // namespace android - -#endif // ANDROID_FRAMEWORKS_ML_NN_COMMON_UTILS_H -- cgit v1.2.3 From 72737fc910d8a0f3f96d08bc2eb861551cb84e41 Mon Sep 17 00:00:00 2001 From: Slava Shklyaev Date: Mon, 16 Nov 2020 12:24:07 +0000 Subject: Split Utils into LegacyUtils and LegacyHalUtils To simplify the review process and preserve the git history, change I2b54bc13 copies Utils to LegacyUtils and LegacyHalUtils and this change removes redundant code. 
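In effect, the public Utils.h shrinks to a thin forwarding header over the two
new files. A minimal sketch of what such a header could look like (an
illustration of the split only, not the verbatim 23-line file created by the
diff below):

    // Sketch only: forwards to the HAL-dependent and HAL-independent halves.
    #ifndef ANDROID_FRAMEWORKS_ML_NN_COMMON_UTILS_H
    #define ANDROID_FRAMEWORKS_ML_NN_COMMON_UTILS_H

    #include "LegacyHalUtils.h"
    #include "LegacyUtils.h"

    #endif  // ANDROID_FRAMEWORKS_ML_NN_COMMON_UTILS_H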
Bug: 170289677 Test: NNT_static Test: NNT_utils Change-Id: I7ffc5824a382e121b86e7c8ad0de384c9c1a6d4c Merged-In: I7ffc5824a382e121b86e7c8ad0de384c9c1a6d4c (cherry picked from commit 93a2a1ec8100d4fe7d91373587869a9753cdb6bc) --- nn/common/Android.bp | 2 + nn/common/LegacyHalUtils.cpp | 1834 +----------------------------------- nn/common/LegacyUtils.cpp | 1766 +--------------------------------- nn/common/include/LegacyHalUtils.h | 240 +---- nn/common/include/LegacyUtils.h | 318 +------ nn/common/include/Utils.h | 23 + 6 files changed, 87 insertions(+), 4096 deletions(-) create mode 100644 nn/common/include/Utils.h diff --git a/nn/common/Android.bp b/nn/common/Android.bp index 366470243..2fdfd5e45 100644 --- a/nn/common/Android.bp +++ b/nn/common/Android.bp @@ -86,6 +86,7 @@ cc_library_static { srcs: [ "ExecutionBurstController.cpp", "ExecutionBurstServer.cpp", + "LegacyHalUtils.cpp", "LegacyUtils.cpp", "MemoryUtils.cpp", ], @@ -156,6 +157,7 @@ cc_library_static { "ExecutionBurstServer.cpp", "GraphDump.cpp", "IndexedShapeWrapper.cpp", + "LegacyHalUtils.cpp", "LegacyUtils.cpp", "MemoryUtils.cpp", "MetaModel.cpp", diff --git a/nn/common/LegacyHalUtils.cpp b/nn/common/LegacyHalUtils.cpp index 7417ed8bf..d92c6ea0f 100644 --- a/nn/common/LegacyHalUtils.cpp +++ b/nn/common/LegacyHalUtils.cpp @@ -16,113 +16,29 @@ #define LOG_TAG "Utils" -#include "Utils.h" +#include "LegacyHalUtils.h" -#include -#include -#include -#include +#include #include #include #include #include -#include #include -#include -#include -#include -#include -#include #include #include #include -#include #include #include -#include "ControlFlow.h" #include "NeuralNetworks.h" -#include "NeuralNetworksOEM.h" -#include "OperationResolver.h" #include "ValidateHal.h" -#include "nnapi/TypeUtils.h" namespace android { namespace nn { constexpr V1_0::PerformanceInfo kNoPerformanceInfo = {.execTime = FLT_MAX, .powerUsage = FLT_MAX}; -const char kVLogPropKey[] = "debug.nn.vlog"; -int vLogMask = ~0; - -// Split the space separated list of tags from verbose log setting and build the -// logging mask from it. note that '1' and 'all' are special cases to enable all -// verbose logging. -// -// NN API verbose logging setting comes from system property debug.nn.vlog. -// Example: -// setprop debug.nn.vlog 1 : enable all logging tags. -// setprop debug.nn.vlog "model compilation" : only enable logging for MODEL and -// COMPILATION tags. -void initVLogMask() { - vLogMask = 0; - const std::string vLogSetting = android::base::GetProperty(kVLogPropKey, ""); - if (vLogSetting.empty()) { - return; - } - - std::unordered_map vLogFlags = {{"1", -1}, - {"all", -1}, - {"model", MODEL}, - {"compilation", COMPILATION}, - {"execution", EXECUTION}, - {"cpuexe", CPUEXE}, - {"manager", MANAGER}, - {"driver", DRIVER}, - {"memory", MEMORY}}; - - std::vector elements = android::base::Split(vLogSetting, " ,:"); - for (const auto& elem : elements) { - const auto& flag = vLogFlags.find(elem); - if (flag == vLogFlags.end()) { - LOG(ERROR) << "Unknown trace flag: " << elem; - continue; - } - - if (flag->second == -1) { - // -1 is used for the special values "1" and "all" that enable all - // tracing. - vLogMask = ~0; - return; - } else { - vLogMask |= 1 << flag->second; - } - } -} - -TimeoutDuration makeTimeoutDuration(uint64_t nanoseconds) { - // According to the standard, std::chrono::nanoseconds::rep is a signed - // integer type of at least 64 bits. This check prevents an overflow when - // rep is exactly 64 bits. 
- if constexpr (sizeof(std::chrono::nanoseconds::rep) == sizeof(int64_t)) { - nanoseconds = std::min(nanoseconds, - static_cast(std::chrono::nanoseconds::max().count())); - } - return std::chrono::nanoseconds{nanoseconds}; -} - -Deadline makeDeadline(TimeoutDuration duration) { - const auto maxTime = Deadline::max(); - const auto currentTime = std::chrono::steady_clock::now(); - - // If there would be an overflow, use the max value. - if (duration > maxTime - currentTime) { - return maxTime; - } - return currentTime + duration; -} - static uint64_t getMaxNanosecondsSinceEpoch() { const auto maxTime = std::chrono::time_point::max(); @@ -146,157 +62,14 @@ std::optional makeDeadline(const V1_3::OptionalTimePoint& timePoint) { return Deadline{std::chrono::nanoseconds{nanosecondsSinceEpoch}}; } -bool hasDeadlinePassed(const std::optional& deadline) { - if (!deadline.has_value()) { - return false; - } - return std::chrono::steady_clock::now() >= *deadline; -} - -static OptionalTimePoint makeTimePoint(const Deadline& deadline) { - return deadline; -} - -OptionalTimePoint makeTimePoint(const std::optional& deadline) { - return deadline.has_value() ? makeTimePoint(*deadline) : OptionalTimePoint{}; -} - -static bool isExtensionOperandType(int32_t type) { - return (static_cast(type) >> kExtensionTypeBits) != 0; -} - -static bool isExtensionOperationType(ANeuralNetworksOperationType type) { - return (static_cast(type) >> kExtensionTypeBits) != 0; -} - bool isExtensionOperandType(V1_3::OperandType type) { - return isExtensionOperandType(static_cast(type)); + return isExtensionOperandType(static_cast(type)); } bool isExtensionOperationType(V1_3::OperationType type) { - return isExtensionOperationType(static_cast(type)); -} - -namespace { - -template -EntryType tableLookup(const EntryType (&table)[entryCount], - const EntryType (&tableOEM)[entryCountOEM], uint32_t code) { - if (code < entryCount) { - return table[code]; - } else if (code >= kOEMCodeBase && (code - kOEMCodeBase) < entryCountOEM) { - return tableOEM[code - kOEMCodeBase]; - } else { - nnAssert(!"tableLookup: bad code"); - return EntryType(); - } -} - -static Version convert(HalVersion halVersion) { - switch (halVersion) { - case HalVersion::UNKNOWN: - break; - case HalVersion::V1_0: - return Version::ANDROID_OC_MR1; - case HalVersion::V1_1: - return Version::ANDROID_P; - case HalVersion::V1_2: - return Version::ANDROID_Q; - case HalVersion::V1_3: - return Version::ANDROID_R; - } - LOG(FATAL) << "Cannot convert " << halVersion; - return {}; -} - -class OperationValidationContext : public IOperationValidationContext { - DISALLOW_IMPLICIT_CONSTRUCTORS(OperationValidationContext); - - public: - OperationValidationContext(const char* operationName, uint32_t inputCount, - const uint32_t* inputIndexes, uint32_t outputCount, - const uint32_t* outputIndexes, const Operand* operands) - : operationName(operationName), - inputCount(inputCount), - inputIndexes(inputIndexes), - outputCount(outputCount), - outputIndexes(outputIndexes), - operands(operands) {} - - const char* getOperationName() const override; - - uint32_t getNumInputs() const override; - OperandType getInputType(uint32_t index) const override; - Shape getInputShape(uint32_t index) const override; - const Operand::ExtraParams& getInputExtraParams(uint32_t index) const override; - - uint32_t getNumOutputs() const override; - OperandType getOutputType(uint32_t index) const override; - Shape getOutputShape(uint32_t index) const override; - - private: - const Operand* getInputOperand(uint32_t 
index) const; - const Operand* getOutputOperand(uint32_t index) const; - - const char* operationName; - uint32_t inputCount; - const uint32_t* inputIndexes; - uint32_t outputCount; - const uint32_t* outputIndexes; - const Operand* operands; - Version version; -}; - -const char* OperationValidationContext::getOperationName() const { - return operationName; -} - -const Operand* OperationValidationContext::getInputOperand(uint32_t index) const { - CHECK(index < static_cast(inputCount)); - return &operands[inputIndexes[index]]; -} - -const Operand* OperationValidationContext::getOutputOperand(uint32_t index) const { - CHECK(index < static_cast(outputCount)); - return &operands[outputIndexes[index]]; -} - -uint32_t OperationValidationContext::getNumInputs() const { - return inputCount; -} - -uint32_t OperationValidationContext::getNumOutputs() const { - return outputCount; -} - -OperandType OperationValidationContext::getInputType(uint32_t index) const { - return getInputOperand(index)->type; -} - -Shape OperationValidationContext::getInputShape(uint32_t index) const { - const Operand* operand = getInputOperand(index); - return {operand->type, operand->dimensions, operand->scale, operand->zeroPoint, - operand->extraParams}; -} - -const Operand::ExtraParams& OperationValidationContext::getInputExtraParams(uint32_t index) const { - return getInputOperand(index)->extraParams; -} - -OperandType OperationValidationContext::getOutputType(uint32_t index) const { - return getOutputOperand(index)->type; -} - -Shape OperationValidationContext::getOutputShape(uint32_t index) const { - const Operand* operand = getOutputOperand(index); - return {operand->type, operand->dimensions, operand->scale, operand->zeroPoint, - operand->extraParams}; + return isExtensionOperationType(static_cast(type)); } -}; // anonymous namespace - -#define COUNT(X) (sizeof(X) / sizeof(X[0])) - std::string getOperandTypeName(V1_3::OperandType type) { return toString(type); } @@ -305,166 +78,27 @@ std::string getOperationName(V1_3::OperationType type) { return toString(type); } -const uint32_t kSizeOfDataType[]{ - 4, // ANEURALNETWORKS_FLOAT32 - 4, // ANEURALNETWORKS_INT32 - 4, // ANEURALNETWORKS_UINT32 - 4, // ANEURALNETWORKS_TENSOR_FLOAT32 - 4, // ANEURALNETWORKS_TENSOR_INT32 - 1, // ANEURALNETWORKS_TENSOR_QUANT8_ASYMM - 1, // ANEURALNETWORKS_BOOL - 2, // ANEURALNETWORKS_TENSOR_QUANT16_SYMM - 2, // ANEURALNETWORKS_TENSOR_FLOAT16 - 1, // ANEURALNETWORKS_TENSOR_BOOL8 - 2, // ANEURALNETWORKS_FLOAT16 - 1, // ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL - 2, // ANEURALNETWORKS_TENSOR_QUANT16_ASYMM - 1, // ANEURALNETWORKS_TENSOR_QUANT8_SYMM - 1, // ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED - 0, // ANEURALNETWORKS_MODEL -}; - -static_assert(COUNT(kSizeOfDataType) == kNumberOfDataTypes, "kSizeOfDataType is incorrect"); - -const bool kScalarDataType[]{ - true, // ANEURALNETWORKS_FLOAT32 - true, // ANEURALNETWORKS_INT32 - true, // ANEURALNETWORKS_UINT32 - false, // ANEURALNETWORKS_TENSOR_FLOAT32 - false, // ANEURALNETWORKS_TENSOR_INT32 - false, // ANEURALNETWORKS_TENSOR_QUANT8_ASYMM - true, // ANEURALNETWORKS_BOOL - false, // ANEURALNETWORKS_TENSOR_QUANT16_SYMM - false, // ANEURALNETWORKS_TENSOR_FLOAT16 - false, // ANEURALNETWORKS_TENSOR_BOOL8 - true, // ANEURALNETWORKS_FLOAT16 - false, // ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL - false, // ANEURALNETWORKS_TENSOR_QUANT16_ASYMM - false, // ANEURALNETWORKS_TENSOR_QUANT8_SYMM - false, // ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED - true, // ANEURALNETWORKS_MODEL -}; - 
-static_assert(COUNT(kScalarDataType) == kNumberOfDataTypes, "kScalarDataType is incorrect"); - -const uint32_t kSizeOfDataTypeOEM[]{ - 0, // ANEURALNETWORKS_OEM - 1, // ANEURALNETWORKS_TENSOR_OEM_BYTE -}; - -static_assert(COUNT(kSizeOfDataTypeOEM) == kNumberOfDataTypesOEM, - "kSizeOfDataTypeOEM is incorrect"); - -const bool kScalarDataTypeOEM[]{ - true, // ANEURALNETWORKS_OEM - false, // ANEURALNETWORKS_TENSOR_OEM_BYTE -}; - -static_assert(COUNT(kScalarDataTypeOEM) == kNumberOfDataTypesOEM, - "kScalarDataTypeOEM is incorrect"); - -bool nonExtensionOperandTypeIsScalar(int type) { - CHECK(!isExtensionOperandType(type)) << "Extension operand types are not supported"; - return tableLookup(kScalarDataType, kScalarDataTypeOEM, type); -} - -uint32_t nonExtensionOperandSizeOfData(OperandType type, const std::vector& dimensions) { - const size_t size = getNonExtensionSize(type, dimensions).value(); - CHECK_LE(size, std::numeric_limits::max()); - return size; -} - uint32_t nonExtensionOperandSizeOfData(V1_3::OperandType type, const std::vector& dimensions) { return nonExtensionOperandSizeOfData(uncheckedConvert(type), dimensions); } -// Returns a pair of {false, size} on success, {true, 0} if size overflows uint32_t. -static std::pair sizeOfTensorDataHelper(uint32_t sizeOfElement, - const std::vector& dimensions) { - if (dimensions.empty()) { - return {false, 0}; - } - uint64_t size = static_cast(sizeOfElement); - constexpr uint64_t kMaxSize = static_cast(std::numeric_limits::max()); - for (uint32_t d : dimensions) { - size *= d; - if (size > kMaxSize) return {true, 0}; - } - return {false, static_cast(size)}; -} - -uint32_t sizeOfTensorData(uint32_t sizeOfElement, const std::vector& dimensions) { - const auto [overflow, size] = sizeOfTensorDataHelper(sizeOfElement, dimensions); - CHECK(!overflow); - return size; -} - -bool nonExtensionOperandSizeOfDataOverflowsUInt32(OperandType type, - const std::vector& dimensions) { - CHECK(!isExtension(type)) << "Size of extension operand data is unknown"; - int n = static_cast(type); - uint32_t sizeOfElement = tableLookup(kSizeOfDataType, kSizeOfDataTypeOEM, n); - return tableLookup(kScalarDataType, kScalarDataTypeOEM, n) - ? 
false - : sizeOfTensorDataOverflowsUInt32(sizeOfElement, dimensions); -} - bool nonExtensionOperandSizeOfDataOverflowsUInt32(V1_3::OperandType type, const std::vector& dimensions) { return nonExtensionOperandSizeOfDataOverflowsUInt32(uncheckedConvert(type), dimensions); } -bool sizeOfTensorDataOverflowsUInt32(uint32_t sizeOfElement, - const std::vector& dimensions) { - return sizeOfTensorDataHelper(sizeOfElement, dimensions).first; -} - -bool tensorHasUnspecifiedDimensions(int type, const uint32_t* dim, uint32_t dimCount) { - if (!isExtensionOperandType(type)) { - CHECK(!nonExtensionOperandTypeIsScalar(type)) - << "A scalar type can never have unspecified dimensions"; - } - return dimCount == 0 || std::find(dim, dim + dimCount, 0) != (dim + dimCount); -} - -bool tensorHasUnspecifiedDimensions(OperandType type, const std::vector& dimensions) { - return tensorHasUnspecifiedDimensions(static_cast(type), dimensions.data(), - dimensions.size()); -} - bool tensorHasUnspecifiedDimensions(V1_3::OperandType type, const std::vector& dimensions) { return tensorHasUnspecifiedDimensions(static_cast(type), dimensions.data(), dimensions.size()); } -bool tensorHasUnspecifiedDimensions(const ANeuralNetworksOperandType* type) { - return tensorHasUnspecifiedDimensions(type->type, type->dimensions, type->dimensionCount); -} - -bool tensorHasUnspecifiedDimensions(const Operand& operand) { - return tensorHasUnspecifiedDimensions(operand.type, operand.dimensions); -} - bool tensorHasUnspecifiedDimensions(const V1_3::Operand& operand) { return tensorHasUnspecifiedDimensions(static_cast(operand.type), operand.dimensions.data(), operand.dimensions.size()); } -uint32_t alignBytesNeeded(uint32_t index, size_t length) { - uint32_t pattern; - if (length < 2) { - pattern = 0; // No alignment necessary - } else if (length < 4) { - pattern = 1; // Align on 2-byte boundary - } else { - pattern = 3; // Align on 4-byte boundary - } - uint32_t extra = (~(index - 1)) & pattern; - return extra; -} - void logModelToInfo(const V1_0::Model& model) { LOG(INFO) << "V1_0::Model start"; LOG(INFO) << "operands" << toString(model.operands); @@ -516,11 +150,6 @@ void logModelToInfo(const V1_3::Model& model) { LOG(INFO) << "extensionNameToPrefix" << toString(model.extensionNameToPrefix); } -void logModelToInfo(const Model& model) { - LOG(INFO) << "Model start"; - logModelToInfo(convertToV1_3(model)); -} - bool validateOperandSymmPerChannelQuantParams( const V1_3::Operand& halOperand, const ANeuralNetworksSymmPerChannelQuantParams& channelQuant, const char* tag) { @@ -539,177 +168,6 @@ bool validateOperandSymmPerChannelQuantParams( return true; } -static bool validateScalarDimensions(const ANeuralNetworksOperandType& type, const char* tag) { - NN_RET_CHECK_EQ(type.dimensionCount, 0u) << tag << " invalid dimensions for scalar type"; - NN_RET_CHECK(type.dimensions == nullptr) << tag << " invalid dimensions for scalar type"; - return true; -} - -static bool validateQuant8AsymmParams(const ANeuralNetworksOperandType& type, const char* tag) { - NN_RET_CHECK(0 <= type.zeroPoint && type.zeroPoint <= 255) - << tag << " invalid zeroPoint: " << type.zeroPoint; - NN_RET_CHECK_GT(type.scale, 0.f) << tag << " invalid scale"; - return true; -} - -static bool validateQuant8AsymmSignedParams(const ANeuralNetworksOperandType& type, - const char* tag) { - NN_RET_CHECK(-128 <= type.zeroPoint && type.zeroPoint <= 127) - << tag << " invalid zeroPoint: " << type.zeroPoint; - NN_RET_CHECK_GT(type.scale, 0.f) << tag << " invalid scale"; - return true; -} - 
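The validateQuant*Params helpers removed in this hunk (continuing just below)
all enforce the same per-type quantization envelope: scale must be strictly
positive, and zeroPoint must lie in [0, 255] for TENSOR_QUANT8_ASYMM,
[-128, 127] for TENSOR_QUANT8_ASYMM_SIGNED, [0, 65535] for
TENSOR_QUANT16_ASYMM, and be exactly 0 for the symmetric types. A
self-contained restatement of those rules (QuantKind and quantParamsValid are
hypothetical names, not part of this patch):

    #include <cstdint>

    enum class QuantKind { Asymm8, AsymmSigned8, Symm8, Asymm16, Symm16 };

    // Mirrors the zeroPoint/scale checks in the validateQuant*Params helpers.
    bool quantParamsValid(QuantKind kind, int32_t zeroPoint, float scale) {
        if (scale <= 0.0f) return false;  // every quantized type needs scale > 0
        switch (kind) {
            case QuantKind::Asymm8:       return 0 <= zeroPoint && zeroPoint <= 255;
            case QuantKind::AsymmSigned8: return -128 <= zeroPoint && zeroPoint <= 127;
            case QuantKind::Asymm16:      return 0 <= zeroPoint && zeroPoint <= 65535;
            case QuantKind::Symm8:
            case QuantKind::Symm16:       return zeroPoint == 0;  // symmetric: fixed at 0
        }
        return false;
    }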
-static bool validateQuant8SymmParams(const ANeuralNetworksOperandType& type, const char* tag) { - NN_RET_CHECK_EQ(type.zeroPoint, 0) << tag << " invalid zeroPoint: " << type.zeroPoint; - NN_RET_CHECK_GT(type.scale, 0.f) << tag << " invalid scale"; - return true; -} - -static bool validateQuant16AsymmParams(const ANeuralNetworksOperandType& type, const char* tag) { - NN_RET_CHECK(0 <= type.zeroPoint && type.zeroPoint <= 65535) - << tag << " invalid zeroPoint: " << type.zeroPoint; - NN_RET_CHECK_GT(type.scale, 0.f) << tag << " invalid scale"; - return true; -} - -static bool validateQuantSymmParams(const ANeuralNetworksOperandType& type, const char* tag) { - NN_RET_CHECK_EQ(type.zeroPoint, 0) << tag << " zeroPoint is not zero"; - NN_RET_CHECK_GT(type.scale, 0.f) << tag << " invalid scale"; - return true; -} - -static bool validateNoQuantParams(const ANeuralNetworksOperandType& type, const char* tag) { - NN_RET_CHECK_EQ(type.zeroPoint, 0) << tag << " zeroPoint is not zero"; - NN_RET_CHECK_EQ(type.scale, 0.f) << tag << " scale is not zero"; - return true; -} - -static bool validateTensorDimensions( - const ANeuralNetworksOperandType& type, - const Extension::OperandTypeInformation* const extensionOperandTypeInfo, const char* tag, - bool allowPartial) { - if (!allowPartial) { - NN_RET_CHECK_GT(type.dimensionCount, 0u) << tag << " invalid operand dimensions"; - } - uint64_t size = - isExtensionOperandType(type.type) - ? extensionOperandTypeInfo->byteSize - : tableLookup(kSizeOfDataType, kSizeOfDataTypeOEM, static_cast(type.type)); - constexpr uint64_t kMaxSize = std::numeric_limits::max(); - for (uint32_t i = 0; i < type.dimensionCount; i++) { - if (!allowPartial) { - NN_RET_CHECK_NE(type.dimensions[i], 0u) << tag << " invalid operand dimensions"; - } - if (type.dimensions[i] != 0) { - size *= type.dimensions[i]; - NN_RET_CHECK_LE(size, kMaxSize) << tag << " operand byte size exceeds " << kMaxSize; - } - } - return true; -} - -static bool validateOperandTypeHelper( - const ANeuralNetworksOperandType& type, - const Extension::OperandTypeInformation* const extensionOperandTypeInfo, const char* tag, - bool allowPartial) { - NN_RET_CHECK_EQ(type.dimensionCount == 0, type.dimensions == nullptr); - if (isExtensionOperandType(type.type)) { - NN_RET_CHECK(extensionOperandTypeInfo != nullptr); - if (extensionOperandTypeInfo->isTensor) { - NN_RET_CHECK( - validateTensorDimensions(type, extensionOperandTypeInfo, tag, allowPartial)); - } else { - NN_RET_CHECK(validateScalarDimensions(type, tag)); - } - return validateNoQuantParams(type, tag); - } - - NN_RET_CHECK(extensionOperandTypeInfo == nullptr); - NN_RET_CHECK(validCode(kNumberOfDataTypes, kNumberOfDataTypesOEM, type.type)) - << tag << " invalid OperandType: " << type.type; - - bool isScalar = tableLookup(kScalarDataType, kScalarDataTypeOEM, type.type); - if (isScalar) { - NN_RET_CHECK(validateScalarDimensions(type, tag)); - if (type.type != ANEURALNETWORKS_OEM_SCALAR) { // Historically, we have allowed OEM types - // to use quantization parameters. 
- NN_RET_CHECK(validateNoQuantParams(type, tag)); - } - } else { - NN_RET_CHECK(validateTensorDimensions(type, extensionOperandTypeInfo, tag, allowPartial)); - if (type.type == ANEURALNETWORKS_TENSOR_QUANT8_ASYMM) { - NN_RET_CHECK(validateQuant8AsymmParams(type, tag)); - } else if (type.type == ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED) { - NN_RET_CHECK(validateQuant8AsymmSignedParams(type, tag)); - } else if (type.type == ANEURALNETWORKS_TENSOR_QUANT8_SYMM) { - NN_RET_CHECK(validateQuant8SymmParams(type, tag)); - } else if (type.type == ANEURALNETWORKS_TENSOR_QUANT16_ASYMM) { - NN_RET_CHECK(validateQuant16AsymmParams(type, tag)); - } else if (type.type == ANEURALNETWORKS_TENSOR_QUANT16_SYMM) { - NN_RET_CHECK(validateQuantSymmParams(type, tag)); - } else if (type.type == ANEURALNETWORKS_TENSOR_INT32) { - // TODO(b/119869082): TENSOR_INT32 should not use quantization parameters. - } else if (type.type == ANEURALNETWORKS_TENSOR_OEM_BYTE) { - // Historically, we have allowed OEM types to use quantization parameters. - } else { - NN_RET_CHECK(validateNoQuantParams(type, tag)); - } - } - - return true; -} - -int validateOperandType(const ANeuralNetworksOperandType& type, - const Extension::OperandTypeInformation* const extensionOperandTypeInfo, - const char* tag, bool allowPartial) { - return validateOperandTypeHelper(type, extensionOperandTypeInfo, tag, allowPartial) - ? ANEURALNETWORKS_NO_ERROR - : ANEURALNETWORKS_BAD_DATA; -} - -int validateOperandList(uint32_t count, const uint32_t* list, uint32_t operandCount, - const char* tag) { - for (uint32_t i = 0; i < count; i++) { - if (list[i] >= operandCount) { - LOG(ERROR) << tag << " invalid operand index at " << i << " = " << list[i] - << ", operandCount " << operandCount; - return ANEURALNETWORKS_BAD_DATA; - } - } - return ANEURALNETWORKS_NO_ERROR; -} - -int validateOperationOperandTypes(const std::vector& operands, uint32_t inOperandCount, - const uint32_t* inOperandIndexes, - const std::vector& inExpectedTypes, - uint32_t outOperandCount, const uint32_t* outOperandIndexes, - const std::vector& outExpectedInTypes) { - if (inOperandCount != static_cast(inExpectedTypes.size()) || - outOperandCount != static_cast(outExpectedInTypes.size())) { - LOG(ERROR) << "Wrong operand count: expected " << inExpectedTypes.size() << " inputs and " - << outExpectedInTypes.size() << " outputs," - << "got " << inOperandCount << " inputs and " << outOperandCount << " outputs"; - return ANEURALNETWORKS_BAD_DATA; - } - for (uint32_t i = 0; i < inOperandCount; i++) { - if (operands[inOperandIndexes[i]].type != inExpectedTypes[i]) { - LOG(ERROR) << "Invalid input tensor type " << operands[inOperandIndexes[i]].type - << " for input " << i << ", expected " << inExpectedTypes[i]; - return ANEURALNETWORKS_BAD_DATA; - } - } - for (uint32_t i = 0; i < outOperandCount; i++) { - if (operands[outOperandIndexes[i]].type != outExpectedInTypes[i]) { - LOG(ERROR) << "Invalid output tensor type " << operands[outOperandIndexes[i]].type - << " for input " << i << ", expected " << outExpectedInTypes[i]; - return ANEURALNETWORKS_BAD_DATA; - } - } - - return ANEURALNETWORKS_NO_ERROR; -} - static int validateHalVersion(ANeuralNetworksOperationType opType, HalVersion halVersion, HalVersion minSupportedHalVersion) { if (halVersion < minSupportedHalVersion) { @@ -721,156 +179,6 @@ static int validateHalVersion(ANeuralNetworksOperationType opType, HalVersion ha return ANEURALNETWORKS_NO_ERROR; } -// Checks if two operands have the same types, ranks (if specified), dimensions -// (if 
specified), scales, zeroPoints, and extraParams. -static bool compatible(const Operand& a, const Operand& b) { - NN_RET_CHECK(a.type == b.type) << a.type << " != " << b.type; - if (a.dimensions.size() != 0 && b.dimensions.size() != 0) { - NN_RET_CHECK_EQ(a.dimensions.size(), b.dimensions.size()) << "Incompatible dimensions"; - for (uint32_t i = 0, n = a.dimensions.size(); i < n; ++i) { - if (a.dimensions[i] != 0 && b.dimensions[i] != 0) { - NN_RET_CHECK_EQ(a.dimensions[i], b.dimensions[i]) << "Incompatible dimensions"; - } - } - } - NN_RET_CHECK_EQ(a.scale, b.scale); - NN_RET_CHECK_EQ(a.zeroPoint, b.zeroPoint); - NN_RET_CHECK(a.extraParams == b.extraParams) << a.extraParams << " != " << b.extraParams; - return true; -} - -static bool validateConditionOperand(const Operand& operand) { - NN_RET_CHECK(operand.type == OperandType::TENSOR_BOOL8) - << "Unexpected condition operand type: " << operand.type; - NN_RET_CHECK_EQ(operand.dimensions.size(), 1u) << "Condition operand must be a singleton"; - NN_RET_CHECK_EQ(operand.dimensions[0], 1u) << "Condition operand must be a singleton"; - return true; -} - -static void checkSubgraphValidationHelper(const SubgraphValidationHelper& helper) { - CHECK(helper.isValidSubgraphReference != nullptr); - CHECK(helper.getSubgraphInputCount != nullptr); - CHECK(helper.getSubgraphOutputCount != nullptr); - CHECK(helper.getSubgraphInputOperand != nullptr); - CHECK(helper.getSubgraphOutputOperand != nullptr); -} - -static bool validateIfOperation(uint32_t inputCount, const uint32_t* inputs, uint32_t outputCount, - const uint32_t* outputs, const std::vector& operands, - const SubgraphValidationHelper& helper) { - namespace op = operation_if; - checkSubgraphValidationHelper(helper); - NN_RET_CHECK_GE(inputCount, 3u) << "ANEURALNETWORKS_IF must have at least 3 inputs"; - NN_RET_CHECK_GE(outputCount, 1u) << "ANEURALNETWORKS_IF must have at least 1 output"; - auto validateBranchOperand = [&](const Operand& branchModelOperand) -> bool { - NN_RET_CHECK(helper.isValidSubgraphReference(branchModelOperand)) - << "Operand is not a valid subgraph reference"; - const uint32_t branchModelInputCount = helper.getSubgraphInputCount(branchModelOperand); - const uint32_t branchModelOutputCount = helper.getSubgraphOutputCount(branchModelOperand); - NN_RET_CHECK_EQ(inputCount, op::kFirstInput + branchModelInputCount); - NN_RET_CHECK_EQ(outputCount, branchModelOutputCount); - for (uint32_t i = 0; i < branchModelInputCount; ++i) { - const Operand& innerOperand = *helper.getSubgraphInputOperand(branchModelOperand, i); - const Operand& outerOperand = operands[inputs[op::kFirstInput + i]]; - NN_RET_CHECK(compatible(innerOperand, outerOperand)); - } - for (uint32_t i = 0; i < branchModelOutputCount; ++i) { - const Operand& innerOperand = *helper.getSubgraphOutputOperand(branchModelOperand, i); - const Operand& outerOperand = operands[outputs[i]]; - NN_RET_CHECK(compatible(innerOperand, outerOperand)); - } - return true; - }; - NN_RET_CHECK(validateConditionOperand(operands[inputs[op::kCondBoolOperand]])) - << "Validation failed for IF condition operand"; - NN_RET_CHECK(validateBranchOperand(operands[inputs[op::kThenModelOperand]])) - << "Validation failed for IF then model"; - NN_RET_CHECK(validateBranchOperand(operands[inputs[op::kElseModelOperand]])) - << "Validation failed for IF else model"; - return true; -} - -static bool validateControlFlowOperandUnknownSize(const SubgraphValidationHelper& helper, - const Operand& operand) { - if 
(!helper.allowControlFlowOperationWithOperandOfUnknownSize && !isExtension(operand.type)) { - NN_RET_CHECK_NE(nonExtensionOperandSizeOfData(operand.type, operand.dimensions), 0u); - } - return true; -} - -static bool validateWhileOperation(uint32_t inputCount, const uint32_t* inputs, - uint32_t outputCount, const uint32_t* outputs, - const std::vector& operands, - const SubgraphValidationHelper& helper) { - // Let the loop have - // - m >= 1 input-output operands, - // - k >= 0 state-only operands, and - // - n >= 0 input-only operands. - // Then - // - the WHILE loop operation has (2 + m + k + n) inputs and m outputs. - // - the condition model has (m + k + n) inputs and 1 output. - // - the body model has (m + k + n) inputs and (m + k) outputs. - namespace op = operation_while; - checkSubgraphValidationHelper(helper); - NN_RET_CHECK_GE(inputCount, 3u) << "ANEURALNETWORKS_WHILE must have at least 3 inputs"; - NN_RET_CHECK_GE(outputCount, 1u) << "ANEURALNETWORKS_WHILE must have at least 1 output"; - auto validateCondOperand = [&](const Operand& condModelOperand) -> bool { - NN_RET_CHECK(helper.isValidSubgraphReference(condModelOperand)) - << "Operand is not a valid subgraph reference"; - const uint32_t condModelInputCount = helper.getSubgraphInputCount(condModelOperand); - const uint32_t condModelOutputCount = helper.getSubgraphOutputCount(condModelOperand); - NN_RET_CHECK_EQ(inputCount, op::kFirstInput + condModelInputCount); - NN_RET_CHECK_EQ(condModelOutputCount, 1u); - for (uint32_t i = 0; i < condModelInputCount; ++i) { - const Operand& innerOperand = *helper.getSubgraphInputOperand(condModelOperand, i); - const Operand& outerOperand = operands[inputs[op::kFirstInput + i]]; - NN_RET_CHECK(compatible(innerOperand, outerOperand)); - NN_RET_CHECK(validateControlFlowOperandUnknownSize(helper, innerOperand)); - NN_RET_CHECK(validateControlFlowOperandUnknownSize(helper, outerOperand)); - } - NN_RET_CHECK( - validateConditionOperand(*helper.getSubgraphOutputOperand(condModelOperand, 0))); - return true; - }; - auto validateBodyOperand = [&](const Operand& bodyModelOperand) -> bool { - NN_RET_CHECK(helper.isValidSubgraphReference(bodyModelOperand)) - << "Operand is not a valid subgraph reference"; - const uint32_t bodyModelInputCount = helper.getSubgraphInputCount(bodyModelOperand); - const uint32_t bodyModelOutputCount = helper.getSubgraphOutputCount(bodyModelOperand); - NN_RET_CHECK_EQ(inputCount, op::kFirstInput + bodyModelInputCount); - NN_RET_CHECK_GE(bodyModelOutputCount, outputCount); - NN_RET_CHECK_GE(bodyModelInputCount, bodyModelOutputCount); - const uint32_t inputOutputCount = outputCount; - const uint32_t stateOnlyCount = bodyModelOutputCount - inputOutputCount; - const uint32_t inputOnlyCount = bodyModelInputCount - bodyModelOutputCount; - for (uint32_t i = 0, n = inputOutputCount + stateOnlyCount + inputOnlyCount; i < n; ++i) { - const Operand& innerOperand = *helper.getSubgraphInputOperand(bodyModelOperand, i); - const Operand& outerOperand = operands[inputs[op::kFirstInput + i]]; - NN_RET_CHECK(compatible(innerOperand, outerOperand)); - NN_RET_CHECK(validateControlFlowOperandUnknownSize(helper, innerOperand)); - NN_RET_CHECK(validateControlFlowOperandUnknownSize(helper, outerOperand)); - } - for (uint32_t i = 0; i < inputOutputCount; ++i) { - const Operand& innerOperand = *helper.getSubgraphOutputOperand(bodyModelOperand, i); - const Operand& outerOperand = operands[outputs[i]]; - NN_RET_CHECK(compatible(innerOperand, outerOperand)); - 
NN_RET_CHECK(validateControlFlowOperandUnknownSize(helper, outerOperand)); - } - for (uint32_t i = 0, n = inputOutputCount + stateOnlyCount; i < n; ++i) { - const Operand& inputOperand = *helper.getSubgraphInputOperand(bodyModelOperand, i); - const Operand& outputOperand = *helper.getSubgraphOutputOperand(bodyModelOperand, i); - NN_RET_CHECK(compatible(inputOperand, outputOperand)); - NN_RET_CHECK(validateControlFlowOperandUnknownSize(helper, outputOperand)); - } - return true; - }; - NN_RET_CHECK(validateCondOperand(operands[inputs[op::kCondModelOperand]])) - << "Validation failed for WHILE condition model"; - NN_RET_CHECK(validateBodyOperand(operands[inputs[op::kBodyModelOperand]])) - << "Validation failed for WHILE body model"; - return true; -} - static inline int validateOperation(ANeuralNetworksOperationType opType, uint32_t inputCount, const uint32_t* inputIndexes, uint32_t outputCount, const uint32_t* outputIndexes, @@ -884,1077 +192,6 @@ static inline int validateOperation(ANeuralNetworksOperationType opType, uint32_ halVersion, {}); } -int validateOperation(ANeuralNetworksOperationType opType, uint32_t inputCount, - const uint32_t* inputIndexes, uint32_t outputCount, - const uint32_t* outputIndexes, const std::vector& operands, - HalVersion halVersion, const SubgraphValidationHelper& helper) { - NN_RETURN_IF_ERROR(validateOperandList(inputCount, inputIndexes, - static_cast(operands.size()), - "ANeuralNetworksModel_addOperation inputs")); - NN_RETURN_IF_ERROR(validateOperandList(outputCount, outputIndexes, - static_cast(operands.size()), - "ANeuralNetworksModel_addOperation outputs")); - - if (isExtensionOperationType(opType)) { - if (halVersion < HalVersion::V1_2) { - LOG(ERROR) - << "Extension operations are supported since HAL version 1.2, validating using " - << halVersion; - return ANEURALNETWORKS_BAD_DATA; - } - // There is no other validation we can do for an extension operation. 
- return ANEURALNETWORKS_NO_ERROR; - } - - auto logInvalidInOutNumber = [opType, inputCount, outputCount](int expIn, int expOut) { - LOG(ERROR) << "Invalid number of input operands (" << inputCount << ", expected " << expIn - << ") or output operands (" << outputCount << ", expected " << expOut - << ") for operation " << opType; - }; - - switch (opType) { - case ANEURALNETWORKS_OEM_OPERATION: { - return ANEURALNETWORKS_NO_ERROR; - } - case ANEURALNETWORKS_RESHAPE: { - if (inputCount != 2 || outputCount != 1) { - logInvalidInOutNumber(2, 1); - return ANEURALNETWORKS_BAD_DATA; - } - auto inputType = operands[inputIndexes[0]].type; - std::vector inExpectedTypes; - std::vector outExpectedTypes; - if (inputType == OperandType::TENSOR_FLOAT32) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0)); - inExpectedTypes = {OperandType::TENSOR_FLOAT32, OperandType::TENSOR_INT32}; - outExpectedTypes = {OperandType::TENSOR_FLOAT32}; - } else if (inputType == OperandType::TENSOR_FLOAT16) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); - inExpectedTypes = {OperandType::TENSOR_FLOAT16, OperandType::TENSOR_INT32}; - outExpectedTypes = {OperandType::TENSOR_FLOAT16}; - } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0)); - inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM, OperandType::TENSOR_INT32}; - outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM}; - } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3)); - inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED, - OperandType::TENSOR_INT32}; - outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED}; - } else { - LOG(ERROR) << "Unsupported input tensor type for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - const auto inputRank = operands[inputIndexes[0]].dimensions.size(); - if (inputRank > 4) { - LOG(ERROR) << "Unsupported input tensor rank for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - return validateOperationOperandTypes(operands, inputCount, inputIndexes, - inExpectedTypes, outputCount, outputIndexes, - outExpectedTypes); - } - case ANEURALNETWORKS_DEPTH_TO_SPACE: { - if ((inputCount != 3 && inputCount != 2) || outputCount != 1) { - LOG(ERROR) << "Invalid number of input operands (" << inputCount - << ", expected 3 or 2) or output operands (" << outputCount - << ", expected 1) for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - auto inputType = operands[inputIndexes[0]].type; - std::vector inExpectedTypes; - std::vector outExpectedTypes; - if (inputType == OperandType::TENSOR_FLOAT32) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0)); - inExpectedTypes = {OperandType::TENSOR_FLOAT32, OperandType::INT32}; - outExpectedTypes = {OperandType::TENSOR_FLOAT32}; - } else if (inputType == OperandType::TENSOR_FLOAT16) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); - inExpectedTypes = {OperandType::TENSOR_FLOAT16, OperandType::INT32}; - outExpectedTypes = {OperandType::TENSOR_FLOAT16}; - } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0)); - inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM, OperandType::INT32}; - outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM}; - } else if (inputType == 
OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3)); - inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED, OperandType::INT32}; - outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED}; - } else { - LOG(ERROR) << "Unsupported input tensor type for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - if (inputCount == 3) { - inExpectedTypes.push_back(OperandType::BOOL); - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); - } else { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0)); - } - return validateOperationOperandTypes(operands, inputCount, inputIndexes, - inExpectedTypes, outputCount, outputIndexes, - outExpectedTypes); - } - case ANEURALNETWORKS_SPACE_TO_DEPTH: { - if ((inputCount != 3 && inputCount != 2) || outputCount != 1) { - LOG(ERROR) << "Invalid number of input operands (" << inputCount - << ", expected 3 or 2) or output operands (" << outputCount - << ", expected 1) for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - auto inputType = operands[inputIndexes[0]].type; - std::vector inExpectedTypes; - std::vector outExpectedTypes; - if (inputType == OperandType::TENSOR_FLOAT32) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0)); - inExpectedTypes = {OperandType::TENSOR_FLOAT32, OperandType::INT32}; - outExpectedTypes = {OperandType::TENSOR_FLOAT32}; - } else if (inputType == OperandType::TENSOR_FLOAT16) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); - inExpectedTypes = {OperandType::TENSOR_FLOAT16, OperandType::INT32}; - outExpectedTypes = {OperandType::TENSOR_FLOAT16}; - } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0)); - inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM, OperandType::INT32}; - outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM}; - } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3)); - inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED, OperandType::INT32}; - outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED}; - } else { - LOG(ERROR) << "Unsupported input tensor type for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - if (inputCount == 3) { - inExpectedTypes.push_back(OperandType::BOOL); - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); - } else { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0)); - } - return validateOperationOperandTypes(operands, inputCount, inputIndexes, - inExpectedTypes, outputCount, outputIndexes, - outExpectedTypes); - } - case ANEURALNETWORKS_EMBEDDING_LOOKUP: { - if (inputCount != 2 || outputCount != 1) { - logInvalidInOutNumber(2, 1); - return ANEURALNETWORKS_BAD_DATA; - } - auto inputType = operands[inputIndexes[1]].type; - if (inputType != OperandType::TENSOR_FLOAT16 && - inputType != OperandType::TENSOR_FLOAT32 && - inputType != OperandType::TENSOR_INT32 && - inputType != OperandType::TENSOR_QUANT8_ASYMM && - inputType != OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - LOG(ERROR) << "Unsupported input tensor type for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - std::vector inExpectedTypes = {OperandType::TENSOR_INT32, inputType}; - std::vector outExpectedTypes = {inputType}; - if (inputType == 
OperandType::TENSOR_FLOAT16 || - inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3)); - } else if (inputType == OperandType::TENSOR_INT32 || - inputType == OperandType::TENSOR_QUANT8_ASYMM) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); - } else { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0)); - } - return validateOperationOperandTypes(operands, inputCount, inputIndexes, - inExpectedTypes, outputCount, outputIndexes, - outExpectedTypes); - } - case ANEURALNETWORKS_HASHTABLE_LOOKUP: { - if (inputCount != 3 || outputCount != 2) { - logInvalidInOutNumber(3, 2); - return ANEURALNETWORKS_BAD_DATA; - } - auto inputType = operands[inputIndexes[2]].type; - if (inputType != OperandType::TENSOR_FLOAT32 && - inputType != OperandType::TENSOR_INT32 && - inputType != OperandType::TENSOR_QUANT8_ASYMM) { - LOG(ERROR) << "Unsupported input tensor type for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - std::vector inExpectedTypes = {OperandType::TENSOR_INT32, - OperandType::TENSOR_INT32, inputType}; - std::vector outExpectedTypes = {inputType, - OperandType::TENSOR_QUANT8_ASYMM}; - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0)); - return validateOperationOperandTypes(operands, inputCount, inputIndexes, - inExpectedTypes, outputCount, outputIndexes, - outExpectedTypes); - } - case ANEURALNETWORKS_LSH_PROJECTION: { - if (inputCount != 4 || outputCount != 1) { - logInvalidInOutNumber(4, 1); - return ANEURALNETWORKS_BAD_DATA; - } - auto inputType = operands[inputIndexes[1]].type; - if (inputType != OperandType::TENSOR_FLOAT16 && - inputType != OperandType::TENSOR_FLOAT32 && - inputType != OperandType::TENSOR_INT32 && - inputType != OperandType::TENSOR_QUANT8_ASYMM) { - LOG(ERROR) << "Unsupported input tensor type for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - auto hashType = operands[inputIndexes[0]].type; - std::vector inExpectedTypes; - if (hashType == OperandType::TENSOR_FLOAT16) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); - inExpectedTypes = { - OperandType::TENSOR_FLOAT16, - inputType, - OperandType::TENSOR_FLOAT16, - OperandType::INT32, - }; - } else if (hashType == OperandType::TENSOR_FLOAT32) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0)); - inExpectedTypes = { - OperandType::TENSOR_FLOAT32, - inputType, - OperandType::TENSOR_FLOAT32, - OperandType::INT32, - }; - } else { - LOG(ERROR) << "Unsupported hash tensor type for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - std::vector outExpectedTypes = {OperandType::TENSOR_INT32}; - return validateOperationOperandTypes(operands, inputCount, inputIndexes, - inExpectedTypes, outputCount, outputIndexes, - outExpectedTypes); - } - case ANEURALNETWORKS_BIDIRECTIONAL_SEQUENCE_LSTM: { - const uint32_t kNumOutputs = 2; - const uint32_t kNumOutputsMerged = 1; - const uint32_t kNumOutputsWithState = 6; - const uint32_t kNumOutputsMergedWithState = 5; - if (inputCount != 61 || - (outputCount != kNumOutputs && outputCount != kNumOutputsMerged && - outputCount != kNumOutputsWithState && - outputCount != kNumOutputsMergedWithState)) { - LOG(ERROR) << "Invalid number of input operands (" << inputCount - << ", expected 61) or output operands (" << outputCount - << ", expected 1, 2, 5 or 6) for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - - std::vector 
inExpectedTypes; - auto inputType = operands[inputIndexes[0]].type; - if (inputType != OperandType::TENSOR_FLOAT32 && - inputType != OperandType::TENSOR_FLOAT16) { - LOG(ERROR) << "Unsupported input tensor type for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - - inExpectedTypes = {}; - for (int i = 0; i < 48; ++i) { - inExpectedTypes.push_back(inputType); - } - inExpectedTypes.push_back(OperandType::INT32); - inExpectedTypes.push_back(inputType == OperandType::TENSOR_FLOAT32 - ? OperandType::FLOAT32 - : OperandType::FLOAT16); - inExpectedTypes.push_back(inputType == OperandType::TENSOR_FLOAT32 - ? OperandType::FLOAT32 - : OperandType::FLOAT16); - inExpectedTypes.push_back(OperandType::BOOL); - inExpectedTypes.push_back(OperandType::BOOL); - for (int i = 0; i < 8; ++i) { - inExpectedTypes.push_back(inputType); - } - - HalVersion minSupportedHalVersion = HalVersion::V1_2; - if (outputCount == kNumOutputsWithState || outputCount == kNumOutputsMergedWithState) { - minSupportedHalVersion = HalVersion::V1_3; - } - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, minSupportedHalVersion)); - std::vector outExpectedTypes(outputCount, inputType); - auto status = validateOperationOperandTypes(operands, inputCount, inputIndexes, - inExpectedTypes, outputCount, outputIndexes, - outExpectedTypes); - return status; - } - case ANEURALNETWORKS_LSTM: { - if ((inputCount != 23 && inputCount != 27) || outputCount != 4) { - LOG(ERROR) << "Invalid number of input operands (" << inputCount - << ", expected 23 or 27) or output operands (" << outputCount - << ", expected 4) for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - std::vector inExpectedTypes; - std::vector outExpectedTypes; - auto inputType = operands[inputIndexes[0]].type; - if (inputType != OperandType::TENSOR_FLOAT32 && - inputType != OperandType::TENSOR_FLOAT16) { - LOG(ERROR) << "Unsupported input tensor type for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - - inExpectedTypes = {inputType, inputType, inputType, inputType, inputType, - inputType, inputType, inputType, inputType, inputType, - inputType, inputType, inputType, inputType, inputType, - inputType, inputType, inputType, inputType, inputType, - OperandType::INT32}; - if (inputType == OperandType::TENSOR_FLOAT32) { - inExpectedTypes.push_back(OperandType::FLOAT32); - inExpectedTypes.push_back(OperandType::FLOAT32); - } else { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); - inExpectedTypes.push_back(OperandType::FLOAT16); - inExpectedTypes.push_back(OperandType::FLOAT16); - } - - outExpectedTypes = {inputType, inputType, inputType, inputType}; - if (inputCount == 23) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0)); - } else { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); - for (int i = 0; i < 4; ++i) { - inExpectedTypes.push_back(inputType); - } - } - return validateOperationOperandTypes(operands, inputCount, inputIndexes, - inExpectedTypes, outputCount, outputIndexes, - outExpectedTypes); - } - case ANEURALNETWORKS_QUANTIZED_16BIT_LSTM: { - if (inputCount != 15 || outputCount != 2) { - logInvalidInOutNumber(15, 2); - return ANEURALNETWORKS_BAD_DATA; - } - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); - std::vector inExpectedTypes = { - OperandType::TENSOR_QUANT8_ASYMM, OperandType::TENSOR_QUANT8_ASYMM, - OperandType::TENSOR_QUANT8_ASYMM, OperandType::TENSOR_QUANT8_ASYMM, - 
OperandType::TENSOR_QUANT8_ASYMM, OperandType::TENSOR_QUANT8_ASYMM, - OperandType::TENSOR_QUANT8_ASYMM, OperandType::TENSOR_QUANT8_ASYMM, - OperandType::TENSOR_QUANT8_ASYMM, OperandType::TENSOR_INT32, - OperandType::TENSOR_INT32, OperandType::TENSOR_INT32, - OperandType::TENSOR_INT32, OperandType::TENSOR_QUANT16_SYMM, - OperandType::TENSOR_QUANT8_ASYMM}; - std::vector outExpectedTypes = {OperandType::TENSOR_QUANT16_SYMM, - OperandType::TENSOR_QUANT8_ASYMM}; - return validateOperationOperandTypes(operands, inputCount, inputIndexes, - inExpectedTypes, outputCount, outputIndexes, - outExpectedTypes); - } - case ANEURALNETWORKS_RANDOM_MULTINOMIAL: { - if (inputCount != 3 || outputCount != 1) { - logInvalidInOutNumber(3, 1); - return ANEURALNETWORKS_BAD_DATA; - } - OperandType inputType = operands[inputIndexes[0]].type; - std::vector inExpectedTypes; - if (inputType == OperandType::TENSOR_FLOAT32 || - inputType == OperandType::TENSOR_FLOAT16) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); - inExpectedTypes = { - inputType, - OperandType::INT32, - OperandType::TENSOR_INT32, - }; - } else { - LOG(ERROR) << "Unsupported input tensor type for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - std::vector outExpectedTypes = {OperandType::TENSOR_INT32}; - return validateOperationOperandTypes(operands, inputCount, inputIndexes, - inExpectedTypes, outputCount, outputIndexes, - outExpectedTypes); - } - case ANEURALNETWORKS_RNN: { - if (inputCount != 6 || outputCount != 2) { - logInvalidInOutNumber(6, 2); - return ANEURALNETWORKS_BAD_DATA; - } - OperandType inputType = operands[inputIndexes[0]].type; - std::vector inExpectedTypes; - std::vector outExpectedTypes; - if (inputType == OperandType::TENSOR_FLOAT32) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0)); - inExpectedTypes = { - OperandType::TENSOR_FLOAT32, OperandType::TENSOR_FLOAT32, - OperandType::TENSOR_FLOAT32, OperandType::TENSOR_FLOAT32, - OperandType::TENSOR_FLOAT32, OperandType::INT32, - }; - outExpectedTypes = { - OperandType::TENSOR_FLOAT32, - OperandType::TENSOR_FLOAT32, - }; - } else if (inputType == OperandType::TENSOR_FLOAT16) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); - inExpectedTypes = { - OperandType::TENSOR_FLOAT16, OperandType::TENSOR_FLOAT16, - OperandType::TENSOR_FLOAT16, OperandType::TENSOR_FLOAT16, - OperandType::TENSOR_FLOAT16, OperandType::INT32, - }; - outExpectedTypes = { - OperandType::TENSOR_FLOAT16, - OperandType::TENSOR_FLOAT16, - }; - } else { - LOG(ERROR) << "Unsupported input tensor type for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - return validateOperationOperandTypes(operands, inputCount, inputIndexes, - inExpectedTypes, outputCount, outputIndexes, - outExpectedTypes); - } - case ANEURALNETWORKS_SVDF: { - if (inputCount != 7 || outputCount != 2) { - logInvalidInOutNumber(7, 2); - return ANEURALNETWORKS_BAD_DATA; - } - OperandType inputType = operands[inputIndexes[0]].type; - if (inputType == OperandType::TENSOR_FLOAT32) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0)); - - } else if (inputType == OperandType::TENSOR_FLOAT16) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); - } else { - LOG(ERROR) << "Unsupported input tensor type for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - std::vector inExpectedTypes = { - inputType, inputType, inputType, inputType, - inputType, OperandType::INT32, 
OperandType::INT32, - }; - std::vector outExpectedTypes = {inputType, inputType}; - return validateOperationOperandTypes(operands, inputCount, inputIndexes, - inExpectedTypes, outputCount, outputIndexes, - outExpectedTypes); - } - case ANEURALNETWORKS_BATCH_TO_SPACE_ND: { - if ((inputCount != 3 && inputCount != 2) || outputCount != 1) { - LOG(ERROR) << "Invalid number of input operands (" << inputCount - << ", expected 3 or 2) or output operands (" << outputCount - << ", expected 1) for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - auto inputType = operands[inputIndexes[0]].type; - std::vector inExpectedTypes; - std::vector outExpectedTypes; - if (inputType == OperandType::TENSOR_FLOAT32) { - inExpectedTypes = { - OperandType::TENSOR_FLOAT32, - OperandType::TENSOR_INT32, - }; - outExpectedTypes = {OperandType::TENSOR_FLOAT32}; - } else if (inputType == OperandType::TENSOR_FLOAT16) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); - inExpectedTypes = { - OperandType::TENSOR_FLOAT16, - OperandType::TENSOR_INT32, - }; - outExpectedTypes = {OperandType::TENSOR_FLOAT16}; - } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) { - inExpectedTypes = { - OperandType::TENSOR_QUANT8_ASYMM, - OperandType::TENSOR_INT32, - }; - outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM}; - } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3)); - inExpectedTypes = { - OperandType::TENSOR_QUANT8_ASYMM_SIGNED, - OperandType::TENSOR_INT32, - }; - outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED}; - } else { - LOG(ERROR) << "Unsupported input tensor type for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - if (inputCount == 3) { - inExpectedTypes.push_back(OperandType::BOOL); - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); - } else { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_1)); - } - return validateOperationOperandTypes(operands, inputCount, inputIndexes, - inExpectedTypes, outputCount, outputIndexes, - outExpectedTypes); - } - case ANEURALNETWORKS_SPACE_TO_BATCH_ND: { - if ((inputCount != 4 && inputCount != 3) || outputCount != 1) { - LOG(ERROR) << "Invalid number of input operands (" << inputCount - << ", expected 4 or 3) or output operands (" << outputCount - << ", expected 1) for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - auto inputType = operands[inputIndexes[0]].type; - std::vector inExpectedTypes; - std::vector outExpectedTypes; - if (inputType == OperandType::TENSOR_FLOAT32) { - inExpectedTypes = { - OperandType::TENSOR_FLOAT32, - OperandType::TENSOR_INT32, - OperandType::TENSOR_INT32, - }; - outExpectedTypes = {OperandType::TENSOR_FLOAT32}; - } else if (inputType == OperandType::TENSOR_FLOAT16) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); - inExpectedTypes = { - OperandType::TENSOR_FLOAT16, - OperandType::TENSOR_INT32, - OperandType::TENSOR_INT32, - }; - outExpectedTypes = {OperandType::TENSOR_FLOAT16}; - } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) { - if (operands[inputIndexes[0]].zeroPoint != 0) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); - } - inExpectedTypes = { - OperandType::TENSOR_QUANT8_ASYMM, - OperandType::TENSOR_INT32, - OperandType::TENSOR_INT32, - }; - outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM}; - } else if (inputType == 
OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3)); - inExpectedTypes = { - OperandType::TENSOR_QUANT8_ASYMM_SIGNED, - OperandType::TENSOR_INT32, - OperandType::TENSOR_INT32, - }; - outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED}; - } else { - LOG(ERROR) << "Unsupported input tensor type for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - if (inputCount == 4) { - inExpectedTypes.push_back(OperandType::BOOL); - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); - } else { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_1)); - } - return validateOperationOperandTypes(operands, inputCount, inputIndexes, - inExpectedTypes, outputCount, outputIndexes, - outExpectedTypes); - } - case ANEURALNETWORKS_PAD: { - if (inputCount != 2 || outputCount != 1) { - logInvalidInOutNumber(2, 1); - return ANEURALNETWORKS_BAD_DATA; - } - auto inputType = operands[inputIndexes[0]].type; - std::vector inExpectedTypes; - std::vector outExpectedTypes; - if (inputType == OperandType::TENSOR_FLOAT32) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_1)); - inExpectedTypes = { - OperandType::TENSOR_FLOAT32, - OperandType::TENSOR_INT32, - }; - outExpectedTypes = {OperandType::TENSOR_FLOAT32}; - } else if (inputType == OperandType::TENSOR_FLOAT16) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); - inExpectedTypes = { - OperandType::TENSOR_FLOAT16, - OperandType::TENSOR_INT32, - }; - outExpectedTypes = {OperandType::TENSOR_FLOAT16}; - } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM || - inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3)); - } else { - if (operands[inputIndexes[0]].zeroPoint == 0) { - NN_RETURN_IF_ERROR( - validateHalVersion(opType, halVersion, HalVersion::V1_1)); - } else { - NN_RETURN_IF_ERROR( - validateHalVersion(opType, halVersion, HalVersion::V1_2)); - } - } - inExpectedTypes = { - inputType, - OperandType::TENSOR_INT32, - }; - outExpectedTypes = {inputType}; - } else { - LOG(ERROR) << "Unsupported input tensor type for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - const auto inputRank = operands[inputIndexes[0]].dimensions.size(); - if (inputRank > 4) { - LOG(ERROR) << "Unsupported input tensor rank for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - return validateOperationOperandTypes(operands, inputCount, inputIndexes, - inExpectedTypes, outputCount, outputIndexes, - outExpectedTypes); - } - case ANEURALNETWORKS_PAD_V2: { - if (inputCount != 3 || outputCount != 1) { - logInvalidInOutNumber(3, 1); - return ANEURALNETWORKS_BAD_DATA; - } - auto inputType = operands[inputIndexes[0]].type; - std::vector inExpectedTypes; - std::vector outExpectedTypes; - if (inputType == OperandType::TENSOR_FLOAT32) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); - inExpectedTypes = { - OperandType::TENSOR_FLOAT32, - OperandType::TENSOR_INT32, - OperandType::FLOAT32, - }; - outExpectedTypes = {OperandType::TENSOR_FLOAT32}; - } else if (inputType == OperandType::TENSOR_FLOAT16) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); - inExpectedTypes = { - OperandType::TENSOR_FLOAT16, - OperandType::TENSOR_INT32, - OperandType::FLOAT16, - }; - outExpectedTypes = 
{OperandType::TENSOR_FLOAT16}; - } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM || - inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3)); - } else { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); - } - inExpectedTypes = { - inputType, - OperandType::TENSOR_INT32, - OperandType::INT32, - }; // TODO(b/116699425): Make it UINT8. - outExpectedTypes = {inputType}; - } else { - LOG(ERROR) << "Unsupported input tensor type for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - const auto inputRank = operands[inputIndexes[0]].dimensions.size(); - if (inputRank > 4) { - LOG(ERROR) << "Unsupported input tensor rank for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - return validateOperationOperandTypes(operands, inputCount, inputIndexes, - inExpectedTypes, outputCount, outputIndexes, - outExpectedTypes); - } - case ANEURALNETWORKS_CAST: { - if (inputCount != 1 || outputCount != 1) { - logInvalidInOutNumber(1, 1); - return ANEURALNETWORKS_BAD_DATA; - } - auto inputOperand = operands[inputIndexes[0]]; - auto outputOperand = operands[outputIndexes[0]]; - auto inputType = inputOperand.type; - auto outputType = outputOperand.type; - std::vector inExpectedTypes; - std::vector outExpectedTypes; - if ((inputType == OperandType::TENSOR_FLOAT16 || - inputType == OperandType::TENSOR_FLOAT32 || - inputType == OperandType::TENSOR_INT32 || - inputType == OperandType::TENSOR_QUANT8_ASYMM) && - (outputType == OperandType::TENSOR_FLOAT16 || - outputType == OperandType::TENSOR_FLOAT32 || - outputType == OperandType::TENSOR_INT32 || - outputType == OperandType::TENSOR_QUANT8_ASYMM)) { - inExpectedTypes = {inputType}; - outExpectedTypes = {outputType}; - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); - } else if (inputType == OperandType::TENSOR_BOOL8 || - inputType == OperandType::TENSOR_QUANT16_ASYMM || - inputType == OperandType::TENSOR_QUANT16_SYMM || - inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED || - inputType == OperandType::TENSOR_QUANT8_SYMM) { - inExpectedTypes = {inputType}; - outExpectedTypes = {inputType}; // Only identity CAST is supported. - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3)); - } else { - LOG(ERROR) << "Unsupported data type for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - // Validate that output shape is equal to input shape if dimensions - // are already known. 
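// In the check below, an empty dimensions vector means the rank itself is
// unknown, and an element count of 0 means at least one dimension is
// unspecified; in both cases the input/output shape comparison is skipped
// rather than treated as a mismatch.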
- auto getNumberOfElements = [](const hardware::hidl_vec& dims) { - if (dims.size() == 0) { - return 0; - } - return std::accumulate(dims.begin(), dims.end(), 1, std::multiplies<>()); - }; - if (inputOperand.dimensions.size() != 0 && outputOperand.dimensions.size() != 0 && - getNumberOfElements(outputOperand.dimensions) != 0 && - inputOperand.dimensions != outputOperand.dimensions) { - return ANEURALNETWORKS_BAD_DATA; - } - return validateOperationOperandTypes(operands, inputCount, inputIndexes, - inExpectedTypes, outputCount, outputIndexes, - outExpectedTypes); - } - case ANEURALNETWORKS_MEAN: { - if (inputCount != 3 || outputCount != 1) { - logInvalidInOutNumber(3, 1); - return ANEURALNETWORKS_BAD_DATA; - } - const auto inputRank = operands[inputIndexes[0]].dimensions.size(); - if (inputRank > 4) { - LOG(ERROR) << "Unsupported input tensor rank for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - auto inputType = operands[inputIndexes[0]].type; - if (inputType == OperandType::TENSOR_FLOAT32) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_1)); - } else if (inputType == OperandType::TENSOR_FLOAT16) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); - } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_1)); - } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3)); - } else { - LOG(ERROR) << "Unsupported input tensor type for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - std::vector inExpectedTypes = {inputType, OperandType::TENSOR_INT32, - OperandType::INT32}; - std::vector outExpectedTypes = {inputType}; - return validateOperationOperandTypes(operands, inputCount, inputIndexes, - inExpectedTypes, outputCount, outputIndexes, - outExpectedTypes); - } - case ANEURALNETWORKS_ARGMAX: - case ANEURALNETWORKS_ARGMIN: { - if (inputCount != 2 || outputCount != 1) { - logInvalidInOutNumber(2, 1); - return ANEURALNETWORKS_BAD_DATA; - } - auto inputType = operands[inputIndexes[0]].type; - std::vector inExpectedTypes; - std::vector outExpectedTypes; - if (inputType == OperandType::TENSOR_FLOAT16 || - inputType == OperandType::TENSOR_FLOAT32 || - inputType == OperandType::TENSOR_INT32 || - inputType == OperandType::TENSOR_QUANT8_ASYMM || - inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - inExpectedTypes = {inputType, OperandType::INT32}; - outExpectedTypes = {OperandType::TENSOR_INT32}; - } else { - LOG(ERROR) << "Unsupported input tensor type for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); - return validateOperationOperandTypes(operands, inputCount, inputIndexes, - inExpectedTypes, outputCount, outputIndexes, - outExpectedTypes); - } - case ANEURALNETWORKS_EXPAND_DIMS: { - if (inputCount != 2 || outputCount != 1) { - logInvalidInOutNumber(2, 1); - return ANEURALNETWORKS_BAD_DATA; - } - auto inputType = operands[inputIndexes[0]].type; - std::vector inExpectedTypes; - std::vector outExpectedTypes; - if (inputType == OperandType::TENSOR_FLOAT16 || - inputType == OperandType::TENSOR_FLOAT32 || - inputType == OperandType::TENSOR_INT32 || - inputType == OperandType::TENSOR_QUANT8_ASYMM || - inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - inExpectedTypes = {inputType, OperandType::INT32}; - outExpectedTypes = {inputType}; - } else { 
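// Any other operand type (e.g. TENSOR_BOOL8 or the QUANT16 variants) is
// rejected for EXPAND_DIMS.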
- LOG(ERROR) << "Unsupported input tensor type for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3)); - } else { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); - } - return validateOperationOperandTypes(operands, inputCount, inputIndexes, - inExpectedTypes, outputCount, outputIndexes, - outExpectedTypes); - } - case ANEURALNETWORKS_SPLIT: { - if (inputCount != 3) { - LOG(ERROR) << "Invalid number of input operands (" << inputCount << ", expected 3)" - << opType; - return ANEURALNETWORKS_BAD_DATA; - } - auto inputType = operands[inputIndexes[0]].type; - if (inputType != OperandType::TENSOR_FLOAT16 && - inputType != OperandType::TENSOR_FLOAT32 && - inputType != OperandType::TENSOR_INT32 && - inputType != OperandType::TENSOR_QUANT8_ASYMM && - inputType != OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - LOG(ERROR) << "Unsupported input tensor type for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3)); - } else { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); - } - std::vector inExpectedTypes = {inputType, OperandType::INT32, - OperandType::INT32}; - std::vector outExpectedTypes(outputCount, inputType); - return validateOperationOperandTypes(operands, inputCount, inputIndexes, - inExpectedTypes, outputCount, outputIndexes, - outExpectedTypes); - } - case ANEURALNETWORKS_MAXIMUM: - case ANEURALNETWORKS_MINIMUM: { - if (inputCount != 2 || outputCount != 1) { - logInvalidInOutNumber(2, 1); - return ANEURALNETWORKS_BAD_DATA; - } - std::vector inExpectedTypes; - std::vector outExpectedTypes; - OperandType inputType = operands[inputIndexes[0]].type; - if (inputType == OperandType::TENSOR_FLOAT16 || - inputType == OperandType::TENSOR_FLOAT32 || - inputType == OperandType::TENSOR_INT32 || - inputType == OperandType::TENSOR_QUANT8_ASYMM || - inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - inExpectedTypes = {inputType, inputType}; - outExpectedTypes = {inputType}; - } else { - LOG(ERROR) << "Unsupported input tensor type for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3)); - } else { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); - } - return validateOperationOperandTypes(operands, inputCount, inputIndexes, - inExpectedTypes, outputCount, outputIndexes, - outExpectedTypes); - } - case ANEURALNETWORKS_GROUPED_CONV_2D: { - if ((inputCount != 12 && inputCount != 9) || outputCount != 1) { - LOG(ERROR) << "Invalid number of input operands (" << inputCount - << ", expected 12 or 9) or output operands (" << outputCount - << ", expected 1) for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - auto inputType = operands[inputIndexes[0]].type; - auto filterType = operands[inputIndexes[1]].type; - std::vector inExpectedTypes; - std::vector outExpectedTypes; - if (inputType == OperandType::TENSOR_FLOAT32) { - inExpectedTypes = {OperandType::TENSOR_FLOAT32, OperandType::TENSOR_FLOAT32, - OperandType::TENSOR_FLOAT32, OperandType::INT32, - OperandType::INT32, OperandType::INT32, - OperandType::INT32, OperandType::INT32}; - outExpectedTypes = 
{OperandType::TENSOR_FLOAT32}; - } else if (inputType == OperandType::TENSOR_FLOAT16) { - inExpectedTypes = {OperandType::TENSOR_FLOAT16, OperandType::TENSOR_FLOAT16, - OperandType::TENSOR_FLOAT16, OperandType::INT32, - OperandType::INT32, OperandType::INT32, - OperandType::INT32, OperandType::INT32}; - outExpectedTypes = {OperandType::TENSOR_FLOAT16}; - } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM || - inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - if (filterType != inputType && - filterType != OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL) { - LOG(ERROR) << "Unsupported filter tensor type for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - - if (filterType == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL && - std::get( - operands[inputIndexes[1]].extraParams) - .channelDim != 0) { - LOG(ERROR) << "Unsupported filter tensor channel dimension for operation " - << opType; - return ANEURALNETWORKS_BAD_DATA; - } - - inExpectedTypes = { - inputType, filterType, OperandType::TENSOR_INT32, - OperandType::INT32, OperandType::INT32, OperandType::INT32, - OperandType::INT32, OperandType::INT32}; - outExpectedTypes = {inputType}; - } else { - LOG(ERROR) << "Unsupported input tensor type for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - - if (inputCount == 12) { - std::vector explicitScalarTypes(3, OperandType::INT32); - inExpectedTypes.insert(inExpectedTypes.end(), explicitScalarTypes.begin(), - explicitScalarTypes.end()); - } - inExpectedTypes.push_back(OperandType::BOOL); - if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3)); - } else { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); - } - return validateOperationOperandTypes(operands, inputCount, inputIndexes, - inExpectedTypes, outputCount, outputIndexes, - outExpectedTypes); - } - case ANEURALNETWORKS_TILE: { - if (inputCount != 2 || outputCount != 1) { - logInvalidInOutNumber(2, 1); - return ANEURALNETWORKS_BAD_DATA; - } - auto inputType = operands[inputIndexes[0]].type; - std::vector inExpectedTypes; - std::vector outExpectedTypes; - if (inputType == OperandType::TENSOR_FLOAT16 || - inputType == OperandType::TENSOR_FLOAT32 || - inputType == OperandType::TENSOR_INT32 || - inputType == OperandType::TENSOR_QUANT8_ASYMM || - inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - inExpectedTypes = {inputType, OperandType::TENSOR_INT32}; - outExpectedTypes = {inputType}; - } else { - LOG(ERROR) << "Unsupported input tensor type for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3)); - } else { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); - } - return validateOperationOperandTypes(operands, inputCount, inputIndexes, - inExpectedTypes, outputCount, outputIndexes, - outExpectedTypes); - } - case ANEURALNETWORKS_POW: { - if (inputCount != 2 || outputCount != 1) { - logInvalidInOutNumber(2, 1); - return ANEURALNETWORKS_BAD_DATA; - } - auto inputType = operands[inputIndexes[0]].type; - std::vector inExpectedTypes; - std::vector outExpectedTypes; - if (inputType == OperandType::TENSOR_FLOAT16 || - inputType == OperandType::TENSOR_FLOAT32) { - inExpectedTypes = {inputType, inputType}; - outExpectedTypes = {inputType}; - } else { - LOG(ERROR) << "Unsupported input tensor type for operation " << opType; 
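// Only TENSOR_FLOAT16 and TENSOR_FLOAT32 survive to the HAL-version check
// below, so the TENSOR_QUANT8_ASYMM_SIGNED branch there is unreachable and
// POW always requires at least HAL V1_2.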
- return ANEURALNETWORKS_BAD_DATA; - } - if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3)); - } else { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2)); - } - return validateOperationOperandTypes(operands, inputCount, inputIndexes, - inExpectedTypes, outputCount, outputIndexes, - outExpectedTypes); - } - case ANEURALNETWORKS_IF: { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3)); - return validateIfOperation(inputCount, inputIndexes, outputCount, outputIndexes, - operands, helper) - ? ANEURALNETWORKS_NO_ERROR - : ANEURALNETWORKS_BAD_DATA; - } - case ANEURALNETWORKS_WHILE: { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3)); - return validateWhileOperation(inputCount, inputIndexes, outputCount, outputIndexes, - operands, helper) - ? ANEURALNETWORKS_NO_ERROR - : ANEURALNETWORKS_BAD_DATA; - } - default: { - const OperationRegistration* operationRegistration = - BuiltinOperationResolver::get()->findOperation( - static_cast(opType)); - if (operationRegistration == nullptr) { - if (0 <= opType && opType < kNumberOfOperationTypes) { - LOG(ERROR) << opType << " not registered"; - } else { - LOG(ERROR) << "Operation type " << opType << " out of the range [0, " - << kNumberOfOperationTypes << ")"; - } - return ANEURALNETWORKS_UNEXPECTED_NULL; - } - if (operationRegistration->validate == nullptr) { - LOG(ERROR) << "Incomplete operation registration: " << opType; - return ANEURALNETWORKS_UNEXPECTED_NULL; - } - OperationValidationContext context(operationRegistration->name, inputCount, - inputIndexes, outputCount, outputIndexes, - operands.data()); - const auto maybeVersion = operationRegistration->validate(&context); - if (!maybeVersion.has_value()) { - LOG(ERROR) << "Validation failed for operation " << opType << ": " - << maybeVersion.error(); - return ANEURALNETWORKS_BAD_DATA; - } - if (!validateVersion(&context, convert(halVersion), maybeVersion.value())) { - LOG(ERROR) << "Validation failed for operation " << opType; - return ANEURALNETWORKS_BAD_DATA; - } - return ANEURALNETWORKS_NO_ERROR; - } - } -} - -ErrorStatus convertResultCodeToErrorStatus(int resultCode) { - switch (resultCode) { - case ANEURALNETWORKS_NO_ERROR: - return ErrorStatus::NONE; - - case ANEURALNETWORKS_BAD_DATA: - case ANEURALNETWORKS_UNEXPECTED_NULL: - return ErrorStatus::INVALID_ARGUMENT; - - case ANEURALNETWORKS_OUTPUT_INSUFFICIENT_SIZE: - return ErrorStatus::OUTPUT_INSUFFICIENT_SIZE; - - case ANEURALNETWORKS_UNAVAILABLE_DEVICE: - return ErrorStatus::DEVICE_UNAVAILABLE; - - case ANEURALNETWORKS_BAD_STATE: - case ANEURALNETWORKS_INCOMPLETE: - case ANEURALNETWORKS_OP_FAILED: - case ANEURALNETWORKS_OUT_OF_MEMORY: - case ANEURALNETWORKS_UNMAPPABLE: - case ANEURALNETWORKS_DEAD_OBJECT: - return ErrorStatus::GENERAL_FAILURE; - - case ANEURALNETWORKS_MISSED_DEADLINE_TRANSIENT: - return ErrorStatus::MISSED_DEADLINE_TRANSIENT; - case ANEURALNETWORKS_MISSED_DEADLINE_PERSISTENT: - return ErrorStatus::MISSED_DEADLINE_PERSISTENT; - case ANEURALNETWORKS_RESOURCE_EXHAUSTED_TRANSIENT: - return ErrorStatus::RESOURCE_EXHAUSTED_TRANSIENT; - case ANEURALNETWORKS_RESOURCE_EXHAUSTED_PERSISTENT: - return ErrorStatus::RESOURCE_EXHAUSTED_PERSISTENT; - } - LOG(ERROR) << "Unknown result code " << resultCode << " mapped to ErrorStatus::GENERAL_FAILURE"; - return ErrorStatus::GENERAL_FAILURE; -} - -int convertErrorStatusToResultCode(ErrorStatus status) { - switch (status) { - 
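// This mapping is intentionally lossy in round trips: convertResultCodeToErrorStatus()
// above collapses BAD_STATE, INCOMPLETE, OP_FAILED, OUT_OF_MEMORY, UNMAPPABLE and
// DEAD_OBJECT into GENERAL_FAILURE, which maps back to ANEURALNETWORKS_OP_FAILED here.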
-        case ErrorStatus::NONE:
-            return ANEURALNETWORKS_NO_ERROR;
-        case ErrorStatus::DEVICE_UNAVAILABLE:
-            return ANEURALNETWORKS_UNAVAILABLE_DEVICE;
-        case ErrorStatus::GENERAL_FAILURE:
-            return ANEURALNETWORKS_OP_FAILED;
-        case ErrorStatus::OUTPUT_INSUFFICIENT_SIZE:
-            return ANEURALNETWORKS_OUTPUT_INSUFFICIENT_SIZE;
-        case ErrorStatus::INVALID_ARGUMENT:
-            return ANEURALNETWORKS_BAD_DATA;
-        case ErrorStatus::MISSED_DEADLINE_TRANSIENT:
-            return ANEURALNETWORKS_MISSED_DEADLINE_TRANSIENT;
-        case ErrorStatus::MISSED_DEADLINE_PERSISTENT:
-            return ANEURALNETWORKS_MISSED_DEADLINE_PERSISTENT;
-        case ErrorStatus::RESOURCE_EXHAUSTED_TRANSIENT:
-            return ANEURALNETWORKS_RESOURCE_EXHAUSTED_TRANSIENT;
-        case ErrorStatus::RESOURCE_EXHAUSTED_PERSISTENT:
-            return ANEURALNETWORKS_RESOURCE_EXHAUSTED_PERSISTENT;
-        case ErrorStatus::DEAD_OBJECT:
-            return ANEURALNETWORKS_DEAD_OBJECT;
-    }
-    LOG(ERROR) << "Unknown ErrorStatus " << status << " mapped to ANEURALNETWORKS_OP_FAILED";
-    return ANEURALNETWORKS_OP_FAILED;
-}
-
 V1_3::ErrorStatus convertResultCodeToHalErrorStatus(int resultCode) {
     return convertToV1_3(convertResultCodeToErrorStatus(resultCode));
 }
@@ -1970,23 +207,6 @@ std::tuple<int, std::vector<OutputShape>, Timing> getExecutionResult(
                             uncheckedConvert(timing));
 }
 
-std::tuple<int, std::vector<OutputShape>, Timing> getExecutionResult(
-        ErrorStatus status, std::vector<OutputShape> outputShapes, Timing timing) {
-    constexpr Timing kNoTiming = {std::numeric_limits<uint64_t>::max(),
-                                  std::numeric_limits<uint64_t>::max()};
-    const int n = convertErrorStatusToResultCode(status);
-    if (status != ErrorStatus::NONE && status != ErrorStatus::OUTPUT_INSUFFICIENT_SIZE &&
-        !outputShapes.empty()) {
-        LOG(ERROR) << "The driver returned OutputShapes when it shouldn't.";
-        outputShapes.clear();
-    }
-    if (status != ErrorStatus::NONE && timing != kNoTiming) {
-        LOG(ERROR) << "The driver returned Timing when it shouldn't.";
-        timing = kNoTiming;
-    }
-    return {n, std::move(outputShapes), timing};
-}
-
 // Capabilities::operandPerformance utilities.
 // The field Capabilities::operandPerformance is a vector sorted by the field
 // Capabilities::OperandPerformance::type.
@@ -3227,52 +1447,6 @@ V1_3::Request convertToV1_3(const V1_3::Request& request) {
     return request;
 }
 
-FenceState syncWait(int fd, int timeout) {
-    // This implementation is directly based on the ::sync_wait() implementation.
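// The timeout below is in milliseconds, as with poll(); a negative value
// blocks indefinitely. A return of ACTIVE (errno ETIME) means the fence did
// not signal within the timeout, and the do/while loop retries polls that
// were interrupted with EINTR/EAGAIN.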
- - struct pollfd fds; - int ret; - - if (fd < 0) { - errno = EINVAL; - return FenceState::UNKNOWN; - } - - fds.fd = fd; - fds.events = POLLIN; - - do { - ret = poll(&fds, 1, timeout); - if (ret > 0) { - if (fds.revents & POLLNVAL) { - errno = EINVAL; - return FenceState::UNKNOWN; - } - if (fds.revents & POLLERR) { - errno = EINVAL; - return FenceState::ERROR; - } - return FenceState::SIGNALED; - } else if (ret == 0) { - errno = ETIME; - return FenceState::ACTIVE; - } - } while (ret == -1 && (errno == EINTR || errno == EAGAIN)); - - return FenceState::UNKNOWN; -} - -#ifdef NN_DEBUGGABLE -uint32_t getProp(const char* str, uint32_t defaultValue) { - const std::string propStr = android::base::GetProperty(str, ""); - if (propStr.size() > 0) { - return std::stoi(propStr); - } else { - return defaultValue; - } -} -#endif // NN_DEBUGGABLE - ErrorStatus uncheckedConvert(V1_0::ErrorStatus status) { return nnTryGetValue(convert(status)); } diff --git a/nn/common/LegacyUtils.cpp b/nn/common/LegacyUtils.cpp index 7417ed8bf..52acda864 100644 --- a/nn/common/LegacyUtils.cpp +++ b/nn/common/LegacyUtils.cpp @@ -16,25 +16,19 @@ #define LOG_TAG "Utils" -#include "Utils.h" +#include "LegacyUtils.h" #include #include #include #include -#include -#include -#include -#include +#include #include #include -#include #include -#include #include #include -#include #include #include #include @@ -45,14 +39,10 @@ #include "NeuralNetworks.h" #include "NeuralNetworksOEM.h" #include "OperationResolver.h" -#include "ValidateHal.h" -#include "nnapi/TypeUtils.h" namespace android { namespace nn { -constexpr V1_0::PerformanceInfo kNoPerformanceInfo = {.execTime = FLT_MAX, .powerUsage = FLT_MAX}; - const char kVLogPropKey[] = "debug.nn.vlog"; int vLogMask = ~0; @@ -123,29 +113,6 @@ Deadline makeDeadline(TimeoutDuration duration) { return currentTime + duration; } -static uint64_t getMaxNanosecondsSinceEpoch() { - const auto maxTime = - std::chrono::time_point::max(); - return maxTime.time_since_epoch().count(); -} - -std::optional makeDeadline(const V1_3::OptionalTimePoint& timePoint) { - using Discriminator = V1_3::OptionalTimePoint::hidl_discriminator; - if (timePoint.getDiscriminator() == Discriminator::none) { - return std::nullopt; - } - const uint64_t nanosecondsSinceEpoch = timePoint.nanosecondsSinceEpoch(); - const uint64_t maxNanosecondsSinceEpoch = getMaxNanosecondsSinceEpoch(); - - // Clamp time point to max. - if (nanosecondsSinceEpoch >= maxNanosecondsSinceEpoch) { - return Deadline::max(); - } - - // Return provided time point. 
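// (Assuming Deadline is a std::chrono time point, as elsewhere in this
// codebase, the HAL's nanoseconds-since-epoch value is reinterpreted here as
// a duration against that clock's epoch.)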
- return Deadline{std::chrono::nanoseconds{nanosecondsSinceEpoch}}; -} - bool hasDeadlinePassed(const std::optional& deadline) { if (!deadline.has_value()) { return false; @@ -169,11 +136,11 @@ static bool isExtensionOperationType(ANeuralNetworksOperationType type) { return (static_cast(type) >> kExtensionTypeBits) != 0; } -bool isExtensionOperandType(V1_3::OperandType type) { +bool isExtensionOperandType(OperandType type) { return isExtensionOperandType(static_cast(type)); } -bool isExtensionOperationType(V1_3::OperationType type) { +bool isExtensionOperationType(OperationType type) { return isExtensionOperationType(static_cast(type)); } @@ -297,14 +264,6 @@ Shape OperationValidationContext::getOutputShape(uint32_t index) const { #define COUNT(X) (sizeof(X) / sizeof(X[0])) -std::string getOperandTypeName(V1_3::OperandType type) { - return toString(type); -} - -std::string getOperationName(V1_3::OperationType type) { - return toString(type); -} - const uint32_t kSizeOfDataType[]{ 4, // ANEURALNETWORKS_FLOAT32 4, // ANEURALNETWORKS_INT32 @@ -374,11 +333,6 @@ uint32_t nonExtensionOperandSizeOfData(OperandType type, const std::vector& dimensions) { - return nonExtensionOperandSizeOfData(uncheckedConvert(type), dimensions); -} - // Returns a pair of {false, size} on success, {true, 0} if size overflows uint32_t. static std::pair sizeOfTensorDataHelper(uint32_t sizeOfElement, const std::vector& dimensions) { @@ -410,11 +364,6 @@ bool nonExtensionOperandSizeOfDataOverflowsUInt32(OperandType type, : sizeOfTensorDataOverflowsUInt32(sizeOfElement, dimensions); } -bool nonExtensionOperandSizeOfDataOverflowsUInt32(V1_3::OperandType type, - const std::vector& dimensions) { - return nonExtensionOperandSizeOfDataOverflowsUInt32(uncheckedConvert(type), dimensions); -} - bool sizeOfTensorDataOverflowsUInt32(uint32_t sizeOfElement, const std::vector& dimensions) { return sizeOfTensorDataHelper(sizeOfElement, dimensions).first; @@ -433,12 +382,6 @@ bool tensorHasUnspecifiedDimensions(OperandType type, const std::vector& dimensions) { - return tensorHasUnspecifiedDimensions(static_cast(type), dimensions.data(), - dimensions.size()); -} - bool tensorHasUnspecifiedDimensions(const ANeuralNetworksOperandType* type) { return tensorHasUnspecifiedDimensions(type->type, type->dimensions, type->dimensionCount); } @@ -447,11 +390,6 @@ bool tensorHasUnspecifiedDimensions(const Operand& operand) { return tensorHasUnspecifiedDimensions(operand.type, operand.dimensions); } -bool tensorHasUnspecifiedDimensions(const V1_3::Operand& operand) { - return tensorHasUnspecifiedDimensions(static_cast(operand.type), operand.dimensions.data(), - operand.dimensions.size()); -} - uint32_t alignBytesNeeded(uint32_t index, size_t length) { uint32_t pattern; if (length < 2) { @@ -465,78 +403,8 @@ uint32_t alignBytesNeeded(uint32_t index, size_t length) { return extra; } -void logModelToInfo(const V1_0::Model& model) { - LOG(INFO) << "V1_0::Model start"; - LOG(INFO) << "operands" << toString(model.operands); - LOG(INFO) << "operations" << toString(model.operations); - LOG(INFO) << "inputIndexes" << toString(model.inputIndexes); - LOG(INFO) << "outputIndexes" << toString(model.outputIndexes); - LOG(INFO) << "operandValues size" << model.operandValues.size(); - LOG(INFO) << "pools" << SHOW_IF_DEBUG(toString(model.pools)); -} - -void logModelToInfo(const V1_1::Model& model) { - LOG(INFO) << "V1_1::Model start"; - LOG(INFO) << "operands" << toString(model.operands); - LOG(INFO) << "operations" << toString(model.operations); - LOG(INFO) << 
"inputIndexes" << toString(model.inputIndexes); - LOG(INFO) << "outputIndexes" << toString(model.outputIndexes); - LOG(INFO) << "operandValues size " << model.operandValues.size(); - LOG(INFO) << "pools" << SHOW_IF_DEBUG(toString(model.pools)); -} - -void logModelToInfo(const V1_2::Model& model) { - LOG(INFO) << "V1_2::Model start"; - LOG(INFO) << "operands" << toString(model.operands); - LOG(INFO) << "operations" << toString(model.operations); - LOG(INFO) << "inputIndexes" << toString(model.inputIndexes); - LOG(INFO) << "outputIndexes" << toString(model.outputIndexes); - LOG(INFO) << "operandValues size" << model.operandValues.size(); - LOG(INFO) << "pools" << SHOW_IF_DEBUG(toString(model.pools)); - LOG(INFO) << "relaxComputationFloat32toFloat16" << model.relaxComputationFloat32toFloat16; - LOG(INFO) << "extensionNameToPrefix" << toString(model.extensionNameToPrefix); -} - -static void logSubgraphToInfo(std::string label, const V1_3::Subgraph& subgraph) { - LOG(INFO) << label << ".operands" << toString(subgraph.operands); - LOG(INFO) << label << ".operations" << toString(subgraph.operations); - LOG(INFO) << label << ".inputIndexes" << toString(subgraph.inputIndexes); - LOG(INFO) << label << ".outputIndexes" << toString(subgraph.outputIndexes); -} - -void logModelToInfo(const V1_3::Model& model) { - LOG(INFO) << "V1_3::Model start"; - logSubgraphToInfo("main", model.main); - for (uint32_t i = 0, n = model.referenced.size(); i < n; ++i) { - logSubgraphToInfo("referenced[" + std::to_string(i) + "]", model.referenced[i]); - } - LOG(INFO) << "operandValues size " << model.operandValues.size(); - LOG(INFO) << "pools" << SHOW_IF_DEBUG(toString(model.pools)); - LOG(INFO) << "relaxComputationFloat32toFloat16 " << model.relaxComputationFloat32toFloat16; - LOG(INFO) << "extensionNameToPrefix" << toString(model.extensionNameToPrefix); -} - void logModelToInfo(const Model& model) { - LOG(INFO) << "Model start"; - logModelToInfo(convertToV1_3(model)); -} - -bool validateOperandSymmPerChannelQuantParams( - const V1_3::Operand& halOperand, - const ANeuralNetworksSymmPerChannelQuantParams& channelQuant, const char* tag) { - if (halOperand.type != V1_3::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL) { - return false; - } - - NN_RET_CHECK_LT(channelQuant.channelDim, halOperand.dimensions.size()) << tag; - NN_RET_CHECK(channelQuant.scales != nullptr) << tag; - NN_RET_CHECK_EQ(channelQuant.scaleCount, halOperand.dimensions[channelQuant.channelDim]) << tag; - NN_RET_CHECK_NE(halOperand.dimensions[channelQuant.channelDim], 0u) - << tag << " channel dimension " << channelQuant.channelDim << " is underspecified"; - for (uint32_t i = 0; i < halOperand.dimensions[channelQuant.channelDim]; i++) { - NN_RET_CHECK_GT(channelQuant.scales[i], 0.0f) << tag << " invalid scaleArray[" << i << "]"; - } - return true; + LOG(INFO) << model; } static bool validateScalarDimensions(const ANeuralNetworksOperandType& type, const char* tag) { @@ -871,19 +739,6 @@ static bool validateWhileOperation(uint32_t inputCount, const uint32_t* inputs, return true; } -static inline int validateOperation(ANeuralNetworksOperationType opType, uint32_t inputCount, - const uint32_t* inputIndexes, uint32_t outputCount, - const uint32_t* outputIndexes, - const std::vector& operands, HalVersion halVersion) { - if (opType == ANEURALNETWORKS_IF || opType == ANEURALNETWORKS_WHILE) { - NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3)); - LOG(ERROR) << "This validateOperation() overload does not support control flow"; - return 
ANEURALNETWORKS_BAD_DATA; - } - return validateOperation(opType, inputCount, inputIndexes, outputCount, outputIndexes, operands, - halVersion, {}); -} - int validateOperation(ANeuralNetworksOperationType opType, uint32_t inputCount, const uint32_t* inputIndexes, uint32_t outputCount, const uint32_t* outputIndexes, const std::vector& operands, @@ -1571,7 +1426,7 @@ int validateOperation(ANeuralNetworksOperationType opType, uint32_t inputCount, } // Validate that output shape is equal to input shape if dimensions // are already known. - auto getNumberOfElements = [](const hardware::hidl_vec& dims) { + auto getNumberOfElements = [](const std::vector& dims) { if (dims.size() == 0) { return 0; } @@ -1955,21 +1810,6 @@ int convertErrorStatusToResultCode(ErrorStatus status) { return ANEURALNETWORKS_OP_FAILED; } -V1_3::ErrorStatus convertResultCodeToHalErrorStatus(int resultCode) { - return convertToV1_3(convertResultCodeToErrorStatus(resultCode)); -} - -int convertErrorStatusToResultCode(V1_3::ErrorStatus status) { - return convertErrorStatusToResultCode(uncheckedConvert(status)); -} - -std::tuple, Timing> getExecutionResult( - V1_3::ErrorStatus status, const hardware::hidl_vec& outputShapes, - const V1_2::Timing& timing) { - return getExecutionResult(uncheckedConvert(status), uncheckedConvert(outputShapes), - uncheckedConvert(timing)); -} - std::tuple, Timing> getExecutionResult( ErrorStatus status, std::vector outputShapes, Timing timing) { constexpr Timing kNoTiming = {std::numeric_limits::max(), @@ -1987,1579 +1827,51 @@ std::tuple, Timing> getExecutionResult( return {n, std::move(outputShapes), timing}; } -// Capabilities::operandPerformance utilities. -// The field Capabilities::operandPerformance is a vector sorted by the field -// Capabilities::OperandPerformance::type. - -template -hardware::hidl_vec> nonExtensionOperandPerformance( - V1_0::PerformanceInfo perf) { - using OpPerf = VersionedOperandPerformance; - - // Note: range presents enumerators in declaration order, not in numerical order. 
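// hidl_enum_range yields enumerators in declaration order, hence the explicit
// sort below; SUBGRAPH is skipped because control-flow performance is
// reported through Capabilities::ifPerformance and Capabilities::whilePerformance
// rather than operandPerformance.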
- static constexpr hardware::hidl_enum_range> kOperandTypeRange; - - std::vector ret; - ret.reserve(kOperandTypeRange.end() - kOperandTypeRange.begin()); - for (VersionedOperandType type : kOperandTypeRange) { - if (static_cast(type) != V1_3::OperandType::SUBGRAPH) { - ret.push_back(OpPerf{type, perf}); - } - } - std::sort(ret.begin(), ret.end(), - [](const OpPerf& a, const OpPerf& b) { return a.type < b.type; }); - - return ret; -} - -template hardware::hidl_vec -nonExtensionOperandPerformance(V1_0::PerformanceInfo perf); -template hardware::hidl_vec -nonExtensionOperandPerformance(V1_0::PerformanceInfo perf); - -template -void update(hardware::hidl_vec>* operandPerformance, - VersionedOperandType type, V1_0::PerformanceInfo perf) { - CHECK(operandPerformance != nullptr); - const auto it = - std::lower_bound(operandPerformance->begin(), operandPerformance->end(), type, - [](const VersionedOperandPerformance& perf, - VersionedOperandType type) { return perf.type < type; }); - CHECK(it != operandPerformance->end()) - << toString(type) << " not in " << toString(*operandPerformance); - it->info = perf; -} - -void update(hardware::hidl_vec* operandPerformance, - V1_2::OperandType type, V1_0::PerformanceInfo perf) { - update(operandPerformance, type, perf); -} -void update(hardware::hidl_vec* operandPerformance, - V1_3::OperandType type, V1_0::PerformanceInfo perf) { - update(operandPerformance, type, perf); -} - -template -V1_0::PerformanceInfo lookup( - const hardware::hidl_vec>& operandPerformance, - VersionedOperandType type) { - const auto it = std::lower_bound(operandPerformance.begin(), operandPerformance.end(), type, - [](const VersionedOperandPerformance& perf, - VersionedOperandType type) { - return static_cast(perf.type) < - static_cast(type); - }); - if (it == operandPerformance.end()) { - LOG(WARNING) << "No PerformanceInfo for " << toString(type); - return kNoPerformanceInfo; - } else { - return it->info; - } -} - -V1_0::PerformanceInfo lookup( - const hardware::hidl_vec& operandPerformance, - V1_2::OperandType type) { - return lookup(operandPerformance, type); -} -V1_0::PerformanceInfo lookup( - const hardware::hidl_vec& operandPerformance, - V1_3::OperandType type) { - CHECK(type != V1_3::OperandType::SUBGRAPH) - << "Use Capabilities::ifPerformance or Capabilities::whilePerformance"; - return lookup(operandPerformance, type); -} - -// Versioning - -// In Android P, most data types are treated as having the same performance as TENSOR_QUANT8_ASYMM. -// This array must be in sorted order. 
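// Sorted order matters because makeQuantized8PerformanceConsistentWithP()
// copies these entries into an operandPerformance vector, and lookup()/update()
// locate entries in such vectors with std::lower_bound.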
-static const V1_3::OperandType kQuantized8PerformanceConsistentWithP[] = { - V1_3::OperandType::INT32, V1_3::OperandType::UINT32, V1_3::OperandType::TENSOR_INT32, - V1_3::OperandType::OEM, V1_3::OperandType::TENSOR_OEM_BYTE}; - -static bool isQuantized8PerformanceConsistentWithP(const V1_2::Capabilities& capabilities) { - const V1_0::PerformanceInfo quantized8Performance = - lookup(capabilities.operandPerformance, V1_2::OperandType::TENSOR_QUANT8_ASYMM); - return std::all_of(std::begin(kQuantized8PerformanceConsistentWithP), - std::end(kQuantized8PerformanceConsistentWithP), - [quantized8Performance, &capabilities](V1_3::OperandType type) { - return quantized8Performance == - lookup(capabilities.operandPerformance, - static_cast(type)); - }); -} - -static bool isQuantized8PerformanceConsistentWithP(const V1_3::Capabilities& capabilities) { - const V1_0::PerformanceInfo quantized8Performance = - lookup(capabilities.operandPerformance, V1_3::OperandType::TENSOR_QUANT8_ASYMM); - return std::all_of(std::begin(kQuantized8PerformanceConsistentWithP), - std::end(kQuantized8PerformanceConsistentWithP), - [quantized8Performance, &capabilities](V1_3::OperandType type) { - return quantized8Performance == - lookup(capabilities.operandPerformance, type); - }); -} - -static hardware::hidl_vec -makeQuantized8PerformanceConsistentWithP(V1_0::PerformanceInfo quantized8Performance) { - hardware::hidl_vec ret( - std::size(kQuantized8PerformanceConsistentWithP)); - std::transform(std::begin(kQuantized8PerformanceConsistentWithP), - std::end(kQuantized8PerformanceConsistentWithP), ret.begin(), - [quantized8Performance]( - V1_3::OperandType type) -> V1_2::Capabilities::OperandPerformance { - return {static_cast(type), quantized8Performance}; - }); - return ret; -} - -bool compliantWithV1_0(const V1_0::Capabilities&) { - return true; -} - -bool compliantWithV1_0(const V1_1::Capabilities& capabilities) { - return capabilities.relaxedFloat32toFloat16Performance == capabilities.float32Performance; -} - -bool compliantWithV1_0(const V1_2::Capabilities& capabilities) { - const V1_0::PerformanceInfo perfTensorFloat32 = - lookup(capabilities.operandPerformance, V1_2::OperandType::TENSOR_FLOAT32); - const V1_0::PerformanceInfo perfFloat32 = - lookup(capabilities.operandPerformance, V1_2::OperandType::FLOAT32); - if (perfTensorFloat32 != perfFloat32 || - perfTensorFloat32 != capabilities.relaxedFloat32toFloat16PerformanceTensor || - perfFloat32 != capabilities.relaxedFloat32toFloat16PerformanceScalar) { - return false; - } - - return isQuantized8PerformanceConsistentWithP(capabilities); -} - -bool compliantWithV1_0(const V1_3::Capabilities& capabilities) { - const V1_0::PerformanceInfo perfTensorFloat32 = - lookup(capabilities.operandPerformance, V1_3::OperandType::TENSOR_FLOAT32); - const V1_0::PerformanceInfo perfFloat32 = - lookup(capabilities.operandPerformance, V1_3::OperandType::FLOAT32); - if (perfTensorFloat32 != perfFloat32 || - perfTensorFloat32 != capabilities.relaxedFloat32toFloat16PerformanceTensor || - perfFloat32 != capabilities.relaxedFloat32toFloat16PerformanceScalar) { - return false; - } - - return isQuantized8PerformanceConsistentWithP(capabilities); -} - -bool compliantWithV1_1(const V1_0::Capabilities&) { - return true; -} - -bool compliantWithV1_1(const V1_1::Capabilities&) { - return true; -} - -bool compliantWithV1_1(const V1_2::Capabilities& capabilities) { - if ((capabilities.relaxedFloat32toFloat16PerformanceTensor != - capabilities.relaxedFloat32toFloat16PerformanceScalar) || - 
(lookup(capabilities.operandPerformance, V1_2::OperandType::TENSOR_FLOAT32) != - lookup(capabilities.operandPerformance, V1_2::OperandType::FLOAT32))) { - return false; - } - - return isQuantized8PerformanceConsistentWithP(capabilities); -} - -bool compliantWithV1_1(const V1_3::Capabilities& capabilities) { - if ((capabilities.relaxedFloat32toFloat16PerformanceTensor != - capabilities.relaxedFloat32toFloat16PerformanceScalar) || - (lookup(capabilities.operandPerformance, V1_3::OperandType::TENSOR_FLOAT32) != - lookup(capabilities.operandPerformance, V1_3::OperandType::FLOAT32))) { - return false; - } - - return isQuantized8PerformanceConsistentWithP(capabilities); -} - -bool compliantWithV1_2(const V1_0::Capabilities&) { - return true; -} - -bool compliantWithV1_2(const V1_1::Capabilities&) { - return true; -} - -bool compliantWithV1_2(const V1_2::Capabilities&) { - return true; -} - -bool compliantWithV1_2(const V1_3::Capabilities&) { - return true; -} - -bool compliantWithV1_3(const V1_0::Capabilities&) { - return true; -} - -bool compliantWithV1_3(const V1_1::Capabilities&) { - return true; -} - -bool compliantWithV1_3(const V1_2::Capabilities&) { - return true; -} - -bool compliantWithV1_3(const V1_3::Capabilities&) { - return true; -} - -V1_0::ErrorStatus convertToV1_0(V1_0::ErrorStatus status) { - return status; -} - -V1_0::ErrorStatus convertToV1_0(V1_3::ErrorStatus status) { - switch (status) { - case V1_3::ErrorStatus::NONE: - return V1_0::ErrorStatus::NONE; - case V1_3::ErrorStatus::DEVICE_UNAVAILABLE: - return V1_0::ErrorStatus::DEVICE_UNAVAILABLE; - case V1_3::ErrorStatus::GENERAL_FAILURE: - return V1_0::ErrorStatus::GENERAL_FAILURE; - case V1_3::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE: - return V1_0::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE; - case V1_3::ErrorStatus::INVALID_ARGUMENT: - return V1_0::ErrorStatus::INVALID_ARGUMENT; - case V1_3::ErrorStatus::MISSED_DEADLINE_TRANSIENT: - return V1_0::ErrorStatus::GENERAL_FAILURE; - case V1_3::ErrorStatus::MISSED_DEADLINE_PERSISTENT: - return V1_0::ErrorStatus::GENERAL_FAILURE; - case V1_3::ErrorStatus::RESOURCE_EXHAUSTED_TRANSIENT: - return V1_0::ErrorStatus::GENERAL_FAILURE; - case V1_3::ErrorStatus::RESOURCE_EXHAUSTED_PERSISTENT: - return V1_0::ErrorStatus::GENERAL_FAILURE; - } - LOG(ERROR) << "Unknown ErrorStatus: " << toString(status) << " mapped to GENERAL_FAILURE"; - return V1_0::ErrorStatus::GENERAL_FAILURE; -} - -V1_3::ErrorStatus convertToV1_3(V1_0::ErrorStatus status) { - return static_cast(status); -} - -V1_3::ErrorStatus convertToV1_3(V1_3::ErrorStatus status) { - return status; -} - -static V1_0::OperationType uncheckedConvertToV1_0(V1_1::OperationType type) { - return static_cast(type); -} - -static V1_0::OperationType uncheckedConvertToV1_0(V1_2::OperationType type) { - return static_cast(type); -} - -V1_0::OperationType uncheckedConvertToV1_0(V1_3::OperationType type) { - return static_cast(type); -} - -static V1_1::OperationType convertToV1_1(V1_0::OperationType type) { - return static_cast(type); -} - -static V1_1::OperationType uncheckedConvertToV1_1(V1_2::OperationType type) { - return static_cast(type); -} - -V1_1::OperationType uncheckedConvertToV1_1(V1_3::OperationType type) { - return static_cast(type); -} - -static V1_2::OperationType convertToV1_2(V1_0::OperationType type) { - return static_cast(type); -} - -static V1_2::OperationType convertToV1_2(V1_1::OperationType type) { - return static_cast(type); -} - -V1_2::OperationType uncheckedConvertToV1_2(V1_3::OperationType type) { - return static_cast(type); -} 
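// A small illustration (names below are only an example, not part of this
// change): the V1_x OperationType enums share numeric values for operations
// present in more than one version, so these helpers are plain static_casts.
//
//     const V1_3::OperationType t3 = V1_3::OperationType::ADD;
//     const V1_0::OperationType t0 = uncheckedConvertToV1_0(t3);
//     CHECK(static_cast<int32_t>(t0) == static_cast<int32_t>(t3));
//
// "unchecked" means the caller must already know the operation exists in the
// target version; downcasting a V1_2-only type to V1_0 yields a value outside
// the V1_0 enum.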
- -static V1_3::OperationType convertToV1_3(V1_0::OperationType type) { - return static_cast(type); -} - -static V1_3::OperationType convertToV1_3(V1_1::OperationType type) { - return static_cast(type); -} - -static V1_3::OperationType convertToV1_3(V1_2::OperationType type) { - return static_cast(type); -} - -V1_0::Capabilities convertToV1_0(const V1_0::Capabilities& capabilities) { - return capabilities; -} - -V1_0::Capabilities convertToV1_0(const V1_1::Capabilities& capabilities) { - if (!compliantWithV1_0(capabilities)) { - LOG(ERROR) << "Upcasting non-compliant capabilities " << toString(capabilities) - << " from V1_1::Capabilities to V1_0::Capabilities"; - } - return {.float32Performance = capabilities.float32Performance, - .quantized8Performance = capabilities.quantized8Performance}; -} - -V1_0::Capabilities convertToV1_0(const V1_2::Capabilities& capabilities) { - if (!compliantWithV1_0(capabilities)) { - LOG(ERROR) << "Upcasting non-compliant capabilities " << toString(capabilities) - << " from V1_2::Capabilities to V1_0::Capabilities"; - } - return {.float32Performance = - lookup(capabilities.operandPerformance, V1_2::OperandType::TENSOR_FLOAT32), - .quantized8Performance = lookup(capabilities.operandPerformance, - V1_2::OperandType::TENSOR_QUANT8_ASYMM)}; -} - -V1_0::Capabilities convertToV1_0(const V1_3::Capabilities& capabilities) { - if (!compliantWithV1_0(capabilities)) { - LOG(ERROR) << "Upcasting non-compliant capabilities " << toString(capabilities) - << " from V1_3::Capabilities to V1_0::Capabilities"; - } - return {.float32Performance = - lookup(capabilities.operandPerformance, V1_3::OperandType::TENSOR_FLOAT32), - .quantized8Performance = lookup(capabilities.operandPerformance, - V1_3::OperandType::TENSOR_QUANT8_ASYMM)}; -} - -V1_1::Capabilities convertToV1_1(const V1_0::Capabilities& capabilities) { - return {.float32Performance = capabilities.float32Performance, - .quantized8Performance = capabilities.quantized8Performance, - .relaxedFloat32toFloat16Performance = capabilities.float32Performance}; -} - -V1_1::Capabilities convertToV1_1(const V1_1::Capabilities& capabilities) { - return capabilities; -} +FenceState syncWait(int fd, int timeout) { + // This implementation is directly based on the ::sync_wait() implementation. 
-V1_1::Capabilities convertToV1_1(const V1_2::Capabilities& capabilities) { - if (!compliantWithV1_1(capabilities)) { - LOG(ERROR) << "Upcasting non-compliant capabilities " << toString(capabilities) - << " from V1_2::Capabilities to V1_1::Capabilities"; - } - return {.float32Performance = - lookup(capabilities.operandPerformance, V1_2::OperandType::TENSOR_FLOAT32), - .quantized8Performance = - lookup(capabilities.operandPerformance, V1_2::OperandType::TENSOR_QUANT8_ASYMM), - .relaxedFloat32toFloat16Performance = - capabilities.relaxedFloat32toFloat16PerformanceTensor}; -} + struct pollfd fds; + int ret; -V1_1::Capabilities convertToV1_1(const V1_3::Capabilities& capabilities) { - if (!compliantWithV1_1(capabilities)) { - LOG(ERROR) << "Upcasting non-compliant capabilities " << toString(capabilities) - << " from V1_3::Capabilities to V1_1::Capabilities"; + if (fd < 0) { + errno = EINVAL; + return FenceState::UNKNOWN; } - return {.float32Performance = - lookup(capabilities.operandPerformance, V1_3::OperandType::TENSOR_FLOAT32), - .quantized8Performance = - lookup(capabilities.operandPerformance, V1_3::OperandType::TENSOR_QUANT8_ASYMM), - .relaxedFloat32toFloat16Performance = - capabilities.relaxedFloat32toFloat16PerformanceTensor}; -} -V1_2::Capabilities convertToV1_2(const V1_0::Capabilities& capabilities) { - V1_2::Capabilities ret = { - .relaxedFloat32toFloat16PerformanceScalar = capabilities.float32Performance, - .relaxedFloat32toFloat16PerformanceTensor = capabilities.float32Performance, - .operandPerformance = - makeQuantized8PerformanceConsistentWithP(capabilities.quantized8Performance)}; - auto& opPerf = ret.operandPerformance; - opPerf.resize(opPerf.size() + 2); - opPerf[opPerf.size() - 2] = {V1_2::OperandType::TENSOR_FLOAT32, - capabilities.float32Performance}; - opPerf[opPerf.size() - 1] = {V1_2::OperandType::FLOAT32, capabilities.float32Performance}; - using OperandPerformance = V1_2::Capabilities::OperandPerformance; - std::sort(opPerf.begin(), opPerf.end(), - [](const OperandPerformance& a, const OperandPerformance& b) { - return a.type < b.type; - }); - return ret; -} - -V1_2::Capabilities convertToV1_2(const V1_1::Capabilities& capabilities) { - V1_2::Capabilities ret = {.relaxedFloat32toFloat16PerformanceScalar = - capabilities.relaxedFloat32toFloat16Performance, - .relaxedFloat32toFloat16PerformanceTensor = - capabilities.relaxedFloat32toFloat16Performance, - .operandPerformance = makeQuantized8PerformanceConsistentWithP( - capabilities.quantized8Performance)}; - auto& opPerf = ret.operandPerformance; - opPerf.resize(opPerf.size() + 2); - opPerf[opPerf.size() - 2] = {V1_2::OperandType::TENSOR_FLOAT32, - capabilities.float32Performance}; - opPerf[opPerf.size() - 1] = {V1_2::OperandType::FLOAT32, capabilities.float32Performance}; - using OperandPerformance = V1_2::Capabilities::OperandPerformance; - std::sort(opPerf.begin(), opPerf.end(), - [](const OperandPerformance& a, const OperandPerformance& b) { - return a.type < b.type; - }); - return ret; -} - -V1_2::Capabilities convertToV1_2(const V1_2::Capabilities& capabilities) { - return capabilities; -} - -V1_2::Capabilities convertToV1_2(const V1_3::Capabilities& capabilities) { - V1_2::Capabilities ret = { - .relaxedFloat32toFloat16PerformanceScalar = - capabilities.relaxedFloat32toFloat16PerformanceScalar, - .relaxedFloat32toFloat16PerformanceTensor = - capabilities.relaxedFloat32toFloat16PerformanceTensor, - }; - const auto& inputOpPerf = capabilities.operandPerformance; - hardware::hidl_vec opPerfSupported; - 
opPerfSupported.resize(inputOpPerf.size()); - auto last = - std::copy_if(inputOpPerf.begin(), inputOpPerf.end(), opPerfSupported.begin(), - [](V1_3::Capabilities::OperandPerformance opPerf) { - return validOperandType(static_cast(opPerf.type)); - }); - opPerfSupported.resize(std::distance(opPerfSupported.begin(), last)); - - auto& convertedOpPerf = ret.operandPerformance; - convertedOpPerf.resize(opPerfSupported.size()); - std::transform(opPerfSupported.begin(), opPerfSupported.end(), convertedOpPerf.begin(), - [](V1_3::Capabilities::OperandPerformance opPerf) { - return V1_2::Capabilities::OperandPerformance{ - static_cast(opPerf.type), opPerf.info}; - }); - return ret; -} - -V1_3::Capabilities convertToV1_3(const V1_0::Capabilities& capabilities) { - return convertToV1_3(convertToV1_2(capabilities)); -} - -V1_3::Capabilities convertToV1_3(const V1_1::Capabilities& capabilities) { - return convertToV1_3(convertToV1_2(capabilities)); -} - -V1_3::Capabilities convertToV1_3(const V1_2::Capabilities& capabilities) { - V1_3::Capabilities ret = { - .relaxedFloat32toFloat16PerformanceScalar = - capabilities.relaxedFloat32toFloat16PerformanceScalar, - .relaxedFloat32toFloat16PerformanceTensor = - capabilities.relaxedFloat32toFloat16PerformanceTensor, - .ifPerformance = kNoPerformanceInfo, - .whilePerformance = kNoPerformanceInfo, - }; - auto& opPerf = ret.operandPerformance; - opPerf.resize(capabilities.operandPerformance.size()); - std::transform(capabilities.operandPerformance.begin(), capabilities.operandPerformance.end(), - opPerf.begin(), [](V1_2::Capabilities::OperandPerformance opPerf) { - return V1_3::Capabilities::OperandPerformance{ - static_cast(opPerf.type), opPerf.info}; - }); - return ret; -} - -V1_3::Capabilities convertToV1_3(const V1_3::Capabilities& capabilities) { - return capabilities; -} - -static V1_0::Operation uncheckedConvertToV1_0(const V1_1::Operation& operation) { - return {.type = uncheckedConvertToV1_0(operation.type), - .inputs = operation.inputs, - .outputs = operation.outputs}; -} - -static V1_1::Operation convertToV1_1(const V1_0::Operation& operation) { - return {.type = convertToV1_1(operation.type), - .inputs = operation.inputs, - .outputs = operation.outputs}; -} - -static hardware::hidl_vec uncheckedConvertToV1_0( - const hardware::hidl_vec& operations) { - hardware::hidl_vec result(operations.size()); - std::transform( - operations.begin(), operations.end(), result.begin(), - [](const V1_1::Operation& operation) { return uncheckedConvertToV1_0(operation); }); - return result; -} - -static hardware::hidl_vec convertToV1_1( - const hardware::hidl_vec& operations) { - hardware::hidl_vec result(operations.size()); - std::transform(operations.begin(), operations.end(), result.begin(), - [](const V1_0::Operation& operation) { return convertToV1_1(operation); }); - return result; -} - -bool compliantWithV1_0(const V1_3::Operand& operand) { - return validOperandType(static_cast(operand.type)) && - (nonExtensionOperandTypeIsScalar(static_cast(operand.type)) || - operand.dimensions.size() != 0) && - compliantWithV1_0(operand.lifetime); -} + fds.fd = fd; + fds.events = POLLIN; -bool compliantWithV1_2(const V1_3::Operand& operand) { - return validOperandType(static_cast(operand.type)) && - compliantWithV1_0(operand.lifetime); -} + do { + ret = poll(&fds, 1, timeout); + if (ret > 0) { + if (fds.revents & POLLNVAL) { + errno = EINVAL; + return FenceState::UNKNOWN; + } + if (fds.revents & POLLERR) { + errno = EINVAL; + return FenceState::ERROR; + } + return 
FenceState::SIGNALED; + } else if (ret == 0) { + errno = ETIME; + return FenceState::ACTIVE; + } + } while (ret == -1 && (errno == EINTR || errno == EAGAIN)); -bool compliantWithV1_3(const V1_3::Operand& operand) { - return true; + return FenceState::UNKNOWN; } -static bool compliantWith(HalVersion version, const V1_3::Model& model, - std::set* noncompliantOperations) { - // A boolean vector indicating whether each pool is compliant with the target HAL version. - std::vector isPoolCompliant(model.pools.size(), false); - std::transform( - model.pools.begin(), model.pools.end(), isPoolCompliant.begin(), - [version](const hardware::hidl_memory& pool) { return validatePool(pool, version); }); - - // A boolean vector indicating whether each operand is compliant with the target HAL version. - std::vector isOperandCompliant(model.main.operands.size(), false); - std::transform(model.main.operands.begin(), model.main.operands.end(), - isOperandCompliant.begin(), - [&isPoolCompliant, version](const V1_3::Operand& op) { - bool is_operand_compliant = false; - switch (version) { - case HalVersion::UNKNOWN: - is_operand_compliant = false; - break; - case HalVersion::V1_0: - is_operand_compliant = compliantWithV1_0(op); - break; - case HalVersion::V1_1: - // There is no V1_1::Operand -- both V1_0::Model - // and V1_1::Model use V1_0::Operand. - is_operand_compliant = compliantWithV1_0(op); - break; - case HalVersion::V1_2: - is_operand_compliant = compliantWithV1_2(op); - break; - case HalVersion::V1_3: - is_operand_compliant = compliantWithV1_3(op); - break; - } - return is_operand_compliant && - !(op.lifetime == V1_3::OperandLifeTime::CONSTANT_REFERENCE && - !isPoolCompliant[op.location.poolIndex]); - }); - - auto allOperandsCompliant = [&isOperandCompliant](const hardware::hidl_vec& indices) { - return std::all_of( - indices.begin(), indices.end(), - [&isOperandCompliant](const uint32_t ind) { return isOperandCompliant[ind]; }); - }; - - auto localValidateOperation = [&model, version, - &allOperandsCompliant](const V1_3::Operation& op) { - if (!allOperandsCompliant(op.inputs) || !allOperandsCompliant(op.outputs)) return false; - int error = validateOperation(static_cast(op.type), op.inputs.size(), - op.inputs.size() > 0 ? op.inputs.data() : nullptr, - op.outputs.size(), - op.outputs.size() > 0 ? op.outputs.data() : nullptr, - uncheckedConvert(model.main.operands), version); - return error == ANEURALNETWORKS_NO_ERROR; - }; - - if (noncompliantOperations) { - CHECK(noncompliantOperations->empty()); - for (uint32_t idx = 0; idx < model.main.operations.size(); ++idx) { - if (!localValidateOperation(model.main.operations[idx])) { - noncompliantOperations->insert(idx); - } - } - return noncompliantOperations->empty(); +#ifdef NN_DEBUGGABLE +uint32_t getProp(const char* str, uint32_t defaultValue) { + const std::string propStr = android::base::GetProperty(str, ""); + if (propStr.size() > 0) { + return std::stoi(propStr); } else { - return std::all_of(model.main.operations.begin(), model.main.operations.end(), - localValidateOperation); - } -} - -bool compliantWithV1_0(const V1_0::Model& model) { - return true; -} - -bool compliantWithV1_0(const V1_1::Model& model) { - // In addition to new enumeration values being introduced in V1_1::Model, a - // new flag was introduced to indicate whether or not float32 data can be - // calculated using float16 units. 
This 'relaxComputationFloat32toFloat16' - // flag is not relevant in whether a V1_1::Model is compliant with a - // V1_0::Model because all 1.0 drivers require strict calculation by default - // in the P NN runtime. Even if fp16 calculations are allowed, they can - // still be computed by a strict fp32 driver. - auto operands = uncheckedConvert(convertToV1_3(model.operands)); - return std::all_of(model.operations.begin(), model.operations.end(), - [&operands](const V1_1::Operation& op) { - int error = validateOperation( - static_cast(op.type), op.inputs.size(), - op.inputs.size() > 0 ? op.inputs.data() : nullptr, - op.outputs.size(), - op.outputs.size() > 0 ? op.outputs.data() : nullptr, operands, - HalVersion::V1_0); - return error == ANEURALNETWORKS_NO_ERROR; - }); -} - -bool compliantWithV1_0(const V1_2::Model& model, std::set* noncompliantOperations) { - return compliantWith(HalVersion::V1_0, convertToV1_3(model), noncompliantOperations); -} - -bool compliantWithV1_0(const V1_3::Model& model, std::set* noncompliantOperations) { - return compliantWith(HalVersion::V1_0, model, noncompliantOperations); -} - -bool compliantWithV1_1(const V1_0::Model&) { - return true; -} - -bool compliantWithV1_1(const V1_1::Model&) { - return true; -} - -bool compliantWithV1_1(const V1_2::Model& model, std::set* noncompliantOperations) { - return compliantWith(HalVersion::V1_1, convertToV1_3(model), noncompliantOperations); -} - -bool compliantWithV1_1(const V1_3::Model& model, std::set* noncompliantOperations) { - return compliantWith(HalVersion::V1_1, model, noncompliantOperations); -} - -bool compliantWithV1_2(const V1_0::Model&) { - return true; -} - -bool compliantWithV1_2(const V1_1::Model&) { - return true; -} - -bool compliantWithV1_2(const V1_2::Model&, std::set* noncompliantOperations) { - return true; -} - -bool compliantWithV1_2(const V1_3::Model& model, std::set* noncompliantOperations) { - return compliantWith(HalVersion::V1_2, model, noncompliantOperations); -} - -static V1_0::Operation uncheckedConvertToV1_0(const V1_2::Operation& operation) { - return {.type = uncheckedConvertToV1_0(operation.type), - .inputs = operation.inputs, - .outputs = operation.outputs}; -} - -static V1_0::Operation uncheckedConvertToV1_0(const V1_3::Operation& operation) { - return {.type = uncheckedConvertToV1_0(operation.type), - .inputs = operation.inputs, - .outputs = operation.outputs}; -} - -static V1_1::Operation uncheckedConvertToV1_1(const V1_2::Operation& operation) { - return {.type = uncheckedConvertToV1_1(operation.type), - .inputs = operation.inputs, - .outputs = operation.outputs}; -} - -static V1_1::Operation uncheckedConvertToV1_1(const V1_3::Operation& operation) { - return {.type = uncheckedConvertToV1_1(operation.type), - .inputs = operation.inputs, - .outputs = operation.outputs}; -} - -static V1_2::Operation convertToV1_2(const V1_0::Operation& operation) { - return {.type = convertToV1_2(operation.type), - .inputs = operation.inputs, - .outputs = operation.outputs}; -} - -static V1_2::Operation convertToV1_2(const V1_1::Operation& operation) { - return {.type = convertToV1_2(operation.type), - .inputs = operation.inputs, - .outputs = operation.outputs}; -} - -static V1_2::Operation uncheckedConvertToV1_2(const V1_3::Operation& operation) { - return {.type = uncheckedConvertToV1_2(operation.type), - .inputs = operation.inputs, - .outputs = operation.outputs}; -} - -static V1_3::Operation convertToV1_3(const V1_0::Operation& operation) { - return {.type = convertToV1_3(operation.type), - 
.inputs = operation.inputs, - .outputs = operation.outputs}; -} - -static V1_3::Operation convertToV1_3(const V1_1::Operation& operation) { - return {.type = convertToV1_3(operation.type), - .inputs = operation.inputs, - .outputs = operation.outputs}; -} - -static V1_3::Operation convertToV1_3(const V1_2::Operation& operation) { - return {.type = convertToV1_3(operation.type), - .inputs = operation.inputs, - .outputs = operation.outputs}; -} - -static hardware::hidl_vec uncheckedConvertToV1_0( - const hardware::hidl_vec& operations) { - hardware::hidl_vec result(operations.size()); - std::transform( - operations.begin(), operations.end(), result.begin(), - [](const V1_3::Operation& operation) { return uncheckedConvertToV1_0(operation); }); - return result; -} - -static hardware::hidl_vec uncheckedConvertToV1_0( - const hardware::hidl_vec& operations) { - hardware::hidl_vec result(operations.size()); - std::transform( - operations.begin(), operations.end(), result.begin(), - [](const V1_2::Operation& operation) { return uncheckedConvertToV1_0(operation); }); - return result; -} - -static hardware::hidl_vec uncheckedConvertToV1_2( - const hardware::hidl_vec& operations) { - hardware::hidl_vec result(operations.size()); - std::transform( - operations.begin(), operations.end(), result.begin(), - [](const V1_3::Operation& operation) { return uncheckedConvertToV1_2(operation); }); - return result; -} - -static hardware::hidl_vec uncheckedConvertToV1_1( - const hardware::hidl_vec& operations) { - hardware::hidl_vec result(operations.size()); - std::transform( - operations.begin(), operations.end(), result.begin(), - [](const V1_2::Operation& operation) { return uncheckedConvertToV1_1(operation); }); - return result; -} - -static hardware::hidl_vec uncheckedConvertToV1_1( - const hardware::hidl_vec& operations) { - hardware::hidl_vec result(operations.size()); - std::transform( - operations.begin(), operations.end(), result.begin(), - [](const V1_3::Operation& operation) { return uncheckedConvertToV1_1(operation); }); - return result; -} - -static hardware::hidl_vec convertToV1_2( - const hardware::hidl_vec& operations) { - hardware::hidl_vec result(operations.size()); - std::transform(operations.begin(), operations.end(), result.begin(), - [](const V1_0::Operation& operation) { return convertToV1_2(operation); }); - return result; -} - -static hardware::hidl_vec convertToV1_2( - const hardware::hidl_vec& operations) { - hardware::hidl_vec result(operations.size()); - std::transform(operations.begin(), operations.end(), result.begin(), - [](const V1_1::Operation& operation) { return convertToV1_2(operation); }); - return result; -} - -static hardware::hidl_vec convertToV1_3( - const hardware::hidl_vec& operations) { - hardware::hidl_vec result(operations.size()); - std::transform(operations.begin(), operations.end(), result.begin(), - [](const V1_0::Operation& operation) { return convertToV1_3(operation); }); - return result; -} - -static hardware::hidl_vec convertToV1_3( - const hardware::hidl_vec& operations) { - hardware::hidl_vec result(operations.size()); - std::transform(operations.begin(), operations.end(), result.begin(), - [](const V1_1::Operation& operation) { return convertToV1_3(operation); }); - return result; -} - -static hardware::hidl_vec convertToV1_3( - const hardware::hidl_vec& operations) { - hardware::hidl_vec result(operations.size()); - std::transform(operations.begin(), operations.end(), result.begin(), - [](const V1_2::Operation& operation) { return 
convertToV1_3(operation); }); - return result; -} - -static bool compliantWithV1_0(const V1_2::OperandType& operandType) { - return validOperandType(static_cast(operandType)); -} - -static bool compliantWithV1_0(const V1_3::OperandType& operandType) { - return validOperandType(static_cast(operandType)); -} - -static bool compliantWithV1_2(const V1_3::OperandType& operandType) { - return validOperandType(static_cast(operandType)); -} - -V1_0::OperandType convertToV1_0(const V1_2::OperandType& operandType) { - if (!compliantWithV1_0(operandType)) { - LOG(ERROR) << "Upcasting non-compliant operand type " << toString(operandType) - << " from V1_2::OperandType to V1_0::OperandType"; - } - return static_cast(operandType); -} - -V1_2::OperandType convertToV1_2(const V1_0::OperandType& operandType) { - return static_cast(operandType); -} - -V1_2::OperandType convertToV1_2(const V1_3::OperandType& operandType) { - if (!compliantWithV1_2(operandType)) { - LOG(ERROR) << "Upcasting non-compliant operand type " << toString(operandType) - << " from V1_3::OperandType to V1_2::OperandType"; - } - return static_cast(operandType); -} - -V1_0::OperandType convertToV1_0(const V1_3::OperandType& operandType) { - if (!compliantWithV1_0(operandType)) { - LOG(ERROR) << "Upcasting non-compliant operand type " << toString(operandType) - << " from V1_3::Operand to V1_0::Operand"; - } - return static_cast(operandType); -} - -bool compliantWithV1_0(V1_0::OperandLifeTime lifetime) { - return true; -} - -bool compliantWithV1_0(V1_3::OperandLifeTime lifetime) { - return lifetime != V1_3::OperandLifeTime::SUBGRAPH; -} - -bool compliantWithV1_3(V1_0::OperandLifeTime lifetime) { - return true; -} - -bool compliantWithV1_3(V1_3::OperandLifeTime lifetime) { - return true; -} - -V1_0::OperandLifeTime convertToV1_0(V1_0::OperandLifeTime lifetime) { - return lifetime; -} - -V1_0::OperandLifeTime convertToV1_0(V1_3::OperandLifeTime lifetime) { - if (!compliantWithV1_0(lifetime)) { - LOG(ERROR) << "Upcasting non-compliant lifetime " << toString(lifetime) - << " from V1_3 to V1_0"; - } - return static_cast(lifetime); -} - -V1_3::OperandLifeTime convertToV1_3(V1_0::OperandLifeTime lifetime) { - return static_cast(lifetime); -} - -V1_3::OperandLifeTime convertToV1_3(V1_3::OperandLifeTime lifetime) { - return lifetime; -} - -V1_0::Operand convertToV1_0(const V1_2::Operand& operand) { - return {.type = convertToV1_0(operand.type), - .dimensions = operand.dimensions, - .numberOfConsumers = operand.numberOfConsumers, - .scale = operand.scale, - .zeroPoint = operand.zeroPoint, - .lifetime = convertToV1_0(operand.lifetime), - .location = operand.location}; -} - -V1_0::Operand convertToV1_0(const V1_3::Operand& operand) { - return {.type = convertToV1_0(operand.type), - .dimensions = operand.dimensions, - .numberOfConsumers = operand.numberOfConsumers, - .scale = operand.scale, - .zeroPoint = operand.zeroPoint, - .lifetime = convertToV1_0(operand.lifetime), - .location = operand.location}; -} - -V1_2::Operand convertToV1_2(const V1_0::Operand& operand) { - return {.type = convertToV1_2(operand.type), - .dimensions = operand.dimensions, - .numberOfConsumers = operand.numberOfConsumers, - .scale = operand.scale, - .zeroPoint = operand.zeroPoint, - .lifetime = operand.lifetime, - .location = operand.location}; -} - -V1_2::Operand convertToV1_2(const V1_3::Operand& operand) { - return {.type = convertToV1_2(operand.type), - .dimensions = operand.dimensions, - .numberOfConsumers = operand.numberOfConsumers, - .scale = operand.scale, - .zeroPoint 
= operand.zeroPoint, - .lifetime = static_cast(operand.lifetime), - .location = operand.location, - .extraParams = operand.extraParams}; -} - -V1_3::Operand convertToV1_3(const V1_0::Operand& operand) { - return {.type = static_cast(operand.type), - .dimensions = operand.dimensions, - .numberOfConsumers = operand.numberOfConsumers, - .scale = operand.scale, - .zeroPoint = operand.zeroPoint, - .lifetime = convertToV1_3(operand.lifetime), - .location = operand.location}; -} - -V1_3::Operand convertToV1_3(const V1_2::Operand& operand) { - return {.type = static_cast(operand.type), - .dimensions = operand.dimensions, - .numberOfConsumers = operand.numberOfConsumers, - .scale = operand.scale, - .zeroPoint = operand.zeroPoint, - .lifetime = convertToV1_3(operand.lifetime), - .location = operand.location, - .extraParams = operand.extraParams}; -} - -V1_3::Operand convertToV1_3(const V1_3::Operand& operand) { - return operand; -} - -hardware::hidl_vec convertToV1_0(const hardware::hidl_vec& operands) { - return operands; -} - -hardware::hidl_vec convertToV1_0(const hardware::hidl_vec& operands) { - hardware::hidl_vec result(operands.size()); - std::transform(operands.begin(), operands.end(), result.begin(), - [](const V1_2::Operand& operand) { return convertToV1_0(operand); }); - return result; -} - -hardware::hidl_vec convertToV1_0(const hardware::hidl_vec& operands) { - hardware::hidl_vec result(operands.size()); - std::transform(operands.begin(), operands.end(), result.begin(), - [](const V1_3::Operand& operand) { return convertToV1_0(operand); }); - return result; -} - -hardware::hidl_vec convertToV1_2(const hardware::hidl_vec& operands) { - hardware::hidl_vec result(operands.size()); - std::transform(operands.begin(), operands.end(), result.begin(), - [](const V1_0::Operand& operand) { return convertToV1_2(operand); }); - return result; -} - -hardware::hidl_vec convertToV1_2(const hardware::hidl_vec& operands) { - return operands; -} - -hardware::hidl_vec convertToV1_2(const hardware::hidl_vec& operands) { - hardware::hidl_vec result(operands.size()); - std::transform(operands.begin(), operands.end(), result.begin(), - [](const V1_3::Operand& operand) { return convertToV1_2(operand); }); - return result; -} - -hardware::hidl_vec convertToV1_3(const hardware::hidl_vec& operands) { - hardware::hidl_vec result(operands.size()); - std::transform(operands.begin(), operands.end(), result.begin(), - [](const V1_0::Operand& operand) { return convertToV1_3(operand); }); - return result; -} - -hardware::hidl_vec convertToV1_3(const hardware::hidl_vec& operands) { - hardware::hidl_vec result(operands.size()); - std::transform(operands.begin(), operands.end(), result.begin(), - [](const V1_2::Operand& operand) { return convertToV1_3(operand); }); - return result; -} - -hardware::hidl_vec convertToV1_3(const hardware::hidl_vec& operands) { - return operands; -} - -V1_0::Model convertToV1_0(const V1_0::Model& model) { - return model; -} - -V1_0::Model convertToV1_0(const V1_1::Model& model) { - if (!compliantWithV1_0(model)) { - LOG(ERROR) << "Upcasting non-compliant model " << SHOW_IF_DEBUG(toString(model)) - << " from V1_1::Model to V1_0::Model"; - } - return {.operands = model.operands, - .operations = uncheckedConvertToV1_0(model.operations), - .inputIndexes = model.inputIndexes, - .outputIndexes = model.outputIndexes, - .operandValues = model.operandValues, - .pools = model.pools}; -} - -V1_0::Model convertToV1_0(const V1_2::Model& model) { - if (!compliantWithV1_0(model)) { - LOG(ERROR) << "Upcasting 
non-compliant model " << SHOW_IF_DEBUG(toString(model)) - << " from V1_2::Model to V1_0::Model"; - } - return {.operands = convertToV1_0(model.operands), - .operations = uncheckedConvertToV1_0(model.operations), - .inputIndexes = model.inputIndexes, - .outputIndexes = model.outputIndexes, - .operandValues = model.operandValues, - .pools = model.pools}; -} - -V1_0::Model convertToV1_0(const V1_3::Model& model) { - if (!compliantWithV1_0(model)) { - LOG(ERROR) << "Upcasting non-compliant model " << SHOW_IF_DEBUG(toString(model)) - << " from V1_3::Model to V1_0::Model"; - } - return {.operands = convertToV1_0(model.main.operands), - .operations = uncheckedConvertToV1_0(model.main.operations), - .inputIndexes = model.main.inputIndexes, - .outputIndexes = model.main.outputIndexes, - .operandValues = model.operandValues, - .pools = model.pools}; -} - -V1_1::Model convertToV1_1(const V1_0::Model& model) { - return {.operands = model.operands, - .operations = convertToV1_1(model.operations), - .inputIndexes = model.inputIndexes, - .outputIndexes = model.outputIndexes, - .operandValues = model.operandValues, - .pools = model.pools, - .relaxComputationFloat32toFloat16 = false}; -} - -V1_1::Model convertToV1_1(const V1_1::Model& model) { - return model; -} - -V1_1::Model convertToV1_1(const V1_2::Model& model) { - if (!compliantWithV1_1(model)) { - LOG(ERROR) << "Upcasting non-compliant model " << SHOW_IF_DEBUG(toString(model)) - << " from V1_2::Model to V1_1::Model"; - } - return {.operands = convertToV1_0(model.operands), // Operands in 1.1 and 1.0 are identical. - .operations = uncheckedConvertToV1_1(model.operations), - .inputIndexes = model.inputIndexes, - .outputIndexes = model.outputIndexes, - .operandValues = model.operandValues, - .pools = model.pools, - .relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16}; -} - -V1_1::Model convertToV1_1(const V1_3::Model& model) { - if (!compliantWithV1_1(model)) { - LOG(ERROR) << "Upcasting non-compliant model " << SHOW_IF_DEBUG(toString(model)) - << " from V1_3::Model to V1_1::Model"; - } - return {// Operands in 1.1 and 1.0 are identical. 
- .operands = convertToV1_0(model.main.operands), - .operations = uncheckedConvertToV1_1(model.main.operations), - .inputIndexes = model.main.inputIndexes, - .outputIndexes = model.main.outputIndexes, - .operandValues = model.operandValues, - .pools = model.pools, - .relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16}; -} - -V1_2::Model convertToV1_2(const V1_0::Model& model) { - return {.operands = convertToV1_2(model.operands), - .operations = convertToV1_2(model.operations), - .inputIndexes = model.inputIndexes, - .outputIndexes = model.outputIndexes, - .operandValues = model.operandValues, - .pools = model.pools, - .relaxComputationFloat32toFloat16 = false}; -} - -V1_2::Model convertToV1_2(const V1_1::Model& model) { - return {.operands = convertToV1_2(model.operands), - .operations = convertToV1_2(model.operations), - .inputIndexes = model.inputIndexes, - .outputIndexes = model.outputIndexes, - .operandValues = model.operandValues, - .pools = model.pools, - .relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16}; -} - -V1_2::Model convertToV1_2(const V1_2::Model& model) { - return model; -} - -V1_2::Model convertToV1_2(const V1_3::Model& model) { - if (!compliantWithV1_2(model)) { - LOG(ERROR) << "Upcasting non-compliant model " << SHOW_IF_DEBUG(toString(model)) - << " from V1_3::Model to V1_2::Model"; - } - return {.operands = convertToV1_2(model.main.operands), - .operations = uncheckedConvertToV1_2(model.main.operations), - .inputIndexes = model.main.inputIndexes, - .outputIndexes = model.main.outputIndexes, - .operandValues = model.operandValues, - .pools = model.pools, - .relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16, - .extensionNameToPrefix = model.extensionNameToPrefix}; -} - -V1_3::Model convertToV1_3(const V1_0::Model& model) { - return {.main = {.operands = convertToV1_3(model.operands), - .operations = convertToV1_3(model.operations), - .inputIndexes = model.inputIndexes, - .outputIndexes = model.outputIndexes}, - .operandValues = model.operandValues, - .pools = model.pools, - .relaxComputationFloat32toFloat16 = false}; -} - -V1_3::Model convertToV1_3(const V1_1::Model& model) { - return {.main = {.operands = convertToV1_3(model.operands), - .operations = convertToV1_3(model.operations), - .inputIndexes = model.inputIndexes, - .outputIndexes = model.outputIndexes}, - .operandValues = model.operandValues, - .pools = model.pools, - .relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16}; -} - -V1_3::Model convertToV1_3(const V1_2::Model& model) { - return {.main = {.operands = convertToV1_3(model.operands), - .operations = convertToV1_3(model.operations), - .inputIndexes = model.inputIndexes, - .outputIndexes = model.outputIndexes}, - .operandValues = model.operandValues, - .pools = model.pools, - .relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16, - .extensionNameToPrefix = model.extensionNameToPrefix}; -} - -V1_3::Model convertToV1_3(const V1_3::Model& model) { - return model; -} - -bool compliantWithV1_0(const V1_0::Request& request) { - return true; -} - -bool compliantWithV1_0(const V1_3::Request& request) { - return std::all_of(request.pools.begin(), request.pools.end(), [](const auto& pool) { - if (pool.getDiscriminator() != V1_3::Request::MemoryPool::hidl_discriminator::hidlMemory) { - return false; - } - const auto& name = pool.hidlMemory().name(); - return name == "ashmem" || name == "mmap_fd"; - }); -} - -bool compliantWithV1_2(const V1_3::Request& 
request) { - return std::all_of(request.pools.begin(), request.pools.end(), [](const auto& pool) { - if (pool.getDiscriminator() != V1_3::Request::MemoryPool::hidl_discriminator::hidlMemory) { - return false; - } - const auto& name = pool.hidlMemory().name(); - return name == "ashmem" || name == "mmap_fd" || name == "hardware_buffer_blob" || - name == "hardware_buffer"; - }); -} - -static hardware::hidl_memory convertToV1_0(const V1_3::Request::MemoryPool& pool) { - switch (pool.getDiscriminator()) { - case V1_3::Request::MemoryPool::hidl_discriminator::hidlMemory: - return pool.hidlMemory(); - case V1_3::Request::MemoryPool::hidl_discriminator::token: - return hardware::hidl_memory{}; - } -} - -static V1_3::Request::MemoryPool convertToV1_3(const hardware::hidl_memory& pool) { - V1_3::Request::MemoryPool ret; - ret.hidlMemory(pool); - return ret; -} - -V1_0::Request convertToV1_0(const V1_0::Request& request) { - return request; -} - -static V1_0::Request uncheckedConvertToV1_0(const V1_3::Request& request) { - hardware::hidl_vec pools(request.pools.size()); - std::transform(request.pools.begin(), request.pools.end(), pools.begin(), - [](const auto& pool) { return convertToV1_0(pool); }); - return {.inputs = request.inputs, .outputs = request.outputs, .pools = std::move(pools)}; -} - -V1_0::Request convertToV1_0(const V1_3::Request& request) { - if (!compliantWithV1_0(request)) { - LOG(ERROR) << "Upcasting non-compliant request " << SHOW_IF_DEBUG(toString(request)) - << " from V1_3::Request to V1_0::Request of version 1.0"; - } - return uncheckedConvertToV1_0(request); -} - -V1_0::Request convertToV1_2(const V1_3::Request& request) { - if (!compliantWithV1_2(request)) { - LOG(ERROR) << "Upcasting non-compliant request " << SHOW_IF_DEBUG(toString(request)) - << " from V1_3::Request to V1_0::Request of version 1.2"; - } - return uncheckedConvertToV1_0(request); -} - -V1_3::Request convertToV1_3(const V1_0::Request& request) { - hardware::hidl_vec pools(request.pools.size()); - std::transform(request.pools.begin(), request.pools.end(), pools.begin(), - [](const auto& pool) { return convertToV1_3(pool); }); - return {.inputs = request.inputs, .outputs = request.outputs, .pools = std::move(pools)}; -} - -V1_3::Request convertToV1_3(const V1_3::Request& request) { - return request; -} - -FenceState syncWait(int fd, int timeout) { - // This implementation is directly based on the ::sync_wait() implementation. 
- - struct pollfd fds; - int ret; - - if (fd < 0) { - errno = EINVAL; - return FenceState::UNKNOWN; - } - - fds.fd = fd; - fds.events = POLLIN; - - do { - ret = poll(&fds, 1, timeout); - if (ret > 0) { - if (fds.revents & POLLNVAL) { - errno = EINVAL; - return FenceState::UNKNOWN; - } - if (fds.revents & POLLERR) { - errno = EINVAL; - return FenceState::ERROR; - } - return FenceState::SIGNALED; - } else if (ret == 0) { - errno = ETIME; - return FenceState::ACTIVE; - } - } while (ret == -1 && (errno == EINTR || errno == EAGAIN)); - - return FenceState::UNKNOWN; -} - -#ifdef NN_DEBUGGABLE -uint32_t getProp(const char* str, uint32_t defaultValue) { - const std::string propStr = android::base::GetProperty(str, ""); - if (propStr.size() > 0) { - return std::stoi(propStr); - } else { - return defaultValue; + return defaultValue; } } #endif // NN_DEBUGGABLE -ErrorStatus uncheckedConvert(V1_0::ErrorStatus status) { - return nnTryGetValue(convert(status)); -} - -ErrorStatus uncheckedConvert(V1_3::ErrorStatus status) { - return nnTryGetValue(convert(status)); -} - -OperandType uncheckedConvert(V1_3::OperandType operandType) { - return nnTryGetValue(convert(operandType)); -} - -OperationType uncheckedConvert(V1_3::OperationType operandType) { - return nnTryGetValue(convert(operandType)); -} - -Operand::LifeTime uncheckedConvert(V1_3::OperandLifeTime lifetime) { - return nnTryGetValue(convert(lifetime)); -} - -MeasureTiming uncheckedConvert(V1_2::MeasureTiming measure) { - return nnTryGetValue(convert(measure)); -} - -DataLocation uncheckedConvert(const V1_0::DataLocation& location) { - return nnTryGetValue(convert(location)); -} - -Operand uncheckedConvert(const V1_3::Operand& operand) { - return nnTryGetValue(convert(operand)); -} - -Operand::ExtraParams uncheckedConvert(const V1_2::Operand::ExtraParams& params) { - return nnTryGetValue(convert(params)); -} - -Operand::SymmPerChannelQuantParams uncheckedConvert(const V1_2::SymmPerChannelQuantParams& params) { - return nnTryGetValue(convert(params)); -} - -Operand::ExtensionParams uncheckedConvert(const hardware::hidl_vec& params) { - return params; -} - -Operation uncheckedConvert(const V1_3::Operation& operation) { - return nnTryGetValue(convert(operation)); -} - -template -static std::vector convertVec(const hardware::hidl_vec& items) { - std::vector result(items.size()); - std::transform(items.begin(), items.end(), result.begin(), - [](const HalType& item) { return uncheckedConvert(item); }); - return result; -} - -Model uncheckedConvert(const V1_3::Model& model) { - return nnTryGetValue(convert(model)); -} - -Model::Subgraph uncheckedConvert(const V1_3::Subgraph& subgraph) { - return nnTryGetValue(convert(subgraph)); -} - -Model::ExtensionNameAndPrefix uncheckedConvert(const V1_2::Model::ExtensionNameAndPrefix& x) { - return nnTryGetValue(convert(x)); -} - -Request uncheckedConvert(const V1_3::Request& request) { - return nnTryGetValue(convert(request)); -} - -Request::Argument uncheckedConvert(const V1_0::RequestArgument& requestArgument) { - return nnTryGetValue(convert(requestArgument)); -} - -Request::MemoryPool uncheckedConvert(const V1_3::Request::MemoryPool& memoryPool) { - return nnTryGetValue(convert(memoryPool)); -} - -OutputShape uncheckedConvert(const V1_2::OutputShape& outputShape) { - return nnTryGetValue(convert(outputShape)); -} - -std::vector uncheckedConvert( - const hardware::hidl_vec& outputShapes) { - return convertVec(outputShapes); -} - -Capabilities uncheckedConvert(const V1_3::Capabilities& capabilities) { - return 
nnTryGetValue(convert(capabilities));
-}
-
-Capabilities::OperandPerformance uncheckedConvert(
-        const V1_3::Capabilities::OperandPerformance& operandPerformance) {
-    return nnTryGetValue(convert(operandPerformance));
-}
-
-Capabilities::PerformanceInfo uncheckedConvert(const V1_0::PerformanceInfo& performanceInfo) {
-    return nnTryGetValue(convert(performanceInfo));
-}
-
-Extension uncheckedConvert(const V1_2::Extension& extension) {
-    return nnTryGetValue(convert(extension));
-}
-
-std::vector<Extension> uncheckedConvert(const hardware::hidl_vec<V1_2::Extension>& extensions) {
-    return convertVec(extensions);
-}
-
-Extension::OperandTypeInformation uncheckedConvert(
-        const V1_2::Extension::OperandTypeInformation& info) {
-    return nnTryGetValue(convert(info));
-}
-
-OptionalTimeoutDuration uncheckedConvert(const V1_3::OptionalTimeoutDuration& timeoutDuration) {
-    return nnTryGetValue(convert(timeoutDuration));
-}
-
-Timing uncheckedConvert(const V1_2::Timing& timing) {
-    return nnTryGetValue(convert(timing));
-}
-
-V1_0::ErrorStatus convertToV1_0(ErrorStatus status) {
-    return static_cast<V1_0::ErrorStatus>(static_cast<int>(status));
-}
-
-V1_3::ErrorStatus convertToV1_3(ErrorStatus status) {
-    return nnTryGetValue(V1_3::utils::convert(status));
-}
-
-V1_3::OperandType convertToV1_3(OperandType operandType) {
-    return nnTryGetValue(V1_3::utils::convert(operandType));
-}
-
-V1_3::OperationType convertToV1_3(OperationType operandType) {
-    return nnTryGetValue(V1_3::utils::convert(operandType));
-}
-
-V1_3::OperandLifeTime convertToV1_3(Operand::LifeTime lifetime) {
-    return nnTryGetValue(V1_3::utils::convert(lifetime));
-}
-
-V1_1::ExecutionPreference convertToV1_1(ExecutionPreference preference) {
-    return nnTryGetValue(V1_1::utils::convert(preference));
-}
-
-V1_3::Priority convertToV1_3(Priority priority) {
-    return nnTryGetValue(V1_3::utils::convert(priority));
-}
-
-V1_2::MeasureTiming convertToV1_2(MeasureTiming measure) {
-    return nnTryGetValue(V1_2::utils::convert(measure));
-}
-
-V1_0::DataLocation convertToV1_0(const DataLocation& location) {
-    return nnTryGetValue(V1_0::utils::convert(location));
-}
-
-V1_3::Operand convertToV1_3(const Operand& operand) {
-    return nnTryGetValue(V1_3::utils::convert(operand));
-}
-
-V1_2::Operand::ExtraParams convertToV1_2(const Operand::ExtraParams& params) {
-    return nnTryGetValue(V1_2::utils::convert(params));
-}
-
-V1_2::SymmPerChannelQuantParams convertToV1_2(const Operand::SymmPerChannelQuantParams& params) {
-    return nnTryGetValue(V1_2::utils::convert(params));
-}
-
-hardware::hidl_vec<uint8_t> uncheckedConvert(const Operand::ExtensionParams& params) {
-    return params;
-}
-
-V1_3::Operation convertToV1_3(const Operation& operation) {
-    return nnTryGetValue(V1_3::utils::convert(operation));
-}
-
-template <typename HalType, typename CanonicalType>
-static hardware::hidl_vec<HalType> convertVecToV1_0(const std::vector<CanonicalType>& items) {
-    hardware::hidl_vec<HalType> result(items.size());
-    std::transform(items.begin(), items.end(), result.begin(),
-                   [](const CanonicalType& item) { return convertToV1_0(item); });
-    return result;
-}
-
-template <typename HalType, typename CanonicalType>
-static hardware::hidl_vec<HalType> convertVecToV1_2(const std::vector<CanonicalType>& items) {
-    hardware::hidl_vec<HalType> result(items.size());
-    std::transform(items.begin(), items.end(), result.begin(),
-                   [](const CanonicalType& item) { return convertToV1_2(item); });
-    return result;
-}
-
-template <typename HalType, typename CanonicalType>
-static hardware::hidl_vec<HalType> convertVecToV1_3(const std::vector<CanonicalType>& items) {
-    hardware::hidl_vec<HalType> result(items.size());
-    std::transform(items.begin(), items.end(), result.begin(),
-                   [](const CanonicalType& item) { return convertToV1_3(item); });
-    return result;
-}
-
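Every convertVec and convertVecToV1_X helper above follows the same shape: allocate the destination at the source's size, then std::transform with the per-element converter. A self-contained sketch of that pattern, with std::vector standing in for hardware::hidl_vec:

#include <algorithm>
#include <cassert>
#include <string>
#include <vector>

// Generic element-wise vector conversion; Out must be given explicitly,
// In and Fn are deduced from the arguments.
template <typename Out, typename In, typename Fn>
std::vector<Out> convertVec(const std::vector<In>& items, Fn convertOne) {
    std::vector<Out> result(items.size());  // Pre-size so transform writes in place.
    std::transform(items.begin(), items.end(), result.begin(), convertOne);
    return result;
}

int main() {
    const std::vector<int> ints = {1, 2, 3};
    const auto strings =
            convertVec<std::string>(ints, [](int i) { return std::to_string(i); });
    assert(strings.size() == 3 && strings[2] == "3");
}

Pre-sizing plus transform avoids incremental growth, and it is also the natural shape for hidl_vec, which is sized up front rather than grown element by element.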
-V1_2::OutputShape convertToV1_2(const OutputShape& outputShape) { - return nnTryGetValue(V1_2::utils::convert(outputShape)); -} - -hardware::hidl_vec convertToV1_2(const std::vector& outputShapes) { - return convertVecToV1_2(outputShapes); -} - -V1_3::Model convertToV1_3(const Model& model) { - return nnTryGetValue(V1_3::utils::convert(model)); -} - -V1_3::Subgraph convertToV1_3(const Model::Subgraph& subgraph) { - return nnTryGetValue(V1_3::utils::convert(subgraph)); -} - -V1_2::Model::ExtensionNameAndPrefix convertToV1_2(const Model::ExtensionNameAndPrefix& x) { - return nnTryGetValue(V1_2::utils::convert(x)); -} - -V1_3::Request convertToV1_3(const Request& request) { - return nnTryGetValue(V1_3::utils::convert(request)); -} - -V1_0::RequestArgument convertToV1_0(const Request::Argument& requestArgument) { - return nnTryGetValue(V1_0::utils::convert(requestArgument)); -} - -V1_3::Request::MemoryPool convertToV1_3(const Request::MemoryPool& memoryPool) { - return nnTryGetValue(V1_3::utils::convert(memoryPool)); -} - -std::vector uncheckedConvert( - const hardware::hidl_vec& memoryPools) { - return convertVec(memoryPools); -} - -V1_3::OptionalTimePoint convertToV1_3(const OptionalTimePoint& timePoint) { - return nnTryGetValue(V1_3::utils::convert(timePoint)); -} - -V1_3::OptionalTimeoutDuration convertToV1_3(const OptionalTimeoutDuration& timeoutDuration) { - return nnTryGetValue(V1_3::utils::convert(timeoutDuration)); -} - -V1_2::Timing convertToV1_2(const Timing& timing) { - return nnTryGetValue(V1_2::utils::convert(timing)); -} - -V1_3::BufferRole convertToV1_3(const BufferRole& bufferRole) { - return nnTryGetValue(V1_3::utils::convert(bufferRole)); -} - -hardware::hidl_vec convertToV1_3(const std::vector& bufferRoles) { - return convertVecToV1_3(bufferRoles); -} - -hardware::hidl_vec convertToV1_0(const Model::OperandValues& operandValues) { - return nnTryGetValue(V1_0::utils::convert(operandValues)); -} - -hardware::hidl_memory convertToV1_0(const Memory& memory) { - return nnTryGetValue(V1_0::utils::convert(memory)); -} - -Memory uncheckedConvert(const hardware::hidl_memory& memory) { - return nnTryGetValue(convert(memory)); -} - -hardware::hidl_vec convertToV1_0(const std::vector& memories) { - return convertVecToV1_0(memories); -} - -std::vector uncheckedConvert(const hardware::hidl_vec& memories) { - return convertVec(memories); -} - -std::vector uncheckedConvert(const hardware::hidl_vec& subgraphs) { - return convertVec(subgraphs); -} - -std::vector uncheckedConvert(const hardware::hidl_vec& operands) { - return convertVec(operands); -} - } // namespace nn } // namespace android diff --git a/nn/common/include/LegacyHalUtils.h b/nn/common/include/LegacyHalUtils.h index cdaf91172..ffa4a8f6b 100644 --- a/nn/common/include/LegacyHalUtils.h +++ b/nn/common/include/LegacyHalUtils.h @@ -13,11 +13,16 @@ * See the License for the specific language governing permissions and * limitations under the License. */ +// This file contains pre-canonical-types utility code and includes HAL +// utilities. LegacyUtils.h is the subset of these utilities that do not touch +// HAL. 
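Given the split this header comment describes, which file a client includes depends on whether it touches HAL types at all. A hypothetical pair of client fragments (not a complete translation unit; the utility names are taken from the declarations elsewhere in this patch):

// Runtime code that works purely with canonical types stays HAL-free:
#include "LegacyUtils.h"
// e.g. nonExtensionOperandSizeOfData(OperandType, dimensions) is declared here.

// Driver-facing code that still handles V1_x types pulls in the superset,
// which itself includes LegacyUtils.h, so both sets of helpers are visible:
#include "LegacyHalUtils.h"
// e.g. logModelToInfo(const V1_3::Model&) is only declared here.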
-#ifndef ANDROID_FRAMEWORKS_ML_NN_COMMON_UTILS_H -#define ANDROID_FRAMEWORKS_ML_NN_COMMON_UTILS_H +#ifndef ANDROID_FRAMEWORKS_ML_NN_COMMON_LEGACY_HAL_UTILS_H +#define ANDROID_FRAMEWORKS_ML_NN_COMMON_LEGACY_HAL_UTILS_H #include +#include +#include #include #include @@ -26,107 +31,19 @@ #include #include "HalInterfaces.h" +#include "LegacyUtils.h" #include "NeuralNetworks.h" -#include "OperationResolver.h" #include "ValidateHal.h" -#include "nnapi/TypeUtils.h" -#include "nnapi/Types.h" namespace android { namespace nn { -// The number of data types (OperandCode) defined in NeuralNetworks.h. -const int kNumberOfDataTypes = 16; - -// The number of operation types (OperationCode) defined in NeuralNetworks.h. -const int kNumberOfOperationTypes = 102; -static_assert(kNumberOfOperationTypes == BuiltinOperationResolver::kNumberOfOperationTypes); - -// The number of execution preferences defined in NeuralNetworks.h. -const int kNumberOfPreferences = 3; - -// The number of data types (OperandCode) defined in NeuralNetworksOEM.h. -const int kNumberOfDataTypesOEM = 2; - -// The number of operation types (OperationCode) defined in NeuralNetworksOEM.h. -const int kNumberOfOperationTypesOEM = 1; - -// The lowest number assigned to any OEM Code in NeuralNetworksOEM.h. -const int kOEMCodeBase = 10000; - -/* IMPORTANT: if you change the following list, don't - * forget to update the corresponding 'tags' table in - * the initVlogMask() function implemented in Utils.cpp. - */ -enum VLogFlags { MODEL = 0, COMPILATION, EXECUTION, CPUEXE, MANAGER, DRIVER, MEMORY }; - -#define VLOG_IS_ON(TAG) ((vLogMask & (1 << (TAG))) != 0) - -#define VLOG(TAG) \ - if (LIKELY(!VLOG_IS_ON(TAG))) \ - ; \ - else \ - LOG(INFO) - -extern int vLogMask; -void initVLogMask(); - -#ifdef NN_DEBUGGABLE -#define SHOW_IF_DEBUG(msg) msg -#else -#define SHOW_IF_DEBUG(msg) "" -#endif - -// DEPRECATED(b/118737105). Use CHECK. -#define nnAssert(v) CHECK(v) - -#define NN_RETURN_IF_ERROR(expr) \ - do { \ - int _errorCode = (expr); \ - if (_errorCode != ANEURALNETWORKS_NO_ERROR) { \ - return _errorCode; \ - } \ - } while (0) - -// Make an TimeoutDuration from a duration in nanoseconds. If the value exceeds -// the max duration, return the maximum expressible duration. -TimeoutDuration makeTimeoutDuration(uint64_t nanoseconds); - -// Type to represent a deadline time point across processes. -using Deadline = std::chrono::steady_clock::time_point; - -// Make an Deadline from a duration. If the sum of the current time and the -// duration exceeds the max time, return a time point holding the maximum -// expressible time. -Deadline makeDeadline(TimeoutDuration duration); -inline Deadline makeDeadline(uint64_t duration) { - return makeDeadline(makeTimeoutDuration(duration)); -} - -// Convenience function. If the duration is provided, this function creates a -// Deadline using makeDeadline. If the duration is not provided, this function -// returns std::nullopt. -inline std::optional makeDeadline(OptionalTimeoutDuration duration) { - return duration.has_value() ? makeDeadline(*duration) : std::optional{}; -} -inline std::optional makeDeadline(std::optional duration) { - return duration.has_value() ? makeDeadline(*duration) : std::optional{}; -} - // Make an optional Deadline from an OptionalTimePoint. If // timePoint.nanosecondsSinceEpoch cannot be represented in Deadline, return a // time point holding the maximum Deadline. If the OptionalTimePoint is none, // this function returns std::nullopt. 
std::optional makeDeadline(const V1_3::OptionalTimePoint& timePoint); -// Returns true if the deadline has passed. Returns false if either the deadline -// has not been exceeded or if the deadline is not present. -bool hasDeadlinePassed(const std::optional& deadline); - -// Make an OptionalTimePoint from an optional Deadline. If the Deadline is not -// provided, this function returns none for OptionalTimePoint. -OptionalTimePoint makeTimePoint(const std::optional& deadline); - // Ensure that every user of FalseyErrorStream is linked to the // correct instance, using the correct LOG_TAG namespace { @@ -198,7 +115,6 @@ bool isExtensionOperationType(V1_3::OperationType type); // See also TypeManager::getSizeOfData(OperandType, const std::vector&). uint32_t nonExtensionOperandSizeOfData(V1_3::OperandType type, const std::vector& dimensions); -uint32_t nonExtensionOperandSizeOfData(OperandType type, const std::vector& dimensions); // Returns the amount of space needed to store a value of the dimensions and // type of this operand. For a tensor with unspecified rank or at least one @@ -208,46 +124,19 @@ uint32_t nonExtensionOperandSizeOfData(OperandType type, const std::vector& dimensions); - // Returns true if the amount of space needed to store a value of the specified // dimensions and element size overflows the uint32_t type. // // Aborts if the specified type is an extension type. // // See also TypeManager::sizeOfDataOverflowsUInt32(OperandType, const std::vector&). -bool nonExtensionOperandSizeOfDataOverflowsUInt32(OperandType type, - const std::vector& dimensions); bool nonExtensionOperandSizeOfDataOverflowsUInt32(V1_3::OperandType type, const std::vector& dimensions); -// Returns true if the amount of space needed to store a value of the specified -// dimensions and element size overflows the uint32_t type. -// -// See also TypeManager::sizeOfDataOverflowsUInt32(OperandType, const std::vector&). -bool sizeOfTensorDataOverflowsUInt32(uint32_t elementSize, const std::vector& dimensions); - -// Returns true if a non-extension operand type is a scalar type. -// -// Aborts if the specified type is an extension type. -// -// See also TypeManager::isTensorType(OperandType). -bool nonExtensionOperandTypeIsScalar(int type); - // Returns the name of the operation type in ASCII. std::string getOperationName(V1_3::OperationType opCode); @@ -257,107 +146,26 @@ std::string getOperandTypeName(V1_3::OperandType type); // Whether an operand of tensor type has unspecified dimensions. // // Undefined behavior if the operand type is a scalar type. -bool tensorHasUnspecifiedDimensions(int type, const uint32_t* dim, uint32_t dimCount); bool tensorHasUnspecifiedDimensions(V1_3::OperandType type, const std::vector& dimensions); -bool tensorHasUnspecifiedDimensions(OperandType type, const std::vector& dimensions); -bool tensorHasUnspecifiedDimensions(OperandType type, const Dimensions& dimensions); -bool tensorHasUnspecifiedDimensions(const Operand& operand); bool tensorHasUnspecifiedDimensions(const V1_3::Operand& operand); -bool tensorHasUnspecifiedDimensions(const ANeuralNetworksOperandType* type); - -// Returns the number of padding bytes needed to align data of the -// specified length. It aligns object of length: -// 2, 3 on a 2 byte boundary, -// 4+ on a 4 byte boundary. -// We may want to have different alignments for tensors. -// TODO: This is arbitrary, more a proof of concept. We need -// to determine what this should be. 
-uint32_t alignBytesNeeded(uint32_t index, size_t length); // Does a detailed LOG(INFO) of the model void logModelToInfo(const V1_0::Model& model); void logModelToInfo(const V1_1::Model& model); void logModelToInfo(const V1_2::Model& model); void logModelToInfo(const V1_3::Model& model); -void logModelToInfo(const Model& model); - -inline std::string toString(uint32_t obj) { - return std::to_string(obj); -} - -template -std::string toString(const std::vector& range) { - std::string os = "["; - for (size_t i = 0; i < range.size(); ++i) { - os += (i == 0 ? "" : ", ") + toString(range[i]); - } - return os += "]"; -} - -template -std::string toString(const std::pair& pair) { - std::ostringstream oss; - oss << "(" << pair.first << ", " << pair.second << ")"; - return oss.str(); -} - -inline bool validCode(uint32_t codeCount, uint32_t codeCountOEM, uint32_t code) { - return (code < codeCount) || (code >= kOEMCodeBase && (code - kOEMCodeBase) < codeCountOEM); -} bool validateOperandSymmPerChannelQuantParams( const V1_3::Operand& halOperand, const ANeuralNetworksSymmPerChannelQuantParams& channelQuant, const char* tag); -// Validates an operand type. -// -// extensionOperandTypeInfo must be nullptr iff the type is not an extension type. -// -// If allowPartial is true, the dimensions may be underspecified. -int validateOperandType(const ANeuralNetworksOperandType& type, - const Extension::OperandTypeInformation* const extensionOperandTypeInfo, - const char* tag, bool allowPartial); -int validateOperandList(uint32_t count, const uint32_t* list, uint32_t operandCount, - const char* tag); - -// A set of functions to help validate models containing IF or WHILE operations. -struct SubgraphValidationHelper { - // Checks if a given operand is a SUBGRAPH operand with a valid offset. - std::function isValidSubgraphReference; - // Gets the input count of a subgraph referenced by a given operand. - std::function getSubgraphInputCount; - // Gets the output count of a subgraph referenced by a given operand. - std::function getSubgraphOutputCount; - // Gets the specified input operand of a subgraph referenced by a given operand. - std::function getSubgraphInputOperand; - // Gets the specified output operand of a subgraph referenced by a given operand. - std::function getSubgraphOutputOperand; - // Whether control flow operations with inner or outer input or output - // operands of unknown size are allowed. - bool allowControlFlowOperationWithOperandOfUnknownSize; -}; - -// Returns ANEURALNETWORKS_NO_ERROR if the corresponding operation is defined and can handle the -// provided operand types in the given HAL version, otherwise returns ANEURALNETWORKS_BAD_DATA. -// The last argument is only used for validating IF and WHILE operations. -int validateOperation(ANeuralNetworksOperationType opType, uint32_t inputCount, - const uint32_t* inputIndexes, uint32_t outputCount, - const uint32_t* outputIndexes, const std::vector& operands, - HalVersion halVersion, const SubgraphValidationHelper& helper); - -inline size_t getSizeFromInts(int lower, int higher) { - return (uint32_t)(lower) + ((uint64_t)(uint32_t)(higher) << 32); -} - // Convert ANEURALNETWORKS_* result code to ErrorStatus. // Not guaranteed to be a 1-to-1 mapping. -ErrorStatus convertResultCodeToErrorStatus(int resultCode); V1_3::ErrorStatus convertResultCodeToHalErrorStatus(int resultCode); // Convert ErrorStatus to ANEURALNETWORKS_* result code. // Not guaranteed to be a 1-to-1 mapping. 
-int convertErrorStatusToResultCode(ErrorStatus status);
 int convertErrorStatusToResultCode(V1_3::ErrorStatus status);
 
 // Convert execution results to runtime format. Additionally checks that the
@@ -366,8 +174,6 @@ int convertErrorStatusToResultCode(V1_3::ErrorStatus status);
 std::tuple<int, std::vector<OutputShape>, Timing> getExecutionResult(
         V1_3::ErrorStatus status, const hardware::hidl_vec<V1_2::OutputShape>& outputShapes,
         const V1_2::Timing& timing);
-std::tuple<int, std::vector<OutputShape>, Timing> getExecutionResult(
-        ErrorStatus status, std::vector<OutputShape> outputShapes, Timing timing);
 
 // Versioning
 
@@ -507,34 +313,6 @@ constexpr V1_3::Priority convertToHalPriority(int32_t priority) {
     return {};
 }
 
-constexpr Priority convertToCanonicalPriority(int32_t priority) {
-    switch (priority) {
-        case ANEURALNETWORKS_PRIORITY_LOW:
-            return Priority::LOW;
-        case ANEURALNETWORKS_PRIORITY_MEDIUM:
-            return Priority::MEDIUM;
-        case ANEURALNETWORKS_PRIORITY_HIGH:
-            return Priority::HIGH;
-    }
-    LOG(FATAL) << "unrecognized priority: " << priority;
-    return {};
-}
-
-// The function syncWait() has the same semantics as the system function
-// ::sync_wait(), except that the syncWait() return value is semantically
-// richer. The timeout parameter is in msecs.
-enum class FenceState {
-    ACTIVE,    // fence has not been signaled
-    SIGNALED,  // fence has been signaled
-    ERROR,     // fence has been placed in the error state
-    UNKNOWN,   // either bad argument passed to syncWait(), or internal error
-};
-FenceState syncWait(int fd, int timeout);
-
-#ifdef NN_DEBUGGABLE
-uint32_t getProp(const char* str, uint32_t defaultValue = 0);
-#endif  // NN_DEBUGGABLE
-
 // DEPRECATED. Use checked conversions from nnapi/hal/1.X/Conversions.h.
 Capabilities::OperandPerformance uncheckedConvert(
         const V1_3::Capabilities::OperandPerformance& operandPerformance);
@@ -608,4 +386,4 @@ V1_3::Subgraph convertToV1_3(const Model::Subgraph& model);
 }  // namespace nn
 }  // namespace android
 
-#endif  // ANDROID_FRAMEWORKS_ML_NN_COMMON_UTILS_H
+#endif  // ANDROID_FRAMEWORKS_ML_NN_COMMON_LEGACY_HAL_UTILS_H
diff --git a/nn/common/include/LegacyUtils.h b/nn/common/include/LegacyUtils.h
index cdaf91172..64ee835f8 100644
--- a/nn/common/include/LegacyUtils.h
+++ b/nn/common/include/LegacyUtils.h
@@ -13,24 +13,24 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+// This file contains pre-canonical-types utility code and does not include HAL
+// utilities. LegacyHalUtils.h is a superset of these utilities that includes
+// HAL utilities.
 
-#ifndef ANDROID_FRAMEWORKS_ML_NN_COMMON_UTILS_H
-#define ANDROID_FRAMEWORKS_ML_NN_COMMON_UTILS_H
+#ifndef ANDROID_FRAMEWORKS_ML_NN_COMMON_LEGACY_UTILS_H
+#define ANDROID_FRAMEWORKS_ML_NN_COMMON_LEGACY_UTILS_H
 
 #include
-#include
 #include
 #include
 #include
 #include
 
-#include "HalInterfaces.h"
+#include
+#include
 #include "NeuralNetworks.h"
 #include "OperationResolver.h"
-#include "ValidateHal.h"
-#include "nnapi/TypeUtils.h"
-#include "nnapi/Types.h"
 
 namespace android {
 namespace nn {
@@ -113,12 +113,6 @@ inline std::optional<Deadline> makeDeadline(std::optional<uint64_t> duration) {
     return duration.has_value() ? makeDeadline(*duration) : std::optional<Deadline>{};
 }
 
-// Make an optional Deadline from an OptionalTimePoint. If
-// timePoint.nanosecondsSinceEpoch cannot be represented in Deadline, return a
-// time point holding the maximum Deadline. If the OptionalTimePoint is none,
-// this function returns std::nullopt.
-std::optional<Deadline> makeDeadline(const V1_3::OptionalTimePoint& timePoint);
-
 // Returns true if the deadline has passed.
Returns false if either the deadline // has not been exceeded or if the deadline is not present. bool hasDeadlinePassed(const std::optional& deadline); @@ -127,66 +121,11 @@ bool hasDeadlinePassed(const std::optional& deadline); // provided, this function returns none for OptionalTimePoint. OptionalTimePoint makeTimePoint(const std::optional& deadline); -// Ensure that every user of FalseyErrorStream is linked to the -// correct instance, using the correct LOG_TAG -namespace { - -template -struct VersionedType {}; - -template <> -struct VersionedType { - using OperandPerformance = V1_2::Capabilities::OperandPerformance; - using OperandType = V1_2::OperandType; -}; - -template <> -struct VersionedType { - using OperandPerformance = V1_3::Capabilities::OperandPerformance; - using OperandType = V1_3::OperandType; -}; - -template -using VersionedOperandPerformance = typename VersionedType::OperandPerformance; -template -using VersionedOperandType = typename VersionedType::OperandType; - -} // namespace - -// Return a vector with one entry for each non-extension OperandType except -// SUBGRAPH, set to the specified PerformanceInfo value. The vector will be -// sorted by OperandType. -// -// Control flow (OperandType::SUBGRAPH) operation performance is specified -// separately using Capabilities::ifPerformance and -// Capabilities::whilePerformance. -template -hardware::hidl_vec> nonExtensionOperandPerformance( - V1_0::PerformanceInfo perf); - -// Update the vector entry corresponding to the specified OperandType with the -// specified PerformanceInfo value. The vector must already have an entry for -// that OperandType, and must be sorted by OperandType. -void update(hardware::hidl_vec* operandPerformance, - V1_2::OperandType type, V1_0::PerformanceInfo perf); -void update(hardware::hidl_vec* operandPerformance, - V1_3::OperandType type, V1_0::PerformanceInfo perf); - -// Look for a vector entry corresponding to the specified OperandType. If -// found, return the associated PerformanceInfo. If not, return a pessimistic -// PerformanceInfo (FLT_MAX). The vector must be sorted by OperandType. -V1_0::PerformanceInfo lookup( - const hardware::hidl_vec& operandPerformance, - V1_2::OperandType type); -V1_0::PerformanceInfo lookup( - const hardware::hidl_vec& operandPerformance, - V1_3::OperandType type); - // Returns true if an operand type is an extension type. -bool isExtensionOperandType(V1_3::OperandType type); +bool isExtensionOperandType(OperandType type); // Returns true if an operation type is an extension type. -bool isExtensionOperationType(V1_3::OperationType type); +bool isExtensionOperationType(OperationType type); // Returns the amount of space needed to store a value of the specified // dimensions and type. For a tensor with unspecified rank or at least one @@ -196,8 +135,6 @@ bool isExtensionOperationType(V1_3::OperationType type); // Aborts if the size would overflow the return type. // // See also TypeManager::getSizeOfData(OperandType, const std::vector&). -uint32_t nonExtensionOperandSizeOfData(V1_3::OperandType type, - const std::vector& dimensions); uint32_t nonExtensionOperandSizeOfData(OperandType type, const std::vector& dimensions); // Returns the amount of space needed to store a value of the dimensions and @@ -211,9 +148,6 @@ uint32_t nonExtensionOperandSizeOfData(OperandType type, const std::vector& d // See also TypeManager::sizeOfDataOverflowsUInt32(OperandType, const std::vector&). 
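
The operandPerformance table described above carries an invariant: entries stay sorted by OperandType so lookup() can binary-search, and a missing entry degrades to a pessimistic FLT_MAX performance. A simplified, self-contained sketch of that lookup; the *Sketch types are stand-ins, not the HAL types:

#include <algorithm>
#include <cfloat>
#include <cstdint>
#include <vector>

struct PerfInfoSketch {
    float execTime;
    float powerUsage;
};
struct OperandPerformanceSketch {
    int32_t type;  // stand-in for OperandType
    PerfInfoSketch info;
};

// Binary-search over a vector kept sorted by `type`; a miss returns the
// pessimistic FLT_MAX performance, as the comment above specifies.
PerfInfoSketch lookupSketch(const std::vector<OperandPerformanceSketch>& operandPerformance,
                            int32_t type) {
    const auto it = std::lower_bound(operandPerformance.begin(), operandPerformance.end(), type,
                                     [](const OperandPerformanceSketch& entry, int32_t t) {
                                         return entry.type < t;
                                     });
    if (it != operandPerformance.end() && it->type == type) {
        return it->info;
    }
    return PerfInfoSketch{FLT_MAX, FLT_MAX};
}
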
bool nonExtensionOperandSizeOfDataOverflowsUInt32(OperandType type, const std::vector& dimensions); -bool nonExtensionOperandSizeOfDataOverflowsUInt32(V1_3::OperandType type, - const std::vector& dimensions); // Returns true if the amount of space needed to store a value of the specified // dimensions and element size overflows the uint32_t type. @@ -248,22 +180,13 @@ bool sizeOfTensorDataOverflowsUInt32(uint32_t elementSize, const std::vector& dimensions); bool tensorHasUnspecifiedDimensions(OperandType type, const std::vector& dimensions); bool tensorHasUnspecifiedDimensions(OperandType type, const Dimensions& dimensions); bool tensorHasUnspecifiedDimensions(const Operand& operand); -bool tensorHasUnspecifiedDimensions(const V1_3::Operand& operand); bool tensorHasUnspecifiedDimensions(const ANeuralNetworksOperandType* type); // Returns the number of padding bytes needed to align data of the @@ -276,10 +199,6 @@ bool tensorHasUnspecifiedDimensions(const ANeuralNetworksOperandType* type); uint32_t alignBytesNeeded(uint32_t index, size_t length); // Does a detailed LOG(INFO) of the model -void logModelToInfo(const V1_0::Model& model); -void logModelToInfo(const V1_1::Model& model); -void logModelToInfo(const V1_2::Model& model); -void logModelToInfo(const V1_3::Model& model); void logModelToInfo(const Model& model); inline std::string toString(uint32_t obj) { @@ -306,10 +225,6 @@ inline bool validCode(uint32_t codeCount, uint32_t codeCountOEM, uint32_t code) return (code < codeCount) || (code >= kOEMCodeBase && (code - kOEMCodeBase) < codeCountOEM); } -bool validateOperandSymmPerChannelQuantParams( - const V1_3::Operand& halOperand, - const ANeuralNetworksSymmPerChannelQuantParams& channelQuant, const char* tag); - // Validates an operand type. // // extensionOperandTypeInfo must be nullptr iff the type is not an extension type. @@ -353,160 +268,17 @@ inline size_t getSizeFromInts(int lower, int higher) { // Convert ANEURALNETWORKS_* result code to ErrorStatus. // Not guaranteed to be a 1-to-1 mapping. ErrorStatus convertResultCodeToErrorStatus(int resultCode); -V1_3::ErrorStatus convertResultCodeToHalErrorStatus(int resultCode); // Convert ErrorStatus to ANEURALNETWORKS_* result code. // Not guaranteed to be a 1-to-1 mapping. int convertErrorStatusToResultCode(ErrorStatus status); -int convertErrorStatusToResultCode(V1_3::ErrorStatus status); // Convert execution results to runtime format. Additionally checks that the // returned results abide by the HAL specification, and logs an error if the // result violates the specification. 
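
For intuition about alignBytesNeeded() above: it returns how many padding bytes must precede a value of the given length so the value starts on a suitably aligned offset. A hedged sketch, assuming alignment is the value's natural size capped at 4 bytes (the runtime's exact cap is an assumption here):

#include <cstddef>
#include <cstdint>

// Sketch only: align to 1, 2, or 4 bytes depending on the value's length.
inline uint32_t alignBytesNeededSketch(uint32_t index, size_t length) {
    uint32_t alignment = 1;
    if (length >= 4) {
        alignment = 4;
    } else if (length >= 2) {
        alignment = 2;
    }
    const uint32_t misalignment = index % alignment;
    return misalignment == 0 ? 0 : alignment - misalignment;
}
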
-std::tuple, Timing> getExecutionResult( - V1_3::ErrorStatus status, const hardware::hidl_vec& outputShapes, - const V1_2::Timing& timing); std::tuple, Timing> getExecutionResult( ErrorStatus status, std::vector outputShapes, Timing timing); -// Versioning - -bool compliantWithV1_0(const V1_0::Capabilities& capabilities); -bool compliantWithV1_0(const V1_1::Capabilities& capabilities); -bool compliantWithV1_0(const V1_2::Capabilities& capabilities); -bool compliantWithV1_0(const V1_3::Capabilities& capabilities); -bool compliantWithV1_1(const V1_0::Capabilities& capabilities); -bool compliantWithV1_1(const V1_1::Capabilities& capabilities); -bool compliantWithV1_1(const V1_2::Capabilities& capabilities); -bool compliantWithV1_1(const V1_3::Capabilities& capabilities); -bool compliantWithV1_2(const V1_0::Capabilities& capabilities); -bool compliantWithV1_2(const V1_1::Capabilities& capabilities); -bool compliantWithV1_2(const V1_2::Capabilities& capabilities); -bool compliantWithV1_2(const V1_3::Capabilities& capabilities); -bool compliantWithV1_3(const V1_0::Capabilities& capabilities); -bool compliantWithV1_3(const V1_1::Capabilities& capabilities); -bool compliantWithV1_3(const V1_2::Capabilities& capabilities); -bool compliantWithV1_3(const V1_3::Capabilities& capabilities); - -// If noncompliantOperations != nullptr, then -// precondition: noncompliantOperations->empty() -// postcondition: *noncompliantOperations consists of the indices of the noncompliant -// operations; if the compliance check fails for some reason -// other than a noncompliant operation, -// *noncompliantOperations consists of the indices of all operations -bool compliantWithV1_0(const V1_0::Model& model); -bool compliantWithV1_0(const V1_1::Model& model); -bool compliantWithV1_0(const V1_2::Model& model, - std::set* noncompliantOperations = nullptr); -bool compliantWithV1_0(const V1_3::Model& model, - std::set* noncompliantOperations = nullptr); -bool compliantWithV1_1(const V1_0::Model& model); -bool compliantWithV1_1(const V1_1::Model& model); -bool compliantWithV1_1(const V1_2::Model& model, - std::set* noncompliantOperations = nullptr); -bool compliantWithV1_1(const V1_3::Model& model, - std::set* noncompliantOperations = nullptr); -bool compliantWithV1_2(const V1_0::Model& model); -bool compliantWithV1_2(const V1_1::Model& model); -bool compliantWithV1_2(const V1_2::Model& model, - std::set* noncompliantOperations = nullptr); -bool compliantWithV1_2(const V1_3::Model& model, - std::set* noncompliantOperations = nullptr); - -V1_0::ErrorStatus convertToV1_0(V1_0::ErrorStatus status); -V1_0::ErrorStatus convertToV1_0(V1_3::ErrorStatus status); -V1_3::ErrorStatus convertToV1_3(V1_0::ErrorStatus status); -V1_3::ErrorStatus convertToV1_3(V1_3::ErrorStatus status); - -V1_0::Capabilities convertToV1_0(const V1_0::Capabilities& capabilities); -V1_0::Capabilities convertToV1_0(const V1_1::Capabilities& capabilities); -V1_0::Capabilities convertToV1_0(const V1_2::Capabilities& capabilities); -V1_0::Capabilities convertToV1_0(const V1_3::Capabilities& capabilities); -V1_1::Capabilities convertToV1_1(const V1_0::Capabilities& capabilities); -V1_1::Capabilities convertToV1_1(const V1_1::Capabilities& capabilities); -V1_1::Capabilities convertToV1_1(const V1_2::Capabilities& capabilities); -V1_1::Capabilities convertToV1_1(const V1_3::Capabilities& capabilities); -V1_2::Capabilities convertToV1_2(const V1_0::Capabilities& capabilities); -V1_2::Capabilities convertToV1_2(const V1_1::Capabilities& capabilities); 
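
The compliantWithV1_*() family above backs the runtime's downgrade decision: before handing a model to an older driver, check compliance and, on failure, learn which operations are the blockers. A hypothetical usage sketch (model setup and driver dispatch are elided; compliantWithV1_0 and convertToV1_0 are the declarations nearby):

#include <cstdint>
#include <set>

// Returns true if the model could be downgraded for a 1.0 driver.
bool tryDispatchToV1_0DriverSketch(const V1_3::Model& model) {
    std::set<uint32_t> noncompliantOperations;
    if (!compliantWithV1_0(model, &noncompliantOperations)) {
        // Per the contract above, these are the indices of operations that
        // need a newer interface (or all indices if the check failed for
        // some other reason).
        return false;
    }
    const V1_0::Model downgraded = convertToV1_0(model);
    // ... send `downgraded` to the 1.0 driver ...
    return true;
}
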
-V1_2::Capabilities convertToV1_2(const V1_2::Capabilities& capabilities); -V1_2::Capabilities convertToV1_2(const V1_3::Capabilities& capabilities); -V1_3::Capabilities convertToV1_3(const V1_0::Capabilities& capabilities); -V1_3::Capabilities convertToV1_3(const V1_1::Capabilities& capabilities); -V1_3::Capabilities convertToV1_3(const V1_2::Capabilities& capabilities); -V1_3::Capabilities convertToV1_3(const V1_3::Capabilities& capabilities); - -V1_0::Model convertToV1_0(const V1_0::Model& model); -V1_0::Model convertToV1_0(const V1_1::Model& model); -V1_0::Model convertToV1_0(const V1_2::Model& model); -V1_0::Model convertToV1_0(const V1_3::Model& model); -V1_1::Model convertToV1_1(const V1_0::Model& model); -V1_1::Model convertToV1_1(const V1_1::Model& model); -V1_1::Model convertToV1_1(const V1_2::Model& model); -V1_1::Model convertToV1_1(const V1_3::Model& model); -V1_2::Model convertToV1_2(const V1_0::Model& model); -V1_2::Model convertToV1_2(const V1_1::Model& model); -V1_2::Model convertToV1_2(const V1_2::Model& model); -V1_2::Model convertToV1_2(const V1_3::Model& model); -V1_3::Model convertToV1_3(const V1_0::Model& model); -V1_3::Model convertToV1_3(const V1_1::Model& model); -V1_3::Model convertToV1_3(const V1_2::Model& model); -V1_3::Model convertToV1_3(const V1_3::Model& model); - -V1_0::OperationType uncheckedConvertToV1_0(V1_3::OperationType type); -V1_1::OperationType uncheckedConvertToV1_1(V1_3::OperationType type); -V1_2::OperationType uncheckedConvertToV1_2(V1_3::OperationType type); - -V1_0::Operand convertToV1_0(const V1_2::Operand& operand); -V1_0::Operand convertToV1_0(const V1_3::Operand& operand); -V1_2::Operand convertToV1_2(const V1_0::Operand& operand); -V1_2::Operand convertToV1_2(const V1_3::Operand& operand); -V1_3::Operand convertToV1_3(const V1_0::Operand& operand); -V1_3::Operand convertToV1_3(const V1_2::Operand& operand); -V1_3::Operand convertToV1_3(const V1_3::Operand& operand); - -hardware::hidl_vec convertToV1_0(const hardware::hidl_vec& operands); -hardware::hidl_vec convertToV1_0(const hardware::hidl_vec& operands); -hardware::hidl_vec convertToV1_0(const hardware::hidl_vec& operands); -hardware::hidl_vec convertToV1_2(const hardware::hidl_vec& operands); -hardware::hidl_vec convertToV1_2(const hardware::hidl_vec& operands); -hardware::hidl_vec convertToV1_2(const hardware::hidl_vec& operands); -hardware::hidl_vec convertToV1_3(const hardware::hidl_vec& operands); -hardware::hidl_vec convertToV1_3(const hardware::hidl_vec& operands); -hardware::hidl_vec convertToV1_3(const hardware::hidl_vec& operands); - -bool compliantWithV1_0(const V1_0::Request& request); -bool compliantWithV1_0(const V1_3::Request& request); -bool compliantWithV1_2(const V1_3::Request& request); - -V1_0::Request convertToV1_0(const V1_0::Request& request); -V1_0::Request convertToV1_0(const V1_3::Request& request); -V1_0::Request convertToV1_2(const V1_3::Request& request); -V1_3::Request convertToV1_3(const V1_0::Request& request); -V1_3::Request convertToV1_3(const V1_3::Request& request); - -bool compliantWithV1_0(V1_0::OperandLifeTime lifetime); -bool compliantWithV1_0(V1_3::OperandLifeTime lifetime); -bool compliantWithV1_3(V1_0::OperandLifeTime lifetime); -bool compliantWithV1_3(V1_3::OperandLifeTime lifetime); - -V1_0::OperandLifeTime convertToV1_0(V1_0::OperandLifeTime lifetime); -V1_0::OperandLifeTime convertToV1_0(V1_3::OperandLifeTime lifetime); -V1_3::OperandLifeTime convertToV1_3(V1_0::OperandLifeTime lifetime); -V1_3::OperandLifeTime 
convertToV1_3(V1_3::OperandLifeTime lifetime); - -constexpr V1_3::Priority convertToHalPriority(int32_t priority) { - switch (priority) { - case ANEURALNETWORKS_PRIORITY_LOW: - return V1_3::Priority::LOW; - case ANEURALNETWORKS_PRIORITY_MEDIUM: - return V1_3::Priority::MEDIUM; - case ANEURALNETWORKS_PRIORITY_HIGH: - return V1_3::Priority::HIGH; - } - LOG(FATAL) << "unrecognized priority: " << priority; - return {}; -} - constexpr Priority convertToCanonicalPriority(int32_t priority) { switch (priority) { case ANEURALNETWORKS_PRIORITY_LOW: @@ -535,77 +307,7 @@ FenceState syncWait(int fd, int timeout); uint32_t getProp(const char* str, uint32_t defaultValue = 0); #endif // NN_DEBUGGABLE -// DEPRECATED. Use checked conversions from nnapi/hal/1.X/Conversions.h. -Capabilities::OperandPerformance uncheckedConvert( - const V1_3::Capabilities::OperandPerformance& operandPerformance); -Capabilities::PerformanceInfo uncheckedConvert(const V1_0::PerformanceInfo& performanceInfo); -Capabilities uncheckedConvert(const V1_3::Capabilities& capabilities); -DataLocation uncheckedConvert(const V1_0::DataLocation& location); -ErrorStatus uncheckedConvert(V1_0::ErrorStatus status); -ErrorStatus uncheckedConvert(V1_3::ErrorStatus status); -Extension::OperandTypeInformation uncheckedConvert(const V1_2::Extension::OperandTypeInformation&); -Extension uncheckedConvert(const V1_2::Extension& extension); -hardware::hidl_vec uncheckedConvert(const Operand::ExtensionParams& params); -MeasureTiming uncheckedConvert(V1_2::MeasureTiming measure); -Memory uncheckedConvert(const hardware::hidl_memory& memory); -Model::ExtensionNameAndPrefix uncheckedConvert(const V1_2::Model::ExtensionNameAndPrefix&); -Model::Subgraph uncheckedConvert(const V1_3::Subgraph& subgraph); -Model uncheckedConvert(const V1_3::Model& model); -Operand::ExtensionParams uncheckedConvert(const hardware::hidl_vec& params); -Operand::ExtraParams uncheckedConvert(const V1_2::Operand::ExtraParams& params); -Operand::LifeTime uncheckedConvert(V1_3::OperandLifeTime lifetime); -Operand::SymmPerChannelQuantParams uncheckedConvert(const V1_2::SymmPerChannelQuantParams& params); -OperandType uncheckedConvert(V1_3::OperandType operandType); -Operand uncheckedConvert(const V1_3::Operand& operand); -OperationType uncheckedConvert(V1_3::OperationType operationType); -Operation uncheckedConvert(const V1_3::Operation& operation); -OptionalTimeoutDuration uncheckedConvert(const V1_3::OptionalTimeoutDuration& timeoutDuration); -OutputShape uncheckedConvert(const V1_2::OutputShape& outputShape); -Request::Argument uncheckedConvert(const V1_0::RequestArgument& requestArgument); -Request::MemoryPool uncheckedConvert(const V1_3::Request::MemoryPool& memoryPool); -Request uncheckedConvert(const V1_3::Request& request); -std::vector uncheckedConvert(const hardware::hidl_vec& extensions); -std::vector uncheckedConvert(const hardware::hidl_vec& memories); -std::vector uncheckedConvert(const hardware::hidl_vec& subgraphs); -std::vector uncheckedConvert(const hardware::hidl_vec& operands); -std::vector uncheckedConvert( - const hardware::hidl_vec& outputShapes); -std::vector uncheckedConvert( - const hardware::hidl_vec& memoryPools); -Timing uncheckedConvert(const V1_2::Timing& timing); - -// DEPRECATED. Use conversions from nnapi/hal/1.X/Conversions.h. 
-hardware::hidl_memory convertToV1_0(const Memory& memory);
-hardware::hidl_vec convertToV1_0(const std::vector& memories);
-hardware::hidl_vec convertToV1_0(const Model::OperandValues& operandValues);
-hardware::hidl_vec convertToV1_2(const std::vector& outputShapes);
-hardware::hidl_vec convertToV1_3(const std::vector& bufferRoles);
-V1_0::DataLocation convertToV1_0(const DataLocation& location);
-V1_0::ErrorStatus convertToV1_0(ErrorStatus status);
-V1_0::RequestArgument convertToV1_0(const Request::Argument& requestArgument);
-V1_1::ExecutionPreference convertToV1_1(ExecutionPreference preference);
-V1_2::MeasureTiming convertToV1_2(MeasureTiming measure);
-V1_2::Model::ExtensionNameAndPrefix convertToV1_2(const Model::ExtensionNameAndPrefix&);
-V1_2::Operand::ExtraParams convertToV1_2(const Operand::ExtraParams& params);
-V1_2::OutputShape convertToV1_2(const OutputShape& outputShape);
-V1_2::SymmPerChannelQuantParams convertToV1_2(const Operand::SymmPerChannelQuantParams& params);
-V1_2::Timing convertToV1_2(const Timing& timing);
-V1_3::BufferRole convertToV1_3(const BufferRole& bufferRole);
-V1_3::ErrorStatus convertToV1_3(ErrorStatus status);
-V1_3::Model convertToV1_3(const Model& model);
-V1_3::Operand convertToV1_3(const Operand& operand);
-V1_3::OperandLifeTime convertToV1_3(Operand::LifeTime lifetime);
-V1_3::OperandType convertToV1_3(OperandType operandType);
-V1_3::Operation convertToV1_3(const Operation& operation);
-V1_3::OperationType convertToV1_3(OperationType operationType);
-V1_3::OptionalTimeoutDuration convertToV1_3(const OptionalTimeoutDuration& timeoutDuration);
-V1_3::OptionalTimePoint convertToV1_3(const OptionalTimePoint& timePoint);
-V1_3::Priority convertToV1_3(Priority priority);
-V1_3::Request convertToV1_3(const Request& request);
-V1_3::Request::MemoryPool convertToV1_3(const Request::MemoryPool& memoryPool);
-V1_3::Subgraph convertToV1_3(const Model::Subgraph& model);
-
 } // namespace nn
 } // namespace android
 
-#endif // ANDROID_FRAMEWORKS_ML_NN_COMMON_UTILS_H
+#endif // ANDROID_FRAMEWORKS_ML_NN_COMMON_LEGACY_UTILS_H
diff --git a/nn/common/include/Utils.h b/nn/common/include/Utils.h
new file mode 100644
index 000000000..7ac1e59c7
--- /dev/null
+++ b/nn/common/include/Utils.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_FRAMEWORKS_ML_NN_COMMON_UTILS_H
+#define ANDROID_FRAMEWORKS_ML_NN_COMMON_UTILS_H
+
+#include "LegacyHalUtils.h"
+#include "LegacyUtils.h"
+
+#endif // ANDROID_FRAMEWORKS_ML_NN_COMMON_UTILS_H
-- cgit v1.2.3

From 083da328b8899ce08f63fc564db444669f62b324 Mon Sep 17 00:00:00 2001
From: Slava Shklyaev
Date: Mon, 2 Nov 2020 20:00:18 +0000
Subject: Replace native_handle with unique_fd

This change replaces native_handle_t from libutils with a custom type
that wraps std::vector<base::unique_fd> and std::vector<int>.
Bug: 160669116 Test: NNT_static Change-Id: If4d767b35c2d48cabcb6a9a3436079f6f7d39a13 Merged-In: If4d767b35c2d48cabcb6a9a3436079f6f7d39a13 (cherry picked from commit b3c5d46cd1a7e8c7f4c624d2c24bf721f4d0edb2) --- nn/common/SharedMemoryAndroid.cpp | 124 ++++++++++++++++++---------- nn/common/SharedMemoryHost.cpp | 57 ++++++------- nn/common/TypeUtils.cpp | 2 +- nn/common/Types.cpp | 32 +++++-- nn/common/Validation.cpp | 14 ++-- nn/common/include/nnapi/IDevice.h | 8 +- nn/common/include/nnapi/TypeUtils.h | 2 +- nn/common/include/nnapi/Types.h | 23 ++++-- nn/common/include/nnapi/Validation.h | 4 +- nn/runtime/Manager.cpp | 34 +++----- nn/runtime/VersionedInterfaces.cpp | 67 +++++++++------ nn/runtime/VersionedInterfaces.h | 14 ++-- nn/runtime/test/TestVersionedInterfaces.cpp | 24 +++--- 13 files changed, 232 insertions(+), 173 deletions(-) diff --git a/nn/common/SharedMemoryAndroid.cpp b/nn/common/SharedMemoryAndroid.cpp index 9baca73c5..18881e04a 100644 --- a/nn/common/SharedMemoryAndroid.cpp +++ b/nn/common/SharedMemoryAndroid.cpp @@ -19,12 +19,12 @@ #include #include #include -#include #include #include #include #include +#include #include #include #include @@ -66,28 +66,74 @@ GeneralResult allocateSharedMemory(size_t size) { return maybeMemory; } -Memory createMemory(const hidl_memory& memory) { - CHECK_LE(memory.size(), std::numeric_limits::max()); +GeneralResult hidlHandleFromSharedHandle(const SharedHandle& handle) { + if (handle == nullptr) { + return {}; + } - auto* cloned = native_handle_clone(memory.handle()); - auto nativeHandle = ::android::NativeHandle::create(cloned, /*ownsHandle=*/true); + std::vector fds; + fds.reserve(handle->fds.size()); + for (const auto& fd : handle->fds) { + int dupFd = dup(fd); + if (dupFd == -1) { + return NN_ERROR(ErrorStatus::GENERAL_FAILURE) << "Failed to dup the fd"; + } + fds.emplace_back(dupFd); + } - return { - .handle = std::move(nativeHandle), + native_handle_t* nativeHandle = native_handle_create(handle->fds.size(), handle->ints.size()); + if (nativeHandle == nullptr) { + return NN_ERROR(ErrorStatus::GENERAL_FAILURE) << "Failed to create native_handle"; + } + for (size_t i = 0; i < fds.size(); ++i) { + nativeHandle->data[i] = fds[i].release(); + } + std::copy(handle->ints.begin(), handle->ints.end(), &nativeHandle->data[nativeHandle->numFds]); + + hardware::hidl_handle hidlHandle; + hidlHandle.setTo(nativeHandle, /*shouldOwn=*/true); + return hidlHandle; +} + +GeneralResult sharedHandleFromNativeHandle(const native_handle_t* handle) { + if (handle == nullptr) { + return nullptr; + } + + std::vector fds; + fds.reserve(handle->numFds); + for (int i = 0; i < handle->numFds; ++i) { + int dupFd = dup(handle->data[i]); + if (dupFd == -1) { + return NN_ERROR(ErrorStatus::GENERAL_FAILURE) << "Failed to dup the fd"; + } + fds.emplace_back(dupFd); + } + + std::vector ints(&handle->data[handle->numFds], + &handle->data[handle->numFds + handle->numInts]); + + return std::make_shared(Handle{ + .fds = std::move(fds), + .ints = std::move(ints), + }); +} + +GeneralResult createMemory(const hidl_memory& memory) { + CHECK_LE(memory.size(), std::numeric_limits::max()); + return Memory{ + .handle = NN_TRY(sharedHandleFromNativeHandle(memory.handle())), .size = static_cast(memory.size()), .name = memory.name(), }; } -hidl_memory createHidlMemory(const Memory& memory) { - const auto hidlMemory = hidl_memory(memory.name, memory.handle->handle(), memory.size); - // Copy memory to force the native_handle_t to be copied. 
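
The hidlHandleFromSharedHandle()/sharedHandleFromNativeHandle() pair above always dup()s, so each side owns its descriptors outright. A minimal sketch of the ownership model this patch introduces; HandleSketch mirrors the Handle struct added to nnapi/Types.h later in this patch, and the wrapFdSketch helper is hypothetical:

#include <unistd.h>

#include <android-base/unique_fd.h>

#include <memory>
#include <utility>
#include <vector>

struct HandleSketch {
    std::vector<android::base::unique_fd> fds;  // closed automatically
    std::vector<int> ints;                      // plain metadata words
};
using SharedHandleSketch = std::shared_ptr<const HandleSketch>;

// Take a caller-owned fd and keep an independent duplicate; the dup'ed fd
// is closed when the last SharedHandleSketch owner is destroyed.
SharedHandleSketch wrapFdSketch(int borrowedFd) {
    std::vector<android::base::unique_fd> fds;
    fds.emplace_back(dup(borrowedFd));
    return std::make_shared<const HandleSketch>(
            HandleSketch{std::move(fds), /*ints=*/{}});
}
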
- auto copiedMemory = hidlMemory; - return copiedMemory; +GeneralResult createHidlMemory(const Memory& memory) { + return hidl_memory(memory.name, NN_TRY(hidlHandleFromSharedHandle(memory.handle)), memory.size); } GeneralResult mapAshmem(const Memory& memory) { - const auto hidlMemory = createHidlMemory(memory); + const auto hidlMemory = NN_TRY(createHidlMemory(memory)); const auto mapping = mapMemory(hidlMemory); if (mapping == nullptr) { return NN_ERROR(ErrorStatus::GENERAL_FAILURE) << "Failed to map memory"; @@ -116,10 +162,10 @@ struct MmapFdMappingContext { GeneralResult mapMemFd(const Memory& memory) { const size_t size = memory.size; - const native_handle_t* handle = memory.handle->handle(); - const int fd = handle->data[0]; - const int prot = handle->data[1]; - const size_t offset = getOffsetFromInts(handle->data[2], handle->data[3]); + const SharedHandle& handle = memory.handle; + const int fd = handle->fds[0]; + const int prot = handle->ints[0]; + const size_t offset = getOffsetFromInts(handle->ints[1], handle->ints[2]); std::shared_ptr mapping = base::MappedFile::FromFd(fd, offset, size, prot); if (mapping == nullptr) { @@ -132,7 +178,7 @@ GeneralResult mapMemFd(const Memory& memory) { } GeneralResult mapAhwbBlobMemory(const Memory& memory) { - const auto* handle = memory.handle->handle(); + const SharedHandle& handle = memory.handle; const auto size = memory.size; const auto format = AHARDWAREBUFFER_FORMAT_BLOB; const auto usage = AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN | AHARDWAREBUFFER_USAGE_CPU_WRITE_OFTEN; @@ -152,7 +198,8 @@ GeneralResult mapAhwbBlobMemory(const Memory& memory) { AHardwareBuffer* hardwareBuffer = nullptr; status_t status = AHardwareBuffer_createFromHandle( - &desc, handle, AHARDWAREBUFFER_CREATE_FROM_HANDLE_METHOD_CLONE, &hardwareBuffer); + &desc, NN_TRY(hidlHandleFromSharedHandle(handle)), + AHARDWAREBUFFER_CREATE_FROM_HANDLE_METHOD_CLONE, &hardwareBuffer); if (status != NO_ERROR) { return NN_ERROR(ErrorStatus::GENERAL_FAILURE) << "Can't create AHardwareBuffer from handle. Error: " << status; @@ -196,31 +243,23 @@ GeneralResult createSharedMemoryFromFd(size_t size, int prot, int fd, si } // Duplicate the file descriptor so the resultant Memory owns its own version. - int dupfd = dup(fd); - if (dupfd == -1) { + int dupFd = dup(fd); + if (dupFd == -1) { // TODO(b/120417090): is ANEURALNETWORKS_UNEXPECTED_NULL the correct error to return here? return NN_ERROR(ErrorStatus::INVALID_ARGUMENT) << "Failed to dup the fd"; } - // Create a temporary native handle to own the dupfd. - native_handle_t* nativeHandle = native_handle_create(1, 3); - if (nativeHandle == nullptr) { - close(dupfd); - // TODO(b/120417090): is ANEURALNETWORKS_UNEXPECTED_NULL the correct error to return here? - return NN_ERROR(ErrorStatus::GENERAL_FAILURE) << "Failed to create native_handle"; - } + std::vector fds; + fds.emplace_back(dupFd); const auto [lowOffsetBits, highOffsetBits] = getIntsFromOffset(offset); - nativeHandle->data[0] = dupfd; - nativeHandle->data[1] = prot; - nativeHandle->data[2] = lowOffsetBits; - nativeHandle->data[3] = highOffsetBits; - - // Create a NativeHandle which owns the native handle and fd so that we don't have to manually - // clean either the native handle or the fd. 
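
mapMemFd() above decodes the mmap_fd layout (fds = {fd}, ints = {prot, lowOffsetBits, highOffsetBits}) and hands the result to android-base's MappedFile. A compact sketch of the same two steps, assuming that handle layout; the splice mirrors the getSizeFromInts()-style packing shown earlier:

#include <sys/types.h>

#include <android-base/mapped_file.h>

#include <cstddef>
#include <cstdint>
#include <memory>

// Recombine the split offset and mmap the region; returns nullptr on
// failure, and the mapping is released in MappedFile's destructor.
std::unique_ptr<android::base::MappedFile> mapMemFdSketch(int fd, int prot, int lowOffsetBits,
                                                          int highOffsetBits, size_t size) {
    const uint64_t offset = static_cast<uint64_t>(static_cast<uint32_t>(lowOffsetBits)) |
                            (static_cast<uint64_t>(static_cast<uint32_t>(highOffsetBits)) << 32);
    return android::base::MappedFile::FromFd(fd, static_cast<off64_t>(offset), size, prot);
}
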
- auto ownedHandle = ::android::NativeHandle::create(nativeHandle, /*ownsHandle=*/true); + std::vector ints = {prot, lowOffsetBits, highOffsetBits}; - return Memory{.handle = std::move(ownedHandle), .size = size, .name = "mmap_fd"}; + SharedHandle handle = std::make_shared(Handle{ + .fds = std::move(fds), + .ints = std::move(ints), + }); + return Memory{.handle = std::move(handle), .size = size, .name = "mmap_fd"}; } GeneralResult createSharedMemoryFromHidlMemory(const hardware::hidl_memory& memory) { @@ -232,19 +271,20 @@ GeneralResult createSharedMemoryFromAHWB(const AHardwareBuffer& ahwb) { AHardwareBuffer_describe(&ahwb, &bufferDesc); const native_handle_t* handle = AHardwareBuffer_getNativeHandle(&ahwb); - auto* cloned = native_handle_clone(handle); - auto nativeHandle = ::android::NativeHandle::create(cloned, /*ownsHandle=*/true); - if (bufferDesc.format == AHARDWAREBUFFER_FORMAT_BLOB) { return Memory{ - .handle = std::move(nativeHandle), + .handle = NN_TRY(sharedHandleFromNativeHandle(handle)), .size = bufferDesc.width, .name = "hardware_buffer_blob", }; } // memory size is not used for non-BLOB AHWB memory. - return Memory{.handle = std::move(nativeHandle), .size = 0, .name = "hardware_buffer"}; + return Memory{ + .handle = NN_TRY(sharedHandleFromNativeHandle(handle)), + .size = 0, + .name = "hardware_buffer", + }; } GeneralResult map(const Memory& memory) { diff --git a/nn/common/SharedMemoryHost.cpp b/nn/common/SharedMemoryHost.cpp index 231977cf7..eeb49075e 100644 --- a/nn/common/SharedMemoryHost.cpp +++ b/nn/common/SharedMemoryHost.cpp @@ -23,6 +23,7 @@ #include #include #include +#include #include "Result.h" #include "SharedMemory.h" @@ -36,7 +37,7 @@ GeneralResult mapAshmem(const Memory& memory) { CHECK_LE(memory.size, std::numeric_limits::max()); const auto size = memory.size; - int fd = memory.handle->handle()->data[0]; + const int fd = memory.handle->fds[0]; std::shared_ptr mapping = base::MappedFile::FromFd(fd, /*offset=*/0, size, PROT_READ | PROT_WRITE); if (mapping == nullptr) { @@ -54,10 +55,10 @@ struct MmapFdMappingContext { GeneralResult mapMemFd(const Memory& memory) { const size_t size = memory.size; - const native_handle_t* handle = memory.handle->handle(); - const int fd = handle->data[0]; - const int prot = handle->data[1]; - const size_t offset = getOffsetFromInts(handle->data[2], handle->data[3]); + const SharedHandle& handle = memory.handle; + const int fd = handle->fds[0]; + const int prot = handle->ints[0]; + const size_t offset = getOffsetFromInts(handle->ints[1], handle->ints[2]); std::shared_ptr mapping = base::MappedFile::FromFd(fd, offset, size, prot); if (mapping == nullptr) { @@ -78,18 +79,14 @@ GeneralResult createSharedMemory(size_t size) { << "ashmem_create_region(" << size << ") fails with " << fd; } - native_handle_t* handle = native_handle_create(1, 0); - if (handle == nullptr) { - // TODO(b/120417090): is ANEURALNETWORKS_UNEXPECTED_NULL the correct error to return here? - return NN_ERROR(ErrorStatus::GENERAL_FAILURE) << "Failed to create native_handle"; - } - handle->data[0] = fd; - - // Create a NativeHandle which owns the native handle and fd so that we don't have to manually - // clean either the native handle or the fd. 
- auto nativeHandle = ::android::NativeHandle::create(handle, /*ownsHandle=*/true); + std::vector fds; + fds.emplace_back(fd); - return Memory{.handle = std::move(nativeHandle), .size = size, .name = "ashmem"}; + SharedHandle handle = std::make_shared(Handle{ + .fds = std::move(fds), + .ints = {}, + }); + return Memory{.handle = std::move(handle), .size = size, .name = "ashmem"}; } GeneralResult createSharedMemoryFromFd(size_t size, int prot, int fd, size_t offset) { @@ -98,31 +95,23 @@ GeneralResult createSharedMemoryFromFd(size_t size, int prot, int fd, si } // Duplicate the file descriptor so the resultant Memory owns its own version. - int dupfd = dup(fd); - if (dupfd == -1) { + int dupFd = dup(fd); + if (dupFd == -1) { // TODO(b/120417090): is ANEURALNETWORKS_UNEXPECTED_NULL the correct error to return here? return NN_ERROR(ErrorStatus::INVALID_ARGUMENT) << "Failed to dup the fd"; } - // Create a temporary native handle to own the dupfd. - native_handle_t* nativeHandle = native_handle_create(1, 3); - if (nativeHandle == nullptr) { - close(dupfd); - // TODO(b/120417090): is ANEURALNETWORKS_UNEXPECTED_NULL the correct error to return here? - return NN_ERROR(ErrorStatus::GENERAL_FAILURE) << "Failed to create native_handle"; - } + std::vector fds; + fds.emplace_back(dupFd); const auto [lowOffsetBits, highOffsetBits] = getIntsFromOffset(offset); - nativeHandle->data[0] = dupfd; - nativeHandle->data[1] = prot; - nativeHandle->data[2] = lowOffsetBits; - nativeHandle->data[3] = highOffsetBits; - - // Create a NativeHandle which owns the native handle and fd so that we don't have to manually - // clean either the native handle or the fd. - auto ownedHandle = ::android::NativeHandle::create(nativeHandle, /*ownsHandle=*/true); + std::vector ints = {prot, lowOffsetBits, highOffsetBits}; - return Memory{.handle = std::move(ownedHandle), .size = size, .name = "mmap_fd"}; + SharedHandle handle = std::make_shared(Handle{ + .fds = std::move(fds), + .ints = std::move(ints), + }); + return Memory{.handle = std::move(handle), .size = size, .name = "mmap_fd"}; } GeneralResult createSharedMemoryFromHidlMemory(const hardware::hidl_memory& /*memory*/) { diff --git a/nn/common/TypeUtils.cpp b/nn/common/TypeUtils.cpp index 6d089bd2a..79d493e14 100644 --- a/nn/common/TypeUtils.cpp +++ b/nn/common/TypeUtils.cpp @@ -701,7 +701,7 @@ std::ostream& operator<<(std::ostream& os, const Operation& operation) { << ", .outputs=" << operation.outputs << "}"; } -std::ostream& operator<<(std::ostream& os, const NativeHandle& handle) { +std::ostream& operator<<(std::ostream& os, const SharedHandle& handle) { return os << (handle != nullptr ? 
"" : ""); } diff --git a/nn/common/Types.cpp b/nn/common/Types.cpp index 17485f442..e2e4cf112 100644 --- a/nn/common/Types.cpp +++ b/nn/common/Types.cpp @@ -17,7 +17,6 @@ #include "Types.h" #include -#include #include #include @@ -25,6 +24,7 @@ #include #include #include +#include #include #include #include @@ -128,24 +128,32 @@ SyncFence SyncFence::createAsSignaled() { return SyncFence(nullptr); } -Result SyncFence::create(NativeHandle syncFence) { - const bool isValid = (syncFence != nullptr && syncFence->handle() != nullptr && - syncFence->handle()->numFds == 1 && syncFence->handle()->numInts == 0 && - &syncFence->handle()->data[0] != nullptr); +SyncFence SyncFence::create(base::unique_fd fd) { + std::vector fds; + fds.push_back(std::move(fd)); + return SyncFence(std::make_shared(Handle{ + .fds = std::move(fds), + .ints = {}, + })); +} + +Result SyncFence::create(SharedHandle syncFence) { + const bool isValid = + (syncFence != nullptr && syncFence->fds.size() == 1 && syncFence->ints.empty()); if (!isValid) { return NN_ERROR() << "Invalid sync fence handle passed to SyncFence::create"; } return SyncFence(std::move(syncFence)); } -SyncFence::SyncFence(NativeHandle syncFence) : mSyncFence(std::move(syncFence)) {} +SyncFence::SyncFence(SharedHandle syncFence) : mSyncFence(std::move(syncFence)) {} SyncFence::FenceState SyncFence::syncWait(OptionalTimeout optionalTimeout) const { if (mSyncFence == nullptr) { return FenceState::SIGNALED; } - const int fd = mSyncFence->handle()->data[0]; + const int fd = mSyncFence->fds.front().get(); const int timeout = optionalTimeout.value_or(Timeout{-1}).count(); // This implementation is directly based on the ::sync_wait() implementation. @@ -182,8 +190,16 @@ SyncFence::FenceState SyncFence::syncWait(OptionalTimeout optionalTimeout) const return FenceState::UNKNOWN; } -NativeHandle SyncFence::getHandle() const { +SharedHandle SyncFence::getSharedHandle() const { return mSyncFence; } +bool SyncFence::hasFd() const { + return mSyncFence != nullptr; +} + +int SyncFence::getFd() const { + return mSyncFence == nullptr ? 
-1 : mSyncFence->fds.front().get(); +} + } // namespace android::nn diff --git a/nn/common/Validation.cpp b/nn/common/Validation.cpp index d37c447c0..1c939ba40 100644 --- a/nn/common/Validation.cpp +++ b/nn/common/Validation.cpp @@ -679,13 +679,15 @@ Result validateOperations(const std::vector& operations, return version; } -Result validateNativeHandle(const NativeHandle& handle) { +Result validateSharedHandle(const SharedHandle& handle) { NN_VALIDATE(handle != nullptr); + NN_VALIDATE(std::all_of(handle->fds.begin(), handle->fds.end(), + [](const base::unique_fd& fd) { return fd.ok(); })); return Version::ANDROID_OC_MR1; } Result validateMemory(const Memory& memory) { - NN_TRY(validateNativeHandle(memory.handle)); + NN_TRY(validateSharedHandle(memory.handle)); if (memory.name == "ashmem") { NN_VALIDATE_NE(memory.size, 0u); @@ -2571,8 +2573,8 @@ Result validate(const Extension& extension) { return validateExtension(extension); } -Result validate(const NativeHandle& handle) { - return validateNativeHandle(handle); +Result validate(const SharedHandle& handle) { + return validateSharedHandle(handle); } Result validate(const Memory& memory) { @@ -2611,8 +2613,8 @@ Result validate(const std::vector& extensions) { return validateExtensions(extensions); } -Result validate(const std::vector& handles) { - return validateVector(handles, validateNativeHandle); +Result validate(const std::vector& handles) { + return validateVector(handles, validateSharedHandle); } Result validate(const std::vector& bufferRoles) { diff --git a/nn/common/include/nnapi/IDevice.h b/nn/common/include/nnapi/IDevice.h index 9a2e51680..75cf0a523 100644 --- a/nn/common/include/nnapi/IDevice.h +++ b/nn/common/include/nnapi/IDevice.h @@ -245,8 +245,8 @@ class IDevice { */ virtual GeneralResult prepareModel( const Model& model, ExecutionPreference preference, Priority priority, - OptionalTimePoint deadline, const std::vector& modelCache, - const std::vector& dataCache, const CacheToken& token) const = 0; + OptionalTimePoint deadline, const std::vector& modelCache, + const std::vector& dataCache, const CacheToken& token) const = 0; /** * Creates a prepared model from cache files for execution. @@ -301,8 +301,8 @@ class IDevice { * for execution, otherwise GeneralError. 
*/ virtual GeneralResult prepareModelFromCache( - OptionalTimePoint deadline, const std::vector& modelCache, - const std::vector& dataCache, const CacheToken& token) const = 0; + OptionalTimePoint deadline, const std::vector& modelCache, + const std::vector& dataCache, const CacheToken& token) const = 0; /** * Allocates a driver-managed buffer with the properties specified by the descriptor as well as diff --git a/nn/common/include/nnapi/TypeUtils.h b/nn/common/include/nnapi/TypeUtils.h index 6b2af916f..b32f78b66 100644 --- a/nn/common/include/nnapi/TypeUtils.h +++ b/nn/common/include/nnapi/TypeUtils.h @@ -88,7 +88,7 @@ std::ostream& operator<<(std::ostream& os, std::ostream& operator<<(std::ostream& os, const Operand::ExtraParams& extraParams); std::ostream& operator<<(std::ostream& os, const Operand& operand); std::ostream& operator<<(std::ostream& os, const Operation& operation); -std::ostream& operator<<(std::ostream& os, const NativeHandle& handle); +std::ostream& operator<<(std::ostream& os, const SharedHandle& handle); std::ostream& operator<<(std::ostream& os, const Memory& memory); std::ostream& operator<<(std::ostream& os, const Model::Subgraph& subgraph); std::ostream& operator<<(std::ostream& os, const Model::OperandValues& operandValues); diff --git a/nn/common/include/nnapi/Types.h b/nn/common/include/nnapi/Types.h index 3b9725d27..5adab6bfa 100644 --- a/nn/common/include/nnapi/Types.h +++ b/nn/common/include/nnapi/Types.h @@ -18,8 +18,7 @@ #define ANDROID_FRAMEWORKS_ML_NN_COMMON_NNAPI_TYPES_H #include -#include -#include +#include #include #include @@ -238,10 +237,15 @@ struct Operand { ExtraParams extraParams; }; -using NativeHandle = ::android::sp<::android::NativeHandle>; +struct Handle { + std::vector fds; + std::vector ints; +}; + +using SharedHandle = std::shared_ptr; struct Memory { - NativeHandle handle; + SharedHandle handle; size_t size = 0; std::string name; }; @@ -313,7 +317,8 @@ struct Request { class SyncFence { public: static SyncFence createAsSignaled(); - static Result create(NativeHandle syncFence); + static SyncFence create(base::unique_fd fd); + static Result create(SharedHandle syncFence); // The function syncWait() has the same semantics as the system function // ::sync_wait(), except that the syncWait() return value is semantically @@ -329,12 +334,14 @@ class SyncFence { FenceState syncWait(OptionalTimeout optionalTimeout) const; - NativeHandle getHandle() const; + SharedHandle getSharedHandle() const; + bool hasFd() const; + int getFd() const; private: - explicit SyncFence(NativeHandle syncFence); + explicit SyncFence(SharedHandle syncFence); - NativeHandle mSyncFence; + SharedHandle mSyncFence; }; using Clock = std::chrono::steady_clock; diff --git a/nn/common/include/nnapi/Validation.h b/nn/common/include/nnapi/Validation.h index 3eda174f6..ea213bdd8 100644 --- a/nn/common/include/nnapi/Validation.h +++ b/nn/common/include/nnapi/Validation.h @@ -42,7 +42,7 @@ Result validate(const OutputShape& outputShape); Result validate(const Timing& timing); Result validate(const Capabilities& capabilities); Result validate(const Extension& extension); -Result validate(const NativeHandle& handle); +Result validate(const SharedHandle& handle); Result validate(const Memory& memory); Result validate(const Model& model); Result validate(const BufferDesc& bufferDesc); @@ -53,7 +53,7 @@ Result validate(const OptionalTimeoutDuration& optionalTimeoutDuration) Result validate(const std::vector& outputShapes); Result validate(const std::vector& extensions); -Result 
validate(const std::vector& handles); +Result validate(const std::vector& handles); Result validate(const std::vector& bufferRoles); // Validate request applied to model. diff --git a/nn/runtime/Manager.cpp b/nn/runtime/Manager.cpp index 90d58e490..d977878cd 100644 --- a/nn/runtime/Manager.cpp +++ b/nn/runtime/Manager.cpp @@ -432,8 +432,6 @@ std::tuple, Timing> DriverPreparedM CHECK(std::all_of(waitFor.begin(), waitFor.end(), [](int fd) { return fd > 0; })); // Make a copy of the memory tracker as we will append memory pools for pointer arguments. std::vector localMemories = memories; - sp executeFencedCallback; - Timing timing; // We separate the input & output pools so accelerators only need to copy // the contents of the input pools. We could also use it to set protection @@ -443,12 +441,12 @@ std::tuple, Timing> DriverPreparedM const auto [n1, inputPtrArgsMemory, inputPtrArgsLocations] = allocatePointerArgumentsToPool(inputs, &localMemories); if (n1 != ANEURALNETWORKS_NO_ERROR) { - return {n1, -1, nullptr, timing}; + return {n1, -1, nullptr, {}}; } const auto [n2, outputPtrArgsMemory, outputPtrArgsLocations] = allocatePointerArgumentsToPool(outputs, &localMemories); if (n2 != ANEURALNETWORKS_NO_ERROR) { - return {n2, -1, nullptr, timing}; + return {n2, -1, nullptr, {}}; } // Copy the input data that was specified via a pointer. @@ -475,28 +473,18 @@ std::tuple, Timing> DriverPreparedM NNTRACE_FULL_SWITCH(NNTRACE_LAYER_IPC, NNTRACE_PHASE_EXECUTION, "DriverPreparedModel::executeFenced"); - int n = ANEURALNETWORKS_OP_FAILED; - hardware::hidl_vec waitForHandles; - waitForHandles.resize(waitFor.size()); - for (uint32_t i = 0; i < waitFor.size(); i++) { - native_handle_t* nativeHandle = native_handle_create(1, 0); - if (nativeHandle == nullptr) { - LOG(ERROR) << "Failed to create native_handle"; - return {n, -1, nullptr, timing}; - } - int dupFd = dup(waitFor[i]); + std::vector waitForHandles; + waitForHandles.reserve(waitFor.size()); + for (int fd : waitFor) { + int dupFd = dup(fd); if (dupFd <= 0) { LOG(ERROR) << "Unable to dup the file descriptor"; - return {n, -1, nullptr, timing}; + return {ANEURALNETWORKS_OP_FAILED, -1, nullptr, {}}; } - nativeHandle->data[0] = dupFd; - hardware::hidl_handle hidlHandle; - hidlHandle.setTo(nativeHandle, /*shouldOwn=*/true); - waitForHandles[i] = std::move(hidlHandle); + waitForHandles.push_back(SyncFence::create(base::unique_fd(dupFd))); } - hardware::hidl_handle syncFence; - std::tie(n, syncFence, executeFencedCallback, timing) = + auto [n, syncFence, executeFencedCallback, timing] = mPreparedModel->executeFenced(request, waitForHandles, measure, deadline, loopTimeoutDuration, timeoutDurationAfterFence); @@ -506,8 +494,8 @@ std::tuple, Timing> DriverPreparedM } int syncFenceFd = -1; - if (syncFence.getNativeHandle()) { - syncFenceFd = dup(syncFence.getNativeHandle()->data[0]); + if (syncFence.hasFd()) { + syncFenceFd = dup(syncFence.getFd()); if (syncFenceFd < 0) { LOG(ERROR) << "Failed to dup the file descriptor"; return {ANEURALNETWORKS_OP_FAILED, -1, nullptr, timing}; diff --git a/nn/runtime/VersionedInterfaces.cpp b/nn/runtime/VersionedInterfaces.cpp index fce558c38..8246b623e 100644 --- a/nn/runtime/VersionedInterfaces.cpp +++ b/nn/runtime/VersionedInterfaces.cpp @@ -24,6 +24,7 @@ #include #include #include +#include #include #include @@ -416,67 +417,83 @@ static std::pair getCapabilitiesFunction( return result; } -std::tuple, Timing> +std::tuple, Timing> VersionedIPreparedModel::executeFenced(const Request& request, - const hardware::hidl_vec& 
waitFor, - MeasureTiming measure, + const std::vector& waitFor, MeasureTiming measure, const std::optional& deadline, const OptionalTimeoutDuration& loopTimeoutDuration, const OptionalTimeoutDuration& timeoutDurationAfterFence) { // version 1.3 HAL - hardware::hidl_handle syncFence; + hardware::hidl_handle hidlSyncFence; sp dispatchCallback; Timing timing = {UINT64_MAX, UINT64_MAX}; if (mPreparedModelV1_3 != nullptr) { ErrorStatus errorStatus; const auto otp = makeTimePoint(deadline); + auto waitForHandles = hal::utils::convertSyncFences(waitFor); + if (!waitForHandles.has_value()) { + LOG(ERROR) << "executeFenced failure: " << waitForHandles.error().message; + return std::make_tuple(ANEURALNETWORKS_OP_FAILED, SyncFence::createAsSignaled(), + nullptr, timing); + } hardware::Return ret = mPreparedModelV1_3->executeFenced( - convertToV1_3(request), waitFor, convertToV1_2(measure), convertToV1_3(otp), - convertToV1_3(loopTimeoutDuration), convertToV1_3(timeoutDurationAfterFence), - [&syncFence, &errorStatus, &dispatchCallback]( + convertToV1_3(request), std::move(waitForHandles).value(), convertToV1_2(measure), + convertToV1_3(otp), convertToV1_3(loopTimeoutDuration), + convertToV1_3(timeoutDurationAfterFence), + [&hidlSyncFence, &errorStatus, &dispatchCallback]( V1_3::ErrorStatus error, const hardware::hidl_handle& handle, const sp& callback) { - syncFence = handle; + hidlSyncFence = handle; errorStatus = uncheckedConvert(error); dispatchCallback = callback; }); if (!ret.isOk()) { LOG(ERROR) << "executeFenced failure: " << ret.description(); - return std::make_tuple(ANEURALNETWORKS_OP_FAILED, hardware::hidl_handle(nullptr), + return std::make_tuple(ANEURALNETWORKS_OP_FAILED, SyncFence::createAsSignaled(), nullptr, timing); } if (errorStatus != ErrorStatus::NONE) { LOG(ERROR) << "executeFenced returned " << errorStatus; return std::make_tuple(convertErrorStatusToResultCode(errorStatus), - hardware::hidl_handle(nullptr), nullptr, timing); + SyncFence::createAsSignaled(), nullptr, timing); } - return std::make_tuple(ANEURALNETWORKS_NO_ERROR, syncFence, dispatchCallback, timing); + auto sharedHandle = hal::utils::sharedHandleFromNativeHandle(hidlSyncFence); + if (!sharedHandle.has_value()) { + LOG(ERROR) << "executeFenced failure: " << sharedHandle.error().message; + return std::make_tuple(ANEURALNETWORKS_OP_FAILED, SyncFence::createAsSignaled(), + nullptr, timing); + } + auto syncFence = sharedHandle.value() == nullptr + ? SyncFence::createAsSignaled() + : SyncFence::create(std::move(sharedHandle).value()); + if (!syncFence.has_value()) { + LOG(ERROR) << "executeFenced failure: " << syncFence.error(); + return std::make_tuple(ANEURALNETWORKS_OP_FAILED, SyncFence::createAsSignaled(), + nullptr, timing); + } + return std::make_tuple(ANEURALNETWORKS_NO_ERROR, std::move(syncFence).value(), + dispatchCallback, timing); } // fallback to synchronous execution if sync_fence is not supported // first wait for all sync fences to be ready. 
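
The fallback path below blocks on each incoming fence before executing synchronously. Under the hood, SyncFence::syncWait() (Types.cpp, earlier in this patch) is a poll() loop over the fence fd; a stand-alone sketch of that pattern with hypothetical names, not the patch's code:

#include <poll.h>

#include <cerrno>

enum class FenceStateSketch { ACTIVE, SIGNALED, ERROR, UNKNOWN };

// timeoutMs < 0 blocks indefinitely, matching the Timeout{-1} default above.
FenceStateSketch syncWaitSketch(int fd, int timeoutMs) {
    if (fd < 0) return FenceStateSketch::UNKNOWN;
    pollfd fds{};
    fds.fd = fd;
    fds.events = POLLIN;
    int ret;
    do {
        ret = poll(&fds, 1, timeoutMs);
    } while (ret == -1 && (errno == EINTR || errno == EAGAIN));
    if (ret == 0) return FenceStateSketch::ACTIVE;  // timed out, still pending
    if (ret < 0) return FenceStateSketch::UNKNOWN;  // poll itself failed
    if (fds.revents & (POLLERR | POLLNVAL)) return FenceStateSketch::ERROR;
    return FenceStateSketch::SIGNALED;
}
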
LOG(INFO) << "No drivers able to handle sync fences, falling back to regular execution"; - for (const auto& fenceHandle : waitFor) { - if (!fenceHandle.getNativeHandle()) { - return std::make_tuple(ANEURALNETWORKS_BAD_DATA, hardware::hidl_handle(nullptr), - nullptr, timing); - } - int syncFd = fenceHandle.getNativeHandle()->data[0]; - if (syncFd <= 0) { - return std::make_tuple(ANEURALNETWORKS_BAD_DATA, hardware::hidl_handle(nullptr), - nullptr, timing); + for (const auto& fence : waitFor) { + if (!fence.hasFd() || fence.getFd() <= 0) { + return std::make_tuple(ANEURALNETWORKS_BAD_DATA, SyncFence::createAsSignaled(), nullptr, + timing); } - auto r = syncWait(syncFd, -1); - if (r != FenceState::SIGNALED) { - LOG(ERROR) << "syncWait failed, fd: " << syncFd; - return std::make_tuple(ANEURALNETWORKS_OP_FAILED, hardware::hidl_handle(nullptr), + auto r = fence.syncWait({/* no timeout */}); + if (r != SyncFence::FenceState::SIGNALED) { + LOG(ERROR) << "syncWait failed, fd: " << fence.getFd() << ", state: " << r; + return std::make_tuple(ANEURALNETWORKS_OP_FAILED, SyncFence::createAsSignaled(), nullptr, timing); } } int errorCode; std::tie(errorCode, std::ignore, timing) = executeSynchronously(request, measure, deadline, loopTimeoutDuration); - return std::make_tuple(errorCode, hardware::hidl_handle(nullptr), nullptr, timing); + return std::make_tuple(errorCode, SyncFence::createAsSignaled(), nullptr, timing); } static std::pair getCapabilitiesFunction( diff --git a/nn/runtime/VersionedInterfaces.h b/nn/runtime/VersionedInterfaces.h index d41dcd3ad..bd7e396bf 100644 --- a/nn/runtime/VersionedInterfaces.h +++ b/nn/runtime/VersionedInterfaces.h @@ -754,8 +754,8 @@ class VersionedIPreparedModel { * all sync fences in waitFor are signaled. * @return A tuple consisting of: * - Error code of the dispatch call. - * - A sync_fence that will be triggered when the task is completed. - * The sync_fence will be set to error if critical error occurs when doing + * - A SyncFence that will be triggered when the task is completed. + * The SyncFence will be set to error if critical error occurs when doing * actual evaluation. * - A callback can be used to query information like duration * and detailed runtime error status when the task is completed. @@ -763,11 +763,11 @@ class VersionedIPreparedModel { * sync execution. 
Either IFencedExecutionCallback will be * returned or optional timing information is returned */ - std::tuple, Timing> - executeFenced(const Request& request, const hardware::hidl_vec& waitFor, - MeasureTiming measure, const std::optional& deadline, - const OptionalTimeoutDuration& loopTimeoutDuration, - const OptionalTimeoutDuration& timeoutDurationAfterFence); + std::tuple, Timing> executeFenced( + const Request& request, const std::vector& waitFor, MeasureTiming measure, + const std::optional& deadline, + const OptionalTimeoutDuration& loopTimeoutDuration, + const OptionalTimeoutDuration& timeoutDurationAfterFence); private: friend class VersionedIDevice; diff --git a/nn/runtime/test/TestVersionedInterfaces.cpp b/nn/runtime/test/TestVersionedInterfaces.cpp index b4f32bcde..4187029f9 100644 --- a/nn/runtime/test/TestVersionedInterfaces.cpp +++ b/nn/runtime/test/TestVersionedInterfaces.cpp @@ -2182,7 +2182,7 @@ TEST_F(VersionedIPreparedModelV1_0Test, executeFenced) { // verify success EXPECT_EQ(ANEURALNETWORKS_NO_ERROR, resultCode); - EXPECT_EQ(nullptr, syncFence.getNativeHandle()); + EXPECT_EQ(nullptr, syncFence.getSharedHandle()); EXPECT_EQ(nullptr, dispatchCallback.get()); EXPECT_EQ(kNoTiming, timing); } @@ -2198,7 +2198,7 @@ TEST_F(VersionedIPreparedModelV1_1Test, executeFenced) { // verify success EXPECT_EQ(ANEURALNETWORKS_NO_ERROR, resultCode); - EXPECT_EQ(nullptr, syncFence.getNativeHandle()); + EXPECT_EQ(nullptr, syncFence.getSharedHandle()); EXPECT_EQ(nullptr, dispatchCallback.get()); EXPECT_EQ(kNoTiming, timing); } @@ -2214,7 +2214,7 @@ TEST_F(VersionedIPreparedModelV1_2Test, executeFenced) { // verify success EXPECT_EQ(ANEURALNETWORKS_NO_ERROR, resultCode); - EXPECT_EQ(nullptr, syncFence.getNativeHandle()); + EXPECT_EQ(nullptr, syncFence.getSharedHandle()); EXPECT_EQ(nullptr, dispatchCallback.get()); EXPECT_EQ(kNoTiming, timing); } @@ -2235,7 +2235,7 @@ TEST_F(VersionedIPreparedModelV1_3Test, executeFenced) { // verify success EXPECT_EQ(ANEURALNETWORKS_NO_ERROR, resultCode); - EXPECT_NE(nullptr, syncFence.getNativeHandle()); + EXPECT_NE(nullptr, syncFence.getSharedHandle()); EXPECT_NE(nullptr, dispatchCallback.get()); EXPECT_EQ(kNoTiming, timing); } @@ -2494,7 +2494,7 @@ TEST_F(VersionedIPreparedModelV1_0Test, executeFencedFailure) { // verify failure EXPECT_EQ(ANEURALNETWORKS_OP_FAILED, resultCode); - EXPECT_EQ(nullptr, syncFence.getNativeHandle()); + EXPECT_EQ(nullptr, syncFence.getSharedHandle()); EXPECT_EQ(nullptr, dispatchCallback.get()); EXPECT_EQ(kNoTiming, timing); } @@ -2511,7 +2511,7 @@ TEST_F(VersionedIPreparedModelV1_1Test, executeFencedFailure) { // verify failure EXPECT_EQ(ANEURALNETWORKS_OP_FAILED, resultCode); - EXPECT_EQ(nullptr, syncFence.getNativeHandle()); + EXPECT_EQ(nullptr, syncFence.getSharedHandle()); EXPECT_EQ(nullptr, dispatchCallback.get()); EXPECT_EQ(kNoTiming, timing); } @@ -2528,7 +2528,7 @@ TEST_F(VersionedIPreparedModelV1_2Test, executeFencedFailure) { // verify failure EXPECT_EQ(ANEURALNETWORKS_OP_FAILED, resultCode); - EXPECT_EQ(nullptr, syncFence.getNativeHandle()); + EXPECT_EQ(nullptr, syncFence.getSharedHandle()); EXPECT_EQ(nullptr, dispatchCallback.get()); EXPECT_EQ(kNoTiming, timing); } @@ -2550,7 +2550,7 @@ TEST_F(VersionedIPreparedModelV1_3Test, executeFencedFailure) { // verify failure EXPECT_EQ(ANEURALNETWORKS_OP_FAILED, resultCode); - EXPECT_EQ(nullptr, syncFence.getNativeHandle()); + EXPECT_EQ(nullptr, syncFence.getSharedHandle()); EXPECT_EQ(nullptr, dispatchCallback.get()); EXPECT_EQ(kNoTiming, timing); } @@ -2759,7 +2759,7 @@ 
TEST_F(VersionedIPreparedModelV1_0Test, executeFencedTransportFailure) { // verify failure EXPECT_EQ(ANEURALNETWORKS_OP_FAILED, resultCode); - EXPECT_EQ(nullptr, syncFence.getNativeHandle()); + EXPECT_EQ(nullptr, syncFence.getSharedHandle()); EXPECT_EQ(nullptr, dispatchCallback.get()); EXPECT_EQ(kNoTiming, timing); } @@ -2776,7 +2776,7 @@ TEST_F(VersionedIPreparedModelV1_1Test, executeFencedTransportFailure) { // verify failure EXPECT_EQ(ANEURALNETWORKS_OP_FAILED, resultCode); - EXPECT_EQ(nullptr, syncFence.getNativeHandle()); + EXPECT_EQ(nullptr, syncFence.getSharedHandle()); EXPECT_EQ(nullptr, dispatchCallback.get()); EXPECT_EQ(kNoTiming, timing); } @@ -2793,7 +2793,7 @@ TEST_F(VersionedIPreparedModelV1_2Test, executeFencedTransportFailure) { // verify failure EXPECT_EQ(ANEURALNETWORKS_OP_FAILED, resultCode); - EXPECT_EQ(nullptr, syncFence.getNativeHandle()); + EXPECT_EQ(nullptr, syncFence.getSharedHandle()); EXPECT_EQ(nullptr, dispatchCallback.get()); EXPECT_EQ(kNoTiming, timing); } @@ -2810,7 +2810,7 @@ TEST_F(VersionedIPreparedModelV1_3Test, executeFencedTransportFailure) { // verify failure EXPECT_EQ(ANEURALNETWORKS_OP_FAILED, resultCode); - EXPECT_EQ(nullptr, syncFence.getNativeHandle()); + EXPECT_EQ(nullptr, syncFence.getSharedHandle()); EXPECT_EQ(nullptr, dispatchCallback.get()); EXPECT_EQ(kNoTiming, timing); } -- cgit v1.2.3
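
Taken together, the SyncFence interface introduced by this patch can be exercised end to end: wrap an owned fd, then wait on it. A hypothetical usage sketch against that interface (error handling elided; the include path follows this patch's nn/common/include layout):

#include <android-base/unique_fd.h>

#include <utility>

#include <nnapi/Types.h>

using ::android::nn::SyncFence;

// Assumes `rawFd` is a valid sync-fence fd we own; SyncFence::create()
// takes ownership via unique_fd, so no manual close() is needed.
void waitOnFenceSketch(android::base::unique_fd rawFd) {
    SyncFence fence = SyncFence::create(std::move(rawFd));
    if (fence.hasFd()) {
        // {} means no timeout, per SyncFence::syncWait() in this patch.
        const auto state = fence.syncWait({/* no timeout */});
        (void)state;  // SIGNALED on success; ERROR/UNKNOWN otherwise
    }
}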