|
@@ -13,18 +13,18 @@ See the License for the specific language governing permissions and
|
|
|
limitations under the License.
|
|
limitations under the License.
|
|
|
==============================================================================*/
|
|
==============================================================================*/
|
|
|
|
|
|
|
|
-#include "flatbuffer_conversions.h"
|
|
|
|
|
|
|
+#include "tensorflow/lite/core/api/flatbuffer_conversions.h"
|
|
|
|
|
|
|
|
#include <cstddef>
|
|
#include <cstddef>
|
|
|
#include <cstdint>
|
|
#include <cstdint>
|
|
|
#include <memory>
|
|
#include <memory>
|
|
|
|
|
|
|
|
#include "flatbuffers/flatbuffers.h" // from @flatbuffers
|
|
#include "flatbuffers/flatbuffers.h" // from @flatbuffers
|
|
|
-#include "tflite/c/builtin_op_data.h"
|
|
|
|
|
-#include "tflite/c/common.h"
|
|
|
|
|
-#include "error_reporter.h"
|
|
|
|
|
-#include "compatibility.h"
|
|
|
|
|
-#include "tflite/schema/schema_generated.h"
|
|
|
|
|
|
|
+#include "tensorflow/lite/c/builtin_op_data.h"
|
|
|
|
|
+#include "tensorflow/lite/c/common.h"
|
|
|
|
|
+#include "tensorflow/lite/core/api/error_reporter.h"
|
|
|
|
|
+#include "tensorflow/lite/kernels/internal/compatibility.h"
|
|
|
|
|
+#include "tensorflow/lite/schema/schema_generated.h"
|
|
|
|
|
|
|
|
namespace tflite {
|
|
namespace tflite {
|
|
|
|
|
|
|
@@ -109,7 +109,7 @@ TfLiteFusedActivation ConvertActivation(ActivationFunctionType activation) {
|
|
|
case ActivationFunctionType_RELU:
|
|
case ActivationFunctionType_RELU:
|
|
|
return kTfLiteActRelu;
|
|
return kTfLiteActRelu;
|
|
|
case ActivationFunctionType_RELU_N1_TO_1:
|
|
case ActivationFunctionType_RELU_N1_TO_1:
|
|
|
- return kTfLiteActRelu1;
|
|
|
|
|
|
|
+ return kTfLiteActReluN1To1;
|
|
|
case ActivationFunctionType_RELU6:
|
|
case ActivationFunctionType_RELU6:
|
|
|
return kTfLiteActRelu6;
|
|
return kTfLiteActRelu6;
|
|
|
case ActivationFunctionType_TANH:
|
|
case ActivationFunctionType_TANH:
|
|
@@ -131,339 +131,263 @@ TfLitePadding ConvertPadding(Padding padding) {
|
|
|
return kTfLitePaddingUnknown;
|
|
return kTfLitePaddingUnknown;
|
|
|
}
|
|
}
|
|
|
|
|
|
|
|
-} // namespace
|
|
|
|
|
|
|
+#ifndef TF_LITE_STATIC_MEMORY
|
|
|
|
|
+TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type,
|
|
|
|
|
+ ErrorReporter* error_reporter,
|
|
|
|
|
+ BuiltinDataAllocator* allocator,
|
|
|
|
|
+ void** builtin_data) {
|
|
|
|
|
+ auto parseLSHProjectionType = [](LSHProjectionType type) {
|
|
|
|
|
+ switch (type) {
|
|
|
|
|
+ case LSHProjectionType_SPARSE:
|
|
|
|
|
+ return kTfLiteLshProjectionSparse;
|
|
|
|
|
+ case LSHProjectionType_DENSE:
|
|
|
|
|
+ return kTfLiteLshProjectionDense;
|
|
|
|
|
+ default:
|
|
|
|
|
+ return kTfLiteLshProjectionUnknown;
|
|
|
|
|
+ }
|
|
|
|
|
+ };
|
|
|
|
|
+ auto parseCombinerType = [](CombinerType type) {
|
|
|
|
|
+ switch (type) {
|
|
|
|
|
+ case CombinerType_MEAN:
|
|
|
|
|
+ return kTfLiteCombinerTypeMean;
|
|
|
|
|
+ case CombinerType_SQRTN:
|
|
|
|
|
+ return kTfLiteCombinerTypeSqrtn;
|
|
|
|
|
+ case CombinerType_SUM:
|
|
|
|
|
+ default:
|
|
|
|
|
+ return kTfLiteCombinerTypeSum;
|
|
|
|
|
+ }
|
|
|
|
|
+ };
|
|
|
|
|
|
|
|
-TfLiteStatus ConvertTensorType(TensorType tensor_type, TfLiteType* type,
|
|
|
|
|
- ErrorReporter* error_reporter) {
|
|
|
|
|
- switch (tensor_type) {
|
|
|
|
|
- case TensorType_FLOAT16:
|
|
|
|
|
- *type = kTfLiteFloat16;
|
|
|
|
|
- return kTfLiteOk;
|
|
|
|
|
- case TensorType_FLOAT32:
|
|
|
|
|
- *type = kTfLiteFloat32;
|
|
|
|
|
- return kTfLiteOk;
|
|
|
|
|
- case TensorType_FLOAT64:
|
|
|
|
|
- *type = kTfLiteFloat64;
|
|
|
|
|
- return kTfLiteOk;
|
|
|
|
|
- case TensorType_INT16:
|
|
|
|
|
- *type = kTfLiteInt16;
|
|
|
|
|
- return kTfLiteOk;
|
|
|
|
|
- case TensorType_INT32:
|
|
|
|
|
- *type = kTfLiteInt32;
|
|
|
|
|
- return kTfLiteOk;
|
|
|
|
|
- case TensorType_UINT8:
|
|
|
|
|
- *type = kTfLiteUInt8;
|
|
|
|
|
- return kTfLiteOk;
|
|
|
|
|
- case TensorType_INT8:
|
|
|
|
|
- *type = kTfLiteInt8;
|
|
|
|
|
- return kTfLiteOk;
|
|
|
|
|
- case TensorType_INT64:
|
|
|
|
|
- *type = kTfLiteInt64;
|
|
|
|
|
- return kTfLiteOk;
|
|
|
|
|
- case TensorType_STRING:
|
|
|
|
|
- *type = kTfLiteString;
|
|
|
|
|
- return kTfLiteOk;
|
|
|
|
|
- case TensorType_BOOL:
|
|
|
|
|
- *type = kTfLiteBool;
|
|
|
|
|
- return kTfLiteOk;
|
|
|
|
|
- case TensorType_COMPLEX64:
|
|
|
|
|
- *type = kTfLiteComplex64;
|
|
|
|
|
- return kTfLiteOk;
|
|
|
|
|
- default:
|
|
|
|
|
- *type = kTfLiteNoType;
|
|
|
|
|
- TF_LITE_REPORT_ERROR(error_reporter,
|
|
|
|
|
- "Unsupported data type %d in tensor\n", tensor_type);
|
|
|
|
|
- return kTfLiteError;
|
|
|
|
|
- }
|
|
|
|
|
-}
|
|
|
|
|
|
|
+ SafeBuiltinDataAllocator safe_allocator(allocator);
|
|
|
|
|
+ *builtin_data = nullptr;
|
|
|
|
|
+ switch (op_type) {
|
|
|
|
|
+ case BuiltinOperator_ABS: {
|
|
|
|
|
+ return ParseAbs(op, error_reporter, allocator, builtin_data);
|
|
|
|
|
+ }
|
|
|
|
|
|
|
|
-TfLiteStatus ParseConv2D(const Operator* op, BuiltinOperator,
|
|
|
|
|
- ErrorReporter* error_reporter,
|
|
|
|
|
- BuiltinDataAllocator* allocator, void** builtin_data) {
|
|
|
|
|
- CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
|
|
|
|
|
|
|
+ case BuiltinOperator_ADD: {
|
|
|
|
|
+ return ParseAdd(op, error_reporter, allocator, builtin_data);
|
|
|
|
|
+ }
|
|
|
|
|
|
|
|
- SafeBuiltinDataAllocator safe_allocator(allocator);
|
|
|
|
|
- std::unique_ptr<TfLiteConvParams,
|
|
|
|
|
- SafeBuiltinDataAllocator::BuiltinDataDeleter>
|
|
|
|
|
- params = safe_allocator.Allocate<TfLiteConvParams>();
|
|
|
|
|
- TF_LITE_ENSURE(error_reporter, params != nullptr);
|
|
|
|
|
|
|
+ case BuiltinOperator_ARG_MAX: {
|
|
|
|
|
+ return ParseArgMax(op, error_reporter, allocator, builtin_data);
|
|
|
|
|
+ }
|
|
|
|
|
|
|
|
- const Conv2DOptions* schema_params = op->builtin_options_as_Conv2DOptions();
|
|
|
|
|
|
|
+ case BuiltinOperator_ARG_MIN: {
|
|
|
|
|
+ return ParseArgMin(op, error_reporter, allocator, builtin_data);
|
|
|
|
|
+ }
|
|
|
|
|
|
|
|
- if (schema_params != nullptr) {
|
|
|
|
|
- params->padding = ConvertPadding(schema_params->padding());
|
|
|
|
|
- params->stride_width = schema_params->stride_w();
|
|
|
|
|
- params->stride_height = schema_params->stride_h();
|
|
|
|
|
- params->activation =
|
|
|
|
|
- ConvertActivation(schema_params->fused_activation_function());
|
|
|
|
|
|
|
+ case BuiltinOperator_AVERAGE_POOL_2D: {
|
|
|
|
|
+ return ParsePool(op, error_reporter, allocator, builtin_data);
|
|
|
|
|
+ }
|
|
|
|
|
|
|
|
- params->dilation_width_factor = schema_params->dilation_w_factor();
|
|
|
|
|
- params->dilation_height_factor = schema_params->dilation_h_factor();
|
|
|
|
|
- } else {
|
|
|
|
|
- // TODO(b/157480169): We should either return kTfLiteError or fill in some
|
|
|
|
|
- // reasonable defaults in the params struct. We are not doing so until we
|
|
|
|
|
- // better undertand the ramifications of changing the legacy behavior.
|
|
|
|
|
- }
|
|
|
|
|
|
|
+ case BuiltinOperator_CEIL: {
|
|
|
|
|
+ return ParseCeil(op, error_reporter, allocator, builtin_data);
|
|
|
|
|
+ }
|
|
|
|
|
|
|
|
- *builtin_data = params.release();
|
|
|
|
|
- return kTfLiteOk;
|
|
|
|
|
-}
|
|
|
|
|
|
|
+ case BuiltinOperator_CONCATENATION: {
|
|
|
|
|
+ return ParseConcatenation(op, error_reporter, allocator, builtin_data);
|
|
|
|
|
+ }
|
|
|
|
|
|
|
|
-TfLiteStatus ParseDepthwiseConv2D(const Operator* op, BuiltinOperator,
|
|
|
|
|
- ErrorReporter* error_reporter,
|
|
|
|
|
- BuiltinDataAllocator* allocator,
|
|
|
|
|
- void** builtin_data) {
|
|
|
|
|
- CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
|
|
|
|
|
|
|
+ case BuiltinOperator_CONV_2D: {
|
|
|
|
|
+ return ParseConv2D(op, error_reporter, allocator, builtin_data);
|
|
|
|
|
+ }
|
|
|
|
|
|
|
|
- SafeBuiltinDataAllocator safe_allocator(allocator);
|
|
|
|
|
|
|
+ case BuiltinOperator_DEPTHWISE_CONV_2D: {
|
|
|
|
|
+ return ParseDepthwiseConv2D(op, error_reporter, allocator, builtin_data);
|
|
|
|
|
+ }
|
|
|
|
|
|
|
|
- std::unique_ptr<TfLiteDepthwiseConvParams,
|
|
|
|
|
- SafeBuiltinDataAllocator::BuiltinDataDeleter>
|
|
|
|
|
- params = safe_allocator.Allocate<TfLiteDepthwiseConvParams>();
|
|
|
|
|
- TF_LITE_ENSURE(error_reporter, params != nullptr);
|
|
|
|
|
|
|
+ case BuiltinOperator_DEQUANTIZE: {
|
|
|
|
|
+ return ParseDequantize(op, error_reporter, allocator, builtin_data);
|
|
|
|
|
+ }
|
|
|
|
|
|
|
|
- const DepthwiseConv2DOptions* schema_params =
|
|
|
|
|
- op->builtin_options_as_DepthwiseConv2DOptions();
|
|
|
|
|
|
|
+ case BuiltinOperator_FLOOR: {
|
|
|
|
|
+ return ParseFloor(op, error_reporter, allocator, builtin_data);
|
|
|
|
|
+ }
|
|
|
|
|
|
|
|
- if (schema_params != nullptr) {
|
|
|
|
|
- params->padding = ConvertPadding(schema_params->padding());
|
|
|
|
|
- params->stride_width = schema_params->stride_w();
|
|
|
|
|
- params->stride_height = schema_params->stride_h();
|
|
|
|
|
- params->depth_multiplier = schema_params->depth_multiplier();
|
|
|
|
|
- params->activation =
|
|
|
|
|
- ConvertActivation(schema_params->fused_activation_function());
|
|
|
|
|
|
|
+ case BuiltinOperator_FULLY_CONNECTED: {
|
|
|
|
|
+ return ParseFullyConnected(op, error_reporter, allocator, builtin_data);
|
|
|
|
|
+ }
|
|
|
|
|
|
|
|
- params->dilation_width_factor = schema_params->dilation_w_factor();
|
|
|
|
|
- params->dilation_height_factor = schema_params->dilation_h_factor();
|
|
|
|
|
- } else {
|
|
|
|
|
- // TODO(b/157480169): We should either return kTfLiteError or fill in some
|
|
|
|
|
- // reasonable defaults in the params struct. We are not doing so until we
|
|
|
|
|
- // better undertand the ramifications of changing the legacy behavior.
|
|
|
|
|
- }
|
|
|
|
|
|
|
+ case BuiltinOperator_GREATER: {
|
|
|
|
|
+ return ParseGreater(op, error_reporter, allocator, builtin_data);
|
|
|
|
|
+ }
|
|
|
|
|
|
|
|
- *builtin_data = params.release();
|
|
|
|
|
- return kTfLiteOk;
|
|
|
|
|
-}
|
|
|
|
|
|
|
+ case BuiltinOperator_GREATER_EQUAL: {
|
|
|
|
|
+ return ParseGreaterEqual(op, error_reporter, allocator, builtin_data);
|
|
|
|
|
+ }
|
|
|
|
|
|
|
|
-// We have this parse function instead of directly returning kTfLiteOk from the
|
|
|
|
|
-// switch-case in ParseOpData because this function is used as part of the
|
|
|
|
|
-// selective registration for the OpResolver implementation in micro.
|
|
|
|
|
-TfLiteStatus ParseDequantize(const Operator*, BuiltinOperator, ErrorReporter*,
|
|
|
|
|
- BuiltinDataAllocator*, void**) {
|
|
|
|
|
- return kTfLiteOk;
|
|
|
|
|
-}
|
|
|
|
|
|
|
+ case BuiltinOperator_HARD_SWISH: {
|
|
|
|
|
+ return ParseHardSwish(op, error_reporter, allocator, builtin_data);
|
|
|
|
|
+ }
|
|
|
|
|
|
|
|
-TfLiteStatus ParseFullyConnected(const Operator* op, BuiltinOperator,
|
|
|
|
|
- ErrorReporter* error_reporter,
|
|
|
|
|
- BuiltinDataAllocator* allocator,
|
|
|
|
|
- void** builtin_data) {
|
|
|
|
|
- CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
|
|
|
|
|
|
|
+ case BuiltinOperator_L2_NORMALIZATION: {
|
|
|
|
|
+ return ParseL2Normalization(op, error_reporter, allocator, builtin_data);
|
|
|
|
|
+ }
|
|
|
|
|
|
|
|
- SafeBuiltinDataAllocator safe_allocator(allocator);
|
|
|
|
|
|
|
+ case BuiltinOperator_L2_POOL_2D: {
|
|
|
|
|
+ return ParsePool(op, error_reporter, allocator, builtin_data);
|
|
|
|
|
+ }
|
|
|
|
|
|
|
|
- std::unique_ptr<TfLiteFullyConnectedParams,
|
|
|
|
|
- SafeBuiltinDataAllocator::BuiltinDataDeleter>
|
|
|
|
|
- params = safe_allocator.Allocate<TfLiteFullyConnectedParams>();
|
|
|
|
|
- TF_LITE_ENSURE(error_reporter, params != nullptr);
|
|
|
|
|
|
|
+ case BuiltinOperator_LESS: {
|
|
|
|
|
+ return ParseLess(op, error_reporter, allocator, builtin_data);
|
|
|
|
|
+ }
|
|
|
|
|
|
|
|
- const FullyConnectedOptions* schema_params =
|
|
|
|
|
- op->builtin_options_as_FullyConnectedOptions();
|
|
|
|
|
|
|
+ case BuiltinOperator_LESS_EQUAL: {
|
|
|
|
|
+ return ParseLessEqual(op, error_reporter, allocator, builtin_data);
|
|
|
|
|
+ }
|
|
|
|
|
|
|
|
- if (schema_params != nullptr) {
|
|
|
|
|
- params->activation =
|
|
|
|
|
- ConvertActivation(schema_params->fused_activation_function());
|
|
|
|
|
- params->keep_num_dims = schema_params->keep_num_dims();
|
|
|
|
|
- params->asymmetric_quantize_inputs =
|
|
|
|
|
- schema_params->asymmetric_quantize_inputs();
|
|
|
|
|
|
|
+ case BuiltinOperator_LOG: {
|
|
|
|
|
+ return ParseLog(op, error_reporter, allocator, builtin_data);
|
|
|
|
|
+ }
|
|
|
|
|
|
|
|
- switch (schema_params->weights_format()) {
|
|
|
|
|
- case FullyConnectedOptionsWeightsFormat_DEFAULT:
|
|
|
|
|
- params->weights_format = kTfLiteFullyConnectedWeightsFormatDefault;
|
|
|
|
|
- break;
|
|
|
|
|
- case FullyConnectedOptionsWeightsFormat_SHUFFLED4x16INT8:
|
|
|
|
|
- params->weights_format =
|
|
|
|
|
- kTfLiteFullyConnectedWeightsFormatShuffled4x16Int8;
|
|
|
|
|
- break;
|
|
|
|
|
- default:
|
|
|
|
|
- TF_LITE_REPORT_ERROR(error_reporter,
|
|
|
|
|
- "Unhandled fully-connected weights format.");
|
|
|
|
|
- return kTfLiteError;
|
|
|
|
|
|
|
+ case BuiltinOperator_LOGICAL_AND: {
|
|
|
|
|
+ return ParseLogicalAnd(op, error_reporter, allocator, builtin_data);
|
|
|
}
|
|
}
|
|
|
- } else {
|
|
|
|
|
- // TODO(b/157480169): We should either return kTfLiteError or fill in some
|
|
|
|
|
- // reasonable defaults in the params struct. We are not doing so until we
|
|
|
|
|
- // better undertand the ramifications of changing the legacy behavior.
|
|
|
|
|
- }
|
|
|
|
|
|
|
|
|
|
- *builtin_data = params.release();
|
|
|
|
|
- return kTfLiteOk;
|
|
|
|
|
-}
|
|
|
|
|
|
|
+ case BuiltinOperator_LOGICAL_NOT: {
|
|
|
|
|
+ return ParseLogicalNot(op, error_reporter, allocator, builtin_data);
|
|
|
|
|
+ }
|
|
|
|
|
|
|
|
-TfLiteStatus ParseReshape(const Operator* op, BuiltinOperator,
|
|
|
|
|
- ErrorReporter* error_reporter,
|
|
|
|
|
- BuiltinDataAllocator* allocator,
|
|
|
|
|
- void** builtin_data) {
|
|
|
|
|
- CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
|
|
|
|
|
|
|
+ case BuiltinOperator_LOGICAL_OR: {
|
|
|
|
|
+ return ParseLogicalOr(op, error_reporter, allocator, builtin_data);
|
|
|
|
|
+ }
|
|
|
|
|
|
|
|
- SafeBuiltinDataAllocator safe_allocator(allocator);
|
|
|
|
|
|
|
+ case BuiltinOperator_LOGISTIC: {
|
|
|
|
|
+ return ParseLogistic(op, error_reporter, allocator, builtin_data);
|
|
|
|
|
+ }
|
|
|
|
|
|
|
|
- std::unique_ptr<TfLiteReshapeParams,
|
|
|
|
|
- SafeBuiltinDataAllocator::BuiltinDataDeleter>
|
|
|
|
|
- params = safe_allocator.Allocate<TfLiteReshapeParams>();
|
|
|
|
|
- TF_LITE_ENSURE(error_reporter, params != nullptr);
|
|
|
|
|
|
|
+ case BuiltinOperator_MAXIMUM: {
|
|
|
|
|
+ return ParseMaximum(op, error_reporter, allocator, builtin_data);
|
|
|
|
|
+ }
|
|
|
|
|
|
|
|
- const ReshapeOptions* schema_params = op->builtin_options_as_ReshapeOptions();
|
|
|
|
|
|
|
+ case BuiltinOperator_MAX_POOL_2D: {
|
|
|
|
|
+ return ParsePool(op, error_reporter, allocator, builtin_data);
|
|
|
|
|
+ }
|
|
|
|
|
|
|
|
- if (schema_params != nullptr) {
|
|
|
|
|
- const flatbuffers::Vector<int32_t>* new_shape = schema_params->new_shape();
|
|
|
|
|
- // TODO(b/147203660): We need to figure out when dynamic reshape
|
|
|
|
|
- // (new_shape is a tensor) happens, why the option is not a nullptr.
|
|
|
|
|
- // But nonethless, we should only copy when new_shape is not a nullptr.
|
|
|
|
|
- if (new_shape != nullptr) {
|
|
|
|
|
- TF_LITE_ENSURE_STATUS(
|
|
|
|
|
- FlatBufferIntVectorToArray(sizeof(params->shape), new_shape,
|
|
|
|
|
- params->shape, error_reporter, "reshape"));
|
|
|
|
|
- params->num_dimensions = new_shape->size();
|
|
|
|
|
- } else {
|
|
|
|
|
- // TODO(b/157480169) TODO(b/147203660): We should either return
|
|
|
|
|
- // kTfLiteError or fill in some reasonable defaults in the params struct.
|
|
|
|
|
- // We are not doing so until we better undertand the ramifications of
|
|
|
|
|
- // changing the legacy behavior.
|
|
|
|
|
|
|
+ case BuiltinOperator_MEAN: {
|
|
|
|
|
+ return ParseReducer(op, error_reporter, allocator, builtin_data);
|
|
|
}
|
|
}
|
|
|
- } else {
|
|
|
|
|
- // TODO(b/157480169): We should either return kTfLiteError or fill in some
|
|
|
|
|
- // reasonable defaults in the params struct. We are not doing so until we
|
|
|
|
|
- // better undertand the ramifications of changing the legacy behavior.
|
|
|
|
|
- }
|
|
|
|
|
|
|
|
|
|
- *builtin_data = params.release();
|
|
|
|
|
- return kTfLiteOk;
|
|
|
|
|
-}
|
|
|
|
|
|
|
+ case BuiltinOperator_MINIMUM: {
|
|
|
|
|
+ return ParseMinimum(op, error_reporter, allocator, builtin_data);
|
|
|
|
|
+ }
|
|
|
|
|
|
|
|
-// We have this parse function instead of directly returning kTfLiteOk from the
|
|
|
|
|
-// switch-case in ParseOpData because this function is used as part of the
|
|
|
|
|
-// selective registration for the OpResolver implementation in micro.
|
|
|
|
|
-TfLiteStatus ParseQuantize(const Operator*, BuiltinOperator, ErrorReporter*,
|
|
|
|
|
- BuiltinDataAllocator*, void**) {
|
|
|
|
|
- return kTfLiteOk;
|
|
|
|
|
-}
|
|
|
|
|
|
|
+ case BuiltinOperator_MUL: {
|
|
|
|
|
+ return ParseMul(op, error_reporter, allocator, builtin_data);
|
|
|
|
|
+ }
|
|
|
|
|
|
|
|
-TfLiteStatus ParseSoftmax(const Operator* op, BuiltinOperator,
|
|
|
|
|
- ErrorReporter* error_reporter,
|
|
|
|
|
- BuiltinDataAllocator* allocator,
|
|
|
|
|
- void** builtin_data) {
|
|
|
|
|
- CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
|
|
|
|
|
|
|
+ case BuiltinOperator_NEG: {
|
|
|
|
|
+ return ParseNeg(op, error_reporter, allocator, builtin_data);
|
|
|
|
|
+ }
|
|
|
|
|
|
|
|
- SafeBuiltinDataAllocator safe_allocator(allocator);
|
|
|
|
|
- std::unique_ptr<TfLiteSoftmaxParams,
|
|
|
|
|
- SafeBuiltinDataAllocator::BuiltinDataDeleter>
|
|
|
|
|
- params = safe_allocator.Allocate<TfLiteSoftmaxParams>();
|
|
|
|
|
- TF_LITE_ENSURE(error_reporter, params != nullptr);
|
|
|
|
|
|
|
+ case BuiltinOperator_NOT_EQUAL: {
|
|
|
|
|
+ return ParseNotEqual(op, error_reporter, allocator, builtin_data);
|
|
|
|
|
+ }
|
|
|
|
|
|
|
|
- const SoftmaxOptions* schema_params = op->builtin_options_as_SoftmaxOptions();
|
|
|
|
|
|
|
+ case BuiltinOperator_PACK: {
|
|
|
|
|
+ return ParsePack(op, error_reporter, allocator, builtin_data);
|
|
|
|
|
+ }
|
|
|
|
|
|
|
|
- if (schema_params != nullptr) {
|
|
|
|
|
- params->beta = schema_params->beta();
|
|
|
|
|
- } else {
|
|
|
|
|
- // TODO(b/157480169): We should either return kTfLiteError or fill in some
|
|
|
|
|
- // reasonable defaults in the params struct. We are not doing so until we
|
|
|
|
|
- // better undertand the ramifications of changing the legacy behavior.
|
|
|
|
|
- }
|
|
|
|
|
|
|
+ case BuiltinOperator_PAD: {
|
|
|
|
|
+ return ParsePad(op, error_reporter, allocator, builtin_data);
|
|
|
|
|
+ }
|
|
|
|
|
|
|
|
- *builtin_data = params.release();
|
|
|
|
|
- return kTfLiteOk;
|
|
|
|
|
-}
|
|
|
|
|
|
|
+ case BuiltinOperator_PADV2: {
|
|
|
|
|
+ return ParsePadV2(op, error_reporter, allocator, builtin_data);
|
|
|
|
|
+ }
|
|
|
|
|
|
|
|
-TfLiteStatus ParseSvdf(const Operator* op, BuiltinOperator,
|
|
|
|
|
- ErrorReporter* error_reporter,
|
|
|
|
|
- BuiltinDataAllocator* allocator, void** builtin_data) {
|
|
|
|
|
- CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
|
|
|
|
|
|
|
+ case BuiltinOperator_PRELU: {
|
|
|
|
|
+ return ParsePrelu(op, error_reporter, allocator, builtin_data);
|
|
|
|
|
+ }
|
|
|
|
|
|
|
|
- SafeBuiltinDataAllocator safe_allocator(allocator);
|
|
|
|
|
- std::unique_ptr<TfLiteSVDFParams,
|
|
|
|
|
- SafeBuiltinDataAllocator::BuiltinDataDeleter>
|
|
|
|
|
- params = safe_allocator.Allocate<TfLiteSVDFParams>();
|
|
|
|
|
- TF_LITE_ENSURE(error_reporter, params != nullptr);
|
|
|
|
|
|
|
+ case BuiltinOperator_QUANTIZE: {
|
|
|
|
|
+ return ParseQuantize(op, error_reporter, allocator, builtin_data);
|
|
|
|
|
+ }
|
|
|
|
|
|
|
|
- const SVDFOptions* schema_params = op->builtin_options_as_SVDFOptions();
|
|
|
|
|
- if (schema_params != nullptr) {
|
|
|
|
|
- params->rank = schema_params->rank();
|
|
|
|
|
- params->activation =
|
|
|
|
|
- ConvertActivation(schema_params->fused_activation_function());
|
|
|
|
|
- params->asymmetric_quantize_inputs =
|
|
|
|
|
- schema_params->asymmetric_quantize_inputs();
|
|
|
|
|
- } else {
|
|
|
|
|
- // TODO(b/157480169): We should either return kTfLiteError or fill in some
|
|
|
|
|
- // reasonable defaults in the params struct. We are not doing so until we
|
|
|
|
|
- // better undertand the ramifications of changing the legacy behavior.
|
|
|
|
|
- }
|
|
|
|
|
|
|
+ case BuiltinOperator_REDUCE_ANY: {
|
|
|
|
|
+ return ParseReducer(op, error_reporter, allocator, builtin_data);
|
|
|
|
|
+ }
|
|
|
|
|
|
|
|
- *builtin_data = params.release();
|
|
|
|
|
- return kTfLiteOk;
|
|
|
|
|
-}
|
|
|
|
|
|
|
+ case BuiltinOperator_REDUCE_MAX: {
|
|
|
|
|
+ return ParseReducer(op, error_reporter, allocator, builtin_data);
|
|
|
|
|
+ }
|
|
|
|
|
|
|
|
-TfLiteStatus ParseOpData(const Operator* op, BuiltinOperator op_type,
|
|
|
|
|
- ErrorReporter* error_reporter,
|
|
|
|
|
- BuiltinDataAllocator* allocator, void** builtin_data) {
|
|
|
|
|
- auto parseLSHProjectionType = [](LSHProjectionType type) {
|
|
|
|
|
- switch (type) {
|
|
|
|
|
- case LSHProjectionType_SPARSE:
|
|
|
|
|
- return kTfLiteLshProjectionSparse;
|
|
|
|
|
- case LSHProjectionType_DENSE:
|
|
|
|
|
- return kTfLiteLshProjectionDense;
|
|
|
|
|
- default:
|
|
|
|
|
- return kTfLiteLshProjectionUnknown;
|
|
|
|
|
|
|
+ case BuiltinOperator_REDUCE_MIN: {
|
|
|
|
|
+ return ParseReducer(op, error_reporter, allocator, builtin_data);
|
|
|
}
|
|
}
|
|
|
- };
|
|
|
|
|
- auto parseCombinerType = [](CombinerType type) {
|
|
|
|
|
- switch (type) {
|
|
|
|
|
- case CombinerType_MEAN:
|
|
|
|
|
- return kTfLiteCombinerTypeMean;
|
|
|
|
|
- case CombinerType_SQRTN:
|
|
|
|
|
- return kTfLiteCombinerTypeSqrtn;
|
|
|
|
|
- case CombinerType_SUM:
|
|
|
|
|
- default:
|
|
|
|
|
- return kTfLiteCombinerTypeSum;
|
|
|
|
|
|
|
+
|
|
|
|
|
+ case BuiltinOperator_REDUCE_PROD: {
|
|
|
|
|
+ return ParseReducer(op, error_reporter, allocator, builtin_data);
|
|
|
}
|
|
}
|
|
|
- };
|
|
|
|
|
|
|
|
|
|
- SafeBuiltinDataAllocator safe_allocator(allocator);
|
|
|
|
|
- *builtin_data = nullptr;
|
|
|
|
|
- switch (op_type) {
|
|
|
|
|
- case BuiltinOperator_CONV_2D: {
|
|
|
|
|
- return ParseConv2D(op, op_type, error_reporter, allocator, builtin_data);
|
|
|
|
|
|
|
+ case BuiltinOperator_RELU: {
|
|
|
|
|
+ return ParseRelu(op, error_reporter, allocator, builtin_data);
|
|
|
}
|
|
}
|
|
|
|
|
|
|
|
- case BuiltinOperator_DEPTHWISE_CONV_2D: {
|
|
|
|
|
- return ParseDepthwiseConv2D(op, op_type, error_reporter, allocator,
|
|
|
|
|
- builtin_data);
|
|
|
|
|
|
|
+ case BuiltinOperator_RELU6: {
|
|
|
|
|
+ return ParseRelu6(op, error_reporter, allocator, builtin_data);
|
|
|
}
|
|
}
|
|
|
|
|
|
|
|
- case BuiltinOperator_DEQUANTIZE: {
|
|
|
|
|
- return ParseDequantize(op, op_type, error_reporter, allocator,
|
|
|
|
|
- builtin_data);
|
|
|
|
|
|
|
+ case BuiltinOperator_RESHAPE: {
|
|
|
|
|
+ return ParseReshape(op, error_reporter, allocator, builtin_data);
|
|
|
}
|
|
}
|
|
|
|
|
|
|
|
- case BuiltinOperator_FULLY_CONNECTED: {
|
|
|
|
|
- return ParseFullyConnected(op, op_type, error_reporter, allocator,
|
|
|
|
|
- builtin_data);
|
|
|
|
|
|
|
+ case BuiltinOperator_RESIZE_NEAREST_NEIGHBOR: {
|
|
|
|
|
+ return ParseResizeNearestNeighbor(op, error_reporter, allocator,
|
|
|
|
|
+ builtin_data);
|
|
|
}
|
|
}
|
|
|
|
|
|
|
|
- case BuiltinOperator_QUANTIZE: {
|
|
|
|
|
- return ParseQuantize(op, op_type, error_reporter, allocator,
|
|
|
|
|
- builtin_data);
|
|
|
|
|
|
|
+ case BuiltinOperator_ROUND: {
|
|
|
|
|
+ return ParseRound(op, error_reporter, allocator, builtin_data);
|
|
|
}
|
|
}
|
|
|
|
|
|
|
|
- case BuiltinOperator_RESHAPE: {
|
|
|
|
|
- return ParseReshape(op, op_type, error_reporter, allocator, builtin_data);
|
|
|
|
|
|
|
+ case BuiltinOperator_RSQRT: {
|
|
|
|
|
+ return ParseRsqrt(op, error_reporter, allocator, builtin_data);
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ case BuiltinOperator_SIN: {
|
|
|
|
|
+ return ParseSin(op, error_reporter, allocator, builtin_data);
|
|
|
}
|
|
}
|
|
|
|
|
|
|
|
case BuiltinOperator_SOFTMAX: {
|
|
case BuiltinOperator_SOFTMAX: {
|
|
|
- return ParseSoftmax(op, op_type, error_reporter, allocator, builtin_data);
|
|
|
|
|
|
|
+ return ParseSoftmax(op, error_reporter, allocator, builtin_data);
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ case BuiltinOperator_SPLIT: {
|
|
|
|
|
+ return ParseSplit(op, error_reporter, allocator, builtin_data);
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ case BuiltinOperator_SQRT: {
|
|
|
|
|
+ return ParseSqrt(op, error_reporter, allocator, builtin_data);
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ case BuiltinOperator_SQUARE: {
|
|
|
|
|
+ return ParseSquare(op, error_reporter, allocator, builtin_data);
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ case BuiltinOperator_STRIDED_SLICE: {
|
|
|
|
|
+ return ParseStridedSlice(op, error_reporter, allocator, builtin_data);
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ case BuiltinOperator_SUB: {
|
|
|
|
|
+ return ParseSub(op, error_reporter, allocator, builtin_data);
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ case BuiltinOperator_SUM: {
|
|
|
|
|
+ return ParseReducer(op, error_reporter, allocator, builtin_data);
|
|
|
}
|
|
}
|
|
|
|
|
|
|
|
case BuiltinOperator_SVDF: {
|
|
case BuiltinOperator_SVDF: {
|
|
|
- return ParseSvdf(op, op_type, error_reporter, allocator, builtin_data);
|
|
|
|
|
|
|
+ return ParseSvdf(op, error_reporter, allocator, builtin_data);
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ case BuiltinOperator_TANH: {
|
|
|
|
|
+ return ParseTanh(op, error_reporter, allocator, builtin_data);
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ case BuiltinOperator_UNPACK: {
|
|
|
|
|
+ return ParseUnpack(op, error_reporter, allocator, builtin_data);
|
|
|
}
|
|
}
|
|
|
|
|
|
|
|
case BuiltinOperator_CAST: {
|
|
case BuiltinOperator_CAST: {
|
|
@@ -490,23 +414,6 @@ TfLiteStatus ParseOpData(const Operator* op, BuiltinOperator op_type,
|
|
|
*builtin_data = params.release();
|
|
*builtin_data = params.release();
|
|
|
return kTfLiteOk;
|
|
return kTfLiteOk;
|
|
|
}
|
|
}
|
|
|
- case BuiltinOperator_AVERAGE_POOL_2D:
|
|
|
|
|
- case BuiltinOperator_MAX_POOL_2D:
|
|
|
|
|
- case BuiltinOperator_L2_POOL_2D: {
|
|
|
|
|
- auto params = safe_allocator.Allocate<TfLitePoolParams>();
|
|
|
|
|
- TF_LITE_ENSURE(error_reporter, params != nullptr);
|
|
|
|
|
- if (const auto* pool_params = op->builtin_options_as_Pool2DOptions()) {
|
|
|
|
|
- params->padding = ConvertPadding(pool_params->padding());
|
|
|
|
|
- params->stride_width = pool_params->stride_w();
|
|
|
|
|
- params->stride_height = pool_params->stride_h();
|
|
|
|
|
- params->filter_width = pool_params->filter_width();
|
|
|
|
|
- params->filter_height = pool_params->filter_height();
|
|
|
|
|
- params->activation =
|
|
|
|
|
- ConvertActivation(pool_params->fused_activation_function());
|
|
|
|
|
- }
|
|
|
|
|
- *builtin_data = params.release();
|
|
|
|
|
- return kTfLiteOk;
|
|
|
|
|
- }
|
|
|
|
|
case BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_RNN: {
|
|
case BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_RNN: {
|
|
|
auto params = safe_allocator.Allocate<TfLiteSequenceRNNParams>();
|
|
auto params = safe_allocator.Allocate<TfLiteSequenceRNNParams>();
|
|
|
TF_LITE_ENSURE(error_reporter, params != nullptr);
|
|
TF_LITE_ENSURE(error_reporter, params != nullptr);
|
|
@@ -564,38 +471,6 @@ TfLiteStatus ParseOpData(const Operator* op, BuiltinOperator op_type,
|
|
|
case BuiltinOperator_HASHTABLE_LOOKUP:
|
|
case BuiltinOperator_HASHTABLE_LOOKUP:
|
|
|
// no-op.
|
|
// no-op.
|
|
|
return kTfLiteOk;
|
|
return kTfLiteOk;
|
|
|
- case BuiltinOperator_CONCATENATION: {
|
|
|
|
|
- auto params = safe_allocator.Allocate<TfLiteConcatenationParams>();
|
|
|
|
|
- TF_LITE_ENSURE(error_reporter, params != nullptr);
|
|
|
|
|
- if (const auto* concatenation_params =
|
|
|
|
|
- op->builtin_options_as_ConcatenationOptions()) {
|
|
|
|
|
- params->activation = ConvertActivation(
|
|
|
|
|
- concatenation_params->fused_activation_function());
|
|
|
|
|
- params->axis = concatenation_params->axis();
|
|
|
|
|
- }
|
|
|
|
|
- *builtin_data = params.release();
|
|
|
|
|
- return kTfLiteOk;
|
|
|
|
|
- }
|
|
|
|
|
- case BuiltinOperator_MUL: {
|
|
|
|
|
- auto params = safe_allocator.Allocate<TfLiteMulParams>();
|
|
|
|
|
- TF_LITE_ENSURE(error_reporter, params != nullptr);
|
|
|
|
|
- if (const auto* schema_params = op->builtin_options_as_MulOptions()) {
|
|
|
|
|
- params->activation =
|
|
|
|
|
- ConvertActivation(schema_params->fused_activation_function());
|
|
|
|
|
- }
|
|
|
|
|
- *builtin_data = params.release();
|
|
|
|
|
- return kTfLiteOk;
|
|
|
|
|
- }
|
|
|
|
|
- case BuiltinOperator_ADD: {
|
|
|
|
|
- auto params = safe_allocator.Allocate<TfLiteAddParams>();
|
|
|
|
|
- TF_LITE_ENSURE(error_reporter, params != nullptr);
|
|
|
|
|
- if (const auto* schema_params = op->builtin_options_as_AddOptions()) {
|
|
|
|
|
- params->activation =
|
|
|
|
|
- ConvertActivation(schema_params->fused_activation_function());
|
|
|
|
|
- }
|
|
|
|
|
- *builtin_data = params.release();
|
|
|
|
|
- return kTfLiteOk;
|
|
|
|
|
- }
|
|
|
|
|
case BuiltinOperator_DIV: {
|
|
case BuiltinOperator_DIV: {
|
|
|
auto params = safe_allocator.Allocate<TfLiteDivParams>();
|
|
auto params = safe_allocator.Allocate<TfLiteDivParams>();
|
|
|
TF_LITE_ENSURE(error_reporter, params != nullptr);
|
|
TF_LITE_ENSURE(error_reporter, params != nullptr);
|
|
@@ -606,26 +481,6 @@ TfLiteStatus ParseOpData(const Operator* op, BuiltinOperator op_type,
|
|
|
*builtin_data = params.release();
|
|
*builtin_data = params.release();
|
|
|
return kTfLiteOk;
|
|
return kTfLiteOk;
|
|
|
}
|
|
}
|
|
|
- case BuiltinOperator_SUB: {
|
|
|
|
|
- auto params = safe_allocator.Allocate<TfLiteSubParams>();
|
|
|
|
|
- TF_LITE_ENSURE(error_reporter, params != nullptr);
|
|
|
|
|
- if (const auto* schema_params = op->builtin_options_as_SubOptions()) {
|
|
|
|
|
- params->activation =
|
|
|
|
|
- ConvertActivation(schema_params->fused_activation_function());
|
|
|
|
|
- }
|
|
|
|
|
- *builtin_data = params.release();
|
|
|
|
|
- return kTfLiteOk;
|
|
|
|
|
- }
|
|
|
|
|
- case BuiltinOperator_L2_NORMALIZATION: {
|
|
|
|
|
- auto params = safe_allocator.Allocate<TfLiteL2NormParams>();
|
|
|
|
|
- TF_LITE_ENSURE(error_reporter, params != nullptr);
|
|
|
|
|
- if (const auto* schema_params = op->builtin_options_as_L2NormOptions()) {
|
|
|
|
|
- params->activation =
|
|
|
|
|
- ConvertActivation(schema_params->fused_activation_function());
|
|
|
|
|
- }
|
|
|
|
|
- *builtin_data = params.release();
|
|
|
|
|
- return kTfLiteOk;
|
|
|
|
|
- }
|
|
|
|
|
case BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION: {
|
|
case BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION: {
|
|
|
auto params = safe_allocator.Allocate<TfLiteLocalResponseNormParams>();
|
|
auto params = safe_allocator.Allocate<TfLiteLocalResponseNormParams>();
|
|
|
TF_LITE_ENSURE(error_reporter, params != nullptr);
|
|
TF_LITE_ENSURE(error_reporter, params != nullptr);
|
|
@@ -721,21 +576,6 @@ TfLiteStatus ParseOpData(const Operator* op, BuiltinOperator op_type,
|
|
|
*builtin_data = params.release();
|
|
*builtin_data = params.release();
|
|
|
return kTfLiteOk;
|
|
return kTfLiteOk;
|
|
|
}
|
|
}
|
|
|
- case BuiltinOperator_RESIZE_NEAREST_NEIGHBOR: {
|
|
|
|
|
- auto params =
|
|
|
|
|
- safe_allocator.Allocate<TfLiteResizeNearestNeighborParams>();
|
|
|
|
|
- TF_LITE_ENSURE(error_reporter, params != nullptr);
|
|
|
|
|
- if (const auto* schema_params =
|
|
|
|
|
- op->builtin_options_as_ResizeNearestNeighborOptions()) {
|
|
|
|
|
- params->align_corners = schema_params->align_corners();
|
|
|
|
|
- params->half_pixel_centers = schema_params->half_pixel_centers();
|
|
|
|
|
- } else {
|
|
|
|
|
- params->align_corners = false;
|
|
|
|
|
- params->half_pixel_centers = false;
|
|
|
|
|
- }
|
|
|
|
|
- *builtin_data = params.release();
|
|
|
|
|
- return kTfLiteOk;
|
|
|
|
|
- }
|
|
|
|
|
case BuiltinOperator_SKIP_GRAM: {
|
|
case BuiltinOperator_SKIP_GRAM: {
|
|
|
auto params = safe_allocator.Allocate<TfLiteSkipGramParams>();
|
|
auto params = safe_allocator.Allocate<TfLiteSkipGramParams>();
|
|
|
TF_LITE_ENSURE(error_reporter, params != nullptr);
|
|
TF_LITE_ENSURE(error_reporter, params != nullptr);
|
|
@@ -779,29 +619,6 @@ TfLiteStatus ParseOpData(const Operator* op, BuiltinOperator op_type,
|
|
|
*builtin_data = params.release();
|
|
*builtin_data = params.release();
|
|
|
return kTfLiteOk;
|
|
return kTfLiteOk;
|
|
|
}
|
|
}
|
|
|
- case BuiltinOperator_MEAN:
|
|
|
|
|
- case BuiltinOperator_REDUCE_MAX:
|
|
|
|
|
- case BuiltinOperator_REDUCE_MIN:
|
|
|
|
|
- case BuiltinOperator_REDUCE_PROD:
|
|
|
|
|
- case BuiltinOperator_REDUCE_ANY:
|
|
|
|
|
- case BuiltinOperator_SUM: {
|
|
|
|
|
- auto params = safe_allocator.Allocate<TfLiteReducerParams>();
|
|
|
|
|
- TF_LITE_ENSURE(error_reporter, params != nullptr);
|
|
|
|
|
- if (const auto* schema_params = op->builtin_options_as_ReducerOptions()) {
|
|
|
|
|
- params->keep_dims = schema_params->keep_dims();
|
|
|
|
|
- }
|
|
|
|
|
- *builtin_data = params.release();
|
|
|
|
|
- return kTfLiteOk;
|
|
|
|
|
- }
|
|
|
|
|
- case BuiltinOperator_SPLIT: {
|
|
|
|
|
- auto params = safe_allocator.Allocate<TfLiteSplitParams>();
|
|
|
|
|
- TF_LITE_ENSURE(error_reporter, params != nullptr);
|
|
|
|
|
- if (const auto* schema_params = op->builtin_options_as_SplitOptions()) {
|
|
|
|
|
- params->num_splits = schema_params->num_splits();
|
|
|
|
|
- }
|
|
|
|
|
- *builtin_data = params.release();
|
|
|
|
|
- return kTfLiteOk;
|
|
|
|
|
- }
|
|
|
|
|
case BuiltinOperator_SPLIT_V: {
|
|
case BuiltinOperator_SPLIT_V: {
|
|
|
auto params = safe_allocator.Allocate<TfLiteSplitParams>();
|
|
auto params = safe_allocator.Allocate<TfLiteSplitParams>();
|
|
|
TF_LITE_ENSURE(error_reporter, params != nullptr);
|
|
TF_LITE_ENSURE(error_reporter, params != nullptr);
|
|
@@ -824,42 +641,6 @@ TfLiteStatus ParseOpData(const Operator* op, BuiltinOperator op_type,
|
|
|
*builtin_data = params.release();
|
|
*builtin_data = params.release();
|
|
|
return kTfLiteOk;
|
|
return kTfLiteOk;
|
|
|
}
|
|
}
|
|
|
- case BuiltinOperator_STRIDED_SLICE: {
|
|
|
|
|
- auto params = safe_allocator.Allocate<TfLiteStridedSliceParams>();
|
|
|
|
|
- TF_LITE_ENSURE(error_reporter, params != nullptr);
|
|
|
|
|
- if (const auto* schema_params =
|
|
|
|
|
- op->builtin_options_as_StridedSliceOptions()) {
|
|
|
|
|
- params->begin_mask = schema_params->begin_mask();
|
|
|
|
|
- params->end_mask = schema_params->end_mask();
|
|
|
|
|
- params->ellipsis_mask = schema_params->ellipsis_mask();
|
|
|
|
|
- params->new_axis_mask = schema_params->new_axis_mask();
|
|
|
|
|
- params->shrink_axis_mask = schema_params->shrink_axis_mask();
|
|
|
|
|
- }
|
|
|
|
|
- *builtin_data = params.release();
|
|
|
|
|
- return kTfLiteOk;
|
|
|
|
|
- }
|
|
|
|
|
- case BuiltinOperator_ARG_MAX: {
|
|
|
|
|
- auto params = safe_allocator.Allocate<TfLiteArgMaxParams>();
|
|
|
|
|
- TF_LITE_ENSURE(error_reporter, params != nullptr);
|
|
|
|
|
- if (const auto* schema_params = op->builtin_options_as_ArgMaxOptions()) {
|
|
|
|
|
- TF_LITE_ENSURE_STATUS(ConvertTensorType(schema_params->output_type(),
|
|
|
|
|
- ¶ms->output_type,
|
|
|
|
|
- error_reporter));
|
|
|
|
|
- }
|
|
|
|
|
- *builtin_data = params.release();
|
|
|
|
|
- return kTfLiteOk;
|
|
|
|
|
- }
|
|
|
|
|
- case BuiltinOperator_ARG_MIN: {
|
|
|
|
|
- auto params = safe_allocator.Allocate<TfLiteArgMinParams>();
|
|
|
|
|
- TF_LITE_ENSURE(error_reporter, params != nullptr);
|
|
|
|
|
- if (const auto* schema_params = op->builtin_options_as_ArgMinOptions()) {
|
|
|
|
|
- TF_LITE_ENSURE_STATUS(ConvertTensorType(schema_params->output_type(),
|
|
|
|
|
- ¶ms->output_type,
|
|
|
|
|
- error_reporter));
|
|
|
|
|
- }
|
|
|
|
|
- *builtin_data = params.release();
|
|
|
|
|
- return kTfLiteOk;
|
|
|
|
|
- }
|
|
|
|
|
case BuiltinOperator_TRANSPOSE_CONV: {
|
|
case BuiltinOperator_TRANSPOSE_CONV: {
|
|
|
auto params = safe_allocator.Allocate<TfLiteTransposeConvParams>();
|
|
auto params = safe_allocator.Allocate<TfLiteTransposeConvParams>();
|
|
|
TF_LITE_ENSURE(error_reporter, params != nullptr);
|
|
TF_LITE_ENSURE(error_reporter, params != nullptr);
|
|
@@ -892,16 +673,6 @@ TfLiteStatus ParseOpData(const Operator* op, BuiltinOperator op_type,
|
|
|
*builtin_data = params.release();
|
|
*builtin_data = params.release();
|
|
|
return kTfLiteOk;
|
|
return kTfLiteOk;
|
|
|
}
|
|
}
|
|
|
- case BuiltinOperator_PACK: {
|
|
|
|
|
- auto params = safe_allocator.Allocate<TfLitePackParams>();
|
|
|
|
|
- TF_LITE_ENSURE(error_reporter, params != nullptr);
|
|
|
|
|
- if (const auto* pack_params = op->builtin_options_as_PackOptions()) {
|
|
|
|
|
- params->values_count = pack_params->values_count();
|
|
|
|
|
- params->axis = pack_params->axis();
|
|
|
|
|
- }
|
|
|
|
|
- *builtin_data = params.release();
|
|
|
|
|
- return kTfLiteOk;
|
|
|
|
|
- }
|
|
|
|
|
case BuiltinOperator_DELEGATE: {
|
|
case BuiltinOperator_DELEGATE: {
|
|
|
// TODO(ycling): Revisit when supporting saving delegated models.
|
|
// TODO(ycling): Revisit when supporting saving delegated models.
|
|
|
TF_LITE_REPORT_ERROR(error_reporter,
|
|
TF_LITE_REPORT_ERROR(error_reporter,
|
|
@@ -930,16 +701,6 @@ TfLiteStatus ParseOpData(const Operator* op, BuiltinOperator op_type,
|
|
|
*builtin_data = params.release();
|
|
*builtin_data = params.release();
|
|
|
return kTfLiteOk;
|
|
return kTfLiteOk;
|
|
|
}
|
|
}
|
|
|
- case BuiltinOperator_UNPACK: {
|
|
|
|
|
- auto params = safe_allocator.Allocate<TfLiteUnpackParams>();
|
|
|
|
|
- TF_LITE_ENSURE(error_reporter, params != nullptr);
|
|
|
|
|
- if (const auto* unpack_params = op->builtin_options_as_UnpackOptions()) {
|
|
|
|
|
- params->num = unpack_params->num();
|
|
|
|
|
- params->axis = unpack_params->axis();
|
|
|
|
|
- }
|
|
|
|
|
- *builtin_data = params.release();
|
|
|
|
|
- return kTfLiteOk;
|
|
|
|
|
- }
|
|
|
|
|
case BuiltinOperator_LEAKY_RELU: {
|
|
case BuiltinOperator_LEAKY_RELU: {
|
|
|
auto params = safe_allocator.Allocate<TfLiteLeakyReluParams>();
|
|
auto params = safe_allocator.Allocate<TfLiteLeakyReluParams>();
|
|
|
TF_LITE_ENSURE(error_reporter, params != nullptr);
|
|
TF_LITE_ENSURE(error_reporter, params != nullptr);
|
|
@@ -1019,7 +780,6 @@ TfLiteStatus ParseOpData(const Operator* op, BuiltinOperator op_type,
|
|
|
return kTfLiteOk;
|
|
return kTfLiteOk;
|
|
|
}
|
|
}
|
|
|
// Below are the ops with no builtin_data structure.
|
|
// Below are the ops with no builtin_data structure.
|
|
|
- case BuiltinOperator_ABS:
|
|
|
|
|
case BuiltinOperator_BATCH_TO_SPACE_ND:
|
|
case BuiltinOperator_BATCH_TO_SPACE_ND:
|
|
|
// TODO(aselle): Implement call in BuiltinOptions, but nullptrs are
|
|
// TODO(aselle): Implement call in BuiltinOptions, but nullptrs are
|
|
|
// ok for now, since there is no call implementation either.
|
|
// ok for now, since there is no call implementation either.
|
|
@@ -1032,46 +792,19 @@ TfLiteStatus ParseOpData(const Operator* op, BuiltinOperator op_type,
|
|
|
case BuiltinOperator_EQUAL:
|
|
case BuiltinOperator_EQUAL:
|
|
|
case BuiltinOperator_EXP:
|
|
case BuiltinOperator_EXP:
|
|
|
case BuiltinOperator_EXPAND_DIMS:
|
|
case BuiltinOperator_EXPAND_DIMS:
|
|
|
- case BuiltinOperator_CEIL:
|
|
|
|
|
- case BuiltinOperator_FLOOR:
|
|
|
|
|
- case BuiltinOperator_GREATER:
|
|
|
|
|
- case BuiltinOperator_GREATER_EQUAL:
|
|
|
|
|
- case BuiltinOperator_HARD_SWISH:
|
|
|
|
|
- case BuiltinOperator_LESS:
|
|
|
|
|
- case BuiltinOperator_LESS_EQUAL:
|
|
|
|
|
- case BuiltinOperator_LOG:
|
|
|
|
|
- case BuiltinOperator_LOGISTIC:
|
|
|
|
|
case BuiltinOperator_LOG_SOFTMAX:
|
|
case BuiltinOperator_LOG_SOFTMAX:
|
|
|
case BuiltinOperator_MATRIX_DIAG:
|
|
case BuiltinOperator_MATRIX_DIAG:
|
|
|
case BuiltinOperator_MATRIX_SET_DIAG:
|
|
case BuiltinOperator_MATRIX_SET_DIAG:
|
|
|
- case BuiltinOperator_MAXIMUM:
|
|
|
|
|
- case BuiltinOperator_MINIMUM:
|
|
|
|
|
- case BuiltinOperator_NEG:
|
|
|
|
|
- case BuiltinOperator_NOT_EQUAL:
|
|
|
|
|
- case BuiltinOperator_PAD:
|
|
|
|
|
- case BuiltinOperator_PADV2:
|
|
|
|
|
- case BuiltinOperator_PRELU:
|
|
|
|
|
- case BuiltinOperator_RELU:
|
|
|
|
|
- case BuiltinOperator_RELU6:
|
|
|
|
|
case BuiltinOperator_RELU_N1_TO_1:
|
|
case BuiltinOperator_RELU_N1_TO_1:
|
|
|
- case BuiltinOperator_ROUND:
|
|
|
|
|
- case BuiltinOperator_RSQRT:
|
|
|
|
|
case BuiltinOperator_SELECT:
|
|
case BuiltinOperator_SELECT:
|
|
|
case BuiltinOperator_SELECT_V2:
|
|
case BuiltinOperator_SELECT_V2:
|
|
|
- case BuiltinOperator_SIN:
|
|
|
|
|
case BuiltinOperator_SLICE:
|
|
case BuiltinOperator_SLICE:
|
|
|
case BuiltinOperator_SPACE_TO_BATCH_ND:
|
|
case BuiltinOperator_SPACE_TO_BATCH_ND:
|
|
|
- case BuiltinOperator_SQRT:
|
|
|
|
|
- case BuiltinOperator_TANH:
|
|
|
|
|
case BuiltinOperator_TILE:
|
|
case BuiltinOperator_TILE:
|
|
|
case BuiltinOperator_TOPK_V2:
|
|
case BuiltinOperator_TOPK_V2:
|
|
|
case BuiltinOperator_TRANSPOSE:
|
|
case BuiltinOperator_TRANSPOSE:
|
|
|
case BuiltinOperator_POW:
|
|
case BuiltinOperator_POW:
|
|
|
- case BuiltinOperator_LOGICAL_OR:
|
|
|
|
|
- case BuiltinOperator_LOGICAL_AND:
|
|
|
|
|
- case BuiltinOperator_LOGICAL_NOT:
|
|
|
|
|
case BuiltinOperator_FLOOR_DIV:
|
|
case BuiltinOperator_FLOOR_DIV:
|
|
|
- case BuiltinOperator_SQUARE:
|
|
|
|
|
case BuiltinOperator_ZEROS_LIKE:
|
|
case BuiltinOperator_ZEROS_LIKE:
|
|
|
case BuiltinOperator_FILL:
|
|
case BuiltinOperator_FILL:
|
|
|
case BuiltinOperator_FLOOR_MOD:
|
|
case BuiltinOperator_FLOOR_MOD:
|
|
@@ -1091,5 +824,916 @@ TfLiteStatus ParseOpData(const Operator* op, BuiltinOperator op_type,
|
|
|
}
|
|
}
|
|
|
return kTfLiteError;
|
|
return kTfLiteError;
|
|
|
} // NOLINT[readability/fn_size]
|
|
} // NOLINT[readability/fn_size]
|
|
|
|
|
+#endif // !defined(TF_LITE_STATIC_MEMORY)
|
|
|
|
|
+} // namespace
|
|
|
|
|
+
|
|
|
|
|
+TfLiteStatus ConvertTensorType(TensorType tensor_type, TfLiteType* type,
|
|
|
|
|
+ ErrorReporter* error_reporter) {
|
|
|
|
|
+ switch (tensor_type) {
|
|
|
|
|
+ case TensorType_FLOAT16:
|
|
|
|
|
+ *type = kTfLiteFloat16;
|
|
|
|
|
+ return kTfLiteOk;
|
|
|
|
|
+ case TensorType_FLOAT32:
|
|
|
|
|
+ *type = kTfLiteFloat32;
|
|
|
|
|
+ return kTfLiteOk;
|
|
|
|
|
+ case TensorType_FLOAT64:
|
|
|
|
|
+ *type = kTfLiteFloat64;
|
|
|
|
|
+ return kTfLiteOk;
|
|
|
|
|
+ case TensorType_INT16:
|
|
|
|
|
+ *type = kTfLiteInt16;
|
|
|
|
|
+ return kTfLiteOk;
|
|
|
|
|
+ case TensorType_INT32:
|
|
|
|
|
+ *type = kTfLiteInt32;
|
|
|
|
|
+ return kTfLiteOk;
|
|
|
|
|
+ case TensorType_UINT8:
|
|
|
|
|
+ *type = kTfLiteUInt8;
|
|
|
|
|
+ return kTfLiteOk;
|
|
|
|
|
+ case TensorType_INT8:
|
|
|
|
|
+ *type = kTfLiteInt8;
|
|
|
|
|
+ return kTfLiteOk;
|
|
|
|
|
+ case TensorType_INT64:
|
|
|
|
|
+ *type = kTfLiteInt64;
|
|
|
|
|
+ return kTfLiteOk;
|
|
|
|
|
+ case TensorType_STRING:
|
|
|
|
|
+ *type = kTfLiteString;
|
|
|
|
|
+ return kTfLiteOk;
|
|
|
|
|
+ case TensorType_BOOL:
|
|
|
|
|
+ *type = kTfLiteBool;
|
|
|
|
|
+ return kTfLiteOk;
|
|
|
|
|
+ case TensorType_COMPLEX64:
|
|
|
|
|
+ *type = kTfLiteComplex64;
|
|
|
|
|
+ return kTfLiteOk;
|
|
|
|
|
+ case TensorType_COMPLEX128:
|
|
|
|
|
+ *type = kTfLiteComplex128;
|
|
|
|
|
+ return kTfLiteOk;
|
|
|
|
|
+ default:
|
|
|
|
|
+ *type = kTfLiteNoType;
|
|
|
|
|
+ TF_LITE_REPORT_ERROR(error_reporter,
|
|
|
|
|
+ "Unsupported data type %d in tensor\n", tensor_type);
|
|
|
|
|
+ return kTfLiteError;
|
|
|
|
|
+ }
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+// We have this parse function instead of directly returning kTfLiteOk from the
|
|
|
|
|
+// switch-case in ParseOpData because this function is used as part of the
|
|
|
|
|
+// selective registration for the OpResolver implementation in micro.
|
|
|
|
|
+TfLiteStatus ParseAbs(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
|
|
|
|
|
+ void**) {
|
|
|
|
|
+ return kTfLiteOk;
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+TfLiteStatus ParseAdd(const Operator* op, ErrorReporter* error_reporter,
|
|
|
|
|
+ BuiltinDataAllocator* allocator, void** builtin_data) {
|
|
|
|
|
+ CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
|
|
|
|
|
+
|
|
|
|
|
+ SafeBuiltinDataAllocator safe_allocator(allocator);
|
|
|
|
|
+ std::unique_ptr<TfLiteAddParams, SafeBuiltinDataAllocator::BuiltinDataDeleter>
|
|
|
|
|
+ params = safe_allocator.Allocate<TfLiteAddParams>();
|
|
|
|
|
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
|
|
|
|
|
+
|
|
|
|
|
+ const AddOptions* schema_params = op->builtin_options_as_AddOptions();
|
|
|
|
|
+
|
|
|
|
|
+ if (schema_params != nullptr) {
|
|
|
|
|
+ params->activation =
|
|
|
|
|
+ ConvertActivation(schema_params->fused_activation_function());
|
|
|
|
|
+ params->pot_scale_int16 = schema_params->pot_scale_int16();
|
|
|
|
|
+ } else {
|
|
|
|
|
+ // TODO(b/157480169): We should either return kTfLiteError or fill in some
|
|
|
|
|
+ // reasonable defaults in the params struct. We are not doing so until we
|
|
|
|
|
+ // better undertand the ramifications of changing the legacy behavior.
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ *builtin_data = params.release();
|
|
|
|
|
+ return kTfLiteOk;
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+TfLiteStatus ParseArgMax(const Operator* op, ErrorReporter* error_reporter,
|
|
|
|
|
+ BuiltinDataAllocator* allocator, void** builtin_data) {
|
|
|
|
|
+ CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
|
|
|
|
|
+
|
|
|
|
|
+ SafeBuiltinDataAllocator safe_allocator(allocator);
|
|
|
|
|
+ std::unique_ptr<TfLiteArgMaxParams,
|
|
|
|
|
+ SafeBuiltinDataAllocator::BuiltinDataDeleter>
|
|
|
|
|
+ params = safe_allocator.Allocate<TfLiteArgMaxParams>();
|
|
|
|
|
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
|
|
|
|
|
+
|
|
|
|
|
+ const ArgMaxOptions* schema_params = op->builtin_options_as_ArgMaxOptions();
|
|
|
|
|
+
|
|
|
|
|
+ if (schema_params != nullptr) {
|
|
|
|
|
+ TF_LITE_ENSURE_STATUS(ConvertTensorType(
|
|
|
|
|
+ schema_params->output_type(), ¶ms->output_type, error_reporter));
|
|
|
|
|
+ } else {
|
|
|
|
|
+ // TODO(b/157480169): We should either return kTfLiteError or fill in some
|
|
|
|
|
+ // reasonable defaults in the params struct. We are not doing so until we
|
|
|
|
|
+ // better undertand the ramifications of changing the legacy behavior.
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ *builtin_data = params.release();
|
|
|
|
|
+ return kTfLiteOk;
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+TfLiteStatus ParseArgMin(const Operator* op, ErrorReporter* error_reporter,
|
|
|
|
|
+ BuiltinDataAllocator* allocator, void** builtin_data) {
|
|
|
|
|
+ CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
|
|
|
|
|
+
|
|
|
|
|
+ SafeBuiltinDataAllocator safe_allocator(allocator);
|
|
|
|
|
+ std::unique_ptr<TfLiteArgMinParams,
|
|
|
|
|
+ SafeBuiltinDataAllocator::BuiltinDataDeleter>
|
|
|
|
|
+ params = safe_allocator.Allocate<TfLiteArgMinParams>();
|
|
|
|
|
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
|
|
|
|
|
+
|
|
|
|
|
+ const ArgMinOptions* schema_params = op->builtin_options_as_ArgMinOptions();
|
|
|
|
|
+
|
|
|
|
|
+ if (schema_params != nullptr) {
|
|
|
|
|
+ TF_LITE_ENSURE_STATUS(ConvertTensorType(
|
|
|
|
|
+ schema_params->output_type(), ¶ms->output_type, error_reporter));
|
|
|
|
|
+ } else {
|
|
|
|
|
+ // TODO(b/157480169): We should either return kTfLiteError or fill in some
|
|
|
|
|
+ // reasonable defaults in the params struct. We are not doing so until we
|
|
|
|
|
+ // better undertand the ramifications of changing the legacy behavior.
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ *builtin_data = params.release();
|
|
|
|
|
+ return kTfLiteOk;
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+// We have this parse function instead of directly returning kTfLiteOk from the
|
|
|
|
|
+// switch-case in ParseOpData because this function is used as part of the
|
|
|
|
|
+// selective registration for the OpResolver implementation in micro.
|
|
|
|
|
+TfLiteStatus ParseCeil(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
|
|
|
|
|
+ void**) {
|
|
|
|
|
+ return kTfLiteOk;
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+TfLiteStatus ParseConcatenation(const Operator* op,
|
|
|
|
|
+ ErrorReporter* error_reporter,
|
|
|
|
|
+ BuiltinDataAllocator* allocator,
|
|
|
|
|
+ void** builtin_data) {
|
|
|
|
|
+ CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
|
|
|
|
|
+
|
|
|
|
|
+ SafeBuiltinDataAllocator safe_allocator(allocator);
|
|
|
|
|
+ std::unique_ptr<TfLiteConcatenationParams,
|
|
|
|
|
+ SafeBuiltinDataAllocator::BuiltinDataDeleter>
|
|
|
|
|
+ params = safe_allocator.Allocate<TfLiteConcatenationParams>();
|
|
|
|
|
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
|
|
|
|
|
+
|
|
|
|
|
+ const ConcatenationOptions* schema_params =
|
|
|
|
|
+ op->builtin_options_as_ConcatenationOptions();
|
|
|
|
|
+
|
|
|
|
|
+ if (schema_params != nullptr) {
|
|
|
|
|
+ params->activation =
|
|
|
|
|
+ ConvertActivation(schema_params->fused_activation_function());
|
|
|
|
|
+ params->axis = schema_params->axis();
|
|
|
|
|
+ } else {
|
|
|
|
|
+ // TODO(b/157480169): We should either return kTfLiteError or fill in some
|
|
|
|
|
+ // reasonable defaults in the params struct. We are not doing so until we
|
|
|
|
|
+ // better undertand the ramifications of changing the legacy behavior.
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ *builtin_data = params.release();
|
|
|
|
|
+ return kTfLiteOk;
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+TfLiteStatus ParseConv2D(const Operator* op, ErrorReporter* error_reporter,
|
|
|
|
|
+ BuiltinDataAllocator* allocator, void** builtin_data) {
|
|
|
|
|
+ CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
|
|
|
|
|
+
|
|
|
|
|
+ SafeBuiltinDataAllocator safe_allocator(allocator);
|
|
|
|
|
+ std::unique_ptr<TfLiteConvParams,
|
|
|
|
|
+ SafeBuiltinDataAllocator::BuiltinDataDeleter>
|
|
|
|
|
+ params = safe_allocator.Allocate<TfLiteConvParams>();
|
|
|
|
|
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
|
|
|
|
|
+
|
|
|
|
|
+ const Conv2DOptions* schema_params = op->builtin_options_as_Conv2DOptions();
|
|
|
|
|
+
|
|
|
|
|
+ if (schema_params != nullptr) {
|
|
|
|
|
+ params->padding = ConvertPadding(schema_params->padding());
|
|
|
|
|
+ params->stride_width = schema_params->stride_w();
|
|
|
|
|
+ params->stride_height = schema_params->stride_h();
|
|
|
|
|
+ params->activation =
|
|
|
|
|
+ ConvertActivation(schema_params->fused_activation_function());
|
|
|
|
|
+
|
|
|
|
|
+ params->dilation_width_factor = schema_params->dilation_w_factor();
|
|
|
|
|
+ params->dilation_height_factor = schema_params->dilation_h_factor();
|
|
|
|
|
+ } else {
|
|
|
|
|
+ // TODO(b/157480169): We should either return kTfLiteError or fill in some
|
|
|
|
|
+ // reasonable defaults in the params struct. We are not doing so until we
|
|
|
|
|
+ // better undertand the ramifications of changing the legacy behavior.
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ *builtin_data = params.release();
|
|
|
|
|
+ return kTfLiteOk;
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+// We have this parse function instead of directly returning kTfLiteOk from the
|
|
|
|
|
+// switch-case in ParseOpData because this function is used as part of the
|
|
|
|
|
+// selective registration for the OpResolver implementation in micro.
|
|
|
|
|
+TfLiteStatus ParseCos(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
|
|
|
|
|
+ void**) {
|
|
|
|
|
+ return kTfLiteOk;
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+TfLiteStatus ParseDepthwiseConv2D(const Operator* op,
|
|
|
|
|
+ ErrorReporter* error_reporter,
|
|
|
|
|
+ BuiltinDataAllocator* allocator,
|
|
|
|
|
+ void** builtin_data) {
|
|
|
|
|
+ CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
|
|
|
|
|
+
|
|
|
|
|
+ SafeBuiltinDataAllocator safe_allocator(allocator);
|
|
|
|
|
+
|
|
|
|
|
+ std::unique_ptr<TfLiteDepthwiseConvParams,
|
|
|
|
|
+ SafeBuiltinDataAllocator::BuiltinDataDeleter>
|
|
|
|
|
+ params = safe_allocator.Allocate<TfLiteDepthwiseConvParams>();
|
|
|
|
|
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
|
|
|
|
|
+
|
|
|
|
|
+ const DepthwiseConv2DOptions* schema_params =
|
|
|
|
|
+ op->builtin_options_as_DepthwiseConv2DOptions();
|
|
|
|
|
+
|
|
|
|
|
+ if (schema_params != nullptr) {
|
|
|
|
|
+ params->padding = ConvertPadding(schema_params->padding());
|
|
|
|
|
+ params->stride_width = schema_params->stride_w();
|
|
|
|
|
+ params->stride_height = schema_params->stride_h();
|
|
|
|
|
+ params->depth_multiplier = schema_params->depth_multiplier();
|
|
|
|
|
+ params->activation =
|
|
|
|
|
+ ConvertActivation(schema_params->fused_activation_function());
|
|
|
|
|
+
|
|
|
|
|
+ params->dilation_width_factor = schema_params->dilation_w_factor();
|
|
|
|
|
+ params->dilation_height_factor = schema_params->dilation_h_factor();
|
|
|
|
|
+ } else {
|
|
|
|
|
+ // TODO(b/157480169): We should either return kTfLiteError or fill in some
|
|
|
|
|
+ // reasonable defaults in the params struct. We are not doing so until we
|
|
|
|
|
+ // better undertand the ramifications of changing the legacy behavior.
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ *builtin_data = params.release();
|
|
|
|
|
+ return kTfLiteOk;
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+// We have this parse function instead of directly returning kTfLiteOk from the
|
|
|
|
|
+// switch-case in ParseOpData because this function is used as part of the
|
|
|
|
|
+// selective registration for the OpResolver implementation in micro.
|
|
|
|
|
+TfLiteStatus ParseDequantize(const Operator*, ErrorReporter*,
|
|
|
|
|
+ BuiltinDataAllocator*, void**) {
|
|
|
|
|
+ return kTfLiteOk;
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+// We have this parse function instead of directly returning kTfLiteOk from the
|
|
|
|
|
+// switch-case in ParseOpData because this function is used as part of the
|
|
|
|
|
+// selective registration for the OpResolver implementation in micro.
|
|
|
|
|
+TfLiteStatus ParseEqual(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
|
|
|
|
|
+ void**) {
|
|
|
|
|
+ return kTfLiteOk;
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+// We have this parse function instead of directly returning kTfLiteOk from the
|
|
|
|
|
+// switch-case in ParseOpData because this function is used as part of the
|
|
|
|
|
+// selective registration for the OpResolver implementation in micro.
|
|
|
|
|
+TfLiteStatus ParseFloor(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
|
|
|
|
|
+ void**) {
|
|
|
|
|
+ return kTfLiteOk;
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+TfLiteStatus ParseFullyConnected(const Operator* op,
|
|
|
|
|
+ ErrorReporter* error_reporter,
|
|
|
|
|
+ BuiltinDataAllocator* allocator,
|
|
|
|
|
+ void** builtin_data) {
|
|
|
|
|
+ CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
|
|
|
|
|
+
|
|
|
|
|
+ SafeBuiltinDataAllocator safe_allocator(allocator);
|
|
|
|
|
+
|
|
|
|
|
+ std::unique_ptr<TfLiteFullyConnectedParams,
|
|
|
|
|
+ SafeBuiltinDataAllocator::BuiltinDataDeleter>
|
|
|
|
|
+ params = safe_allocator.Allocate<TfLiteFullyConnectedParams>();
|
|
|
|
|
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
|
|
|
|
|
+
|
|
|
|
|
+ const FullyConnectedOptions* schema_params =
|
|
|
|
|
+ op->builtin_options_as_FullyConnectedOptions();
|
|
|
|
|
+
|
|
|
|
|
+ if (schema_params != nullptr) {
|
|
|
|
|
+ params->activation =
|
|
|
|
|
+ ConvertActivation(schema_params->fused_activation_function());
|
|
|
|
|
+ params->keep_num_dims = schema_params->keep_num_dims();
|
|
|
|
|
+ params->asymmetric_quantize_inputs =
|
|
|
|
|
+ schema_params->asymmetric_quantize_inputs();
|
|
|
|
|
+
|
|
|
|
|
+ switch (schema_params->weights_format()) {
|
|
|
|
|
+ case FullyConnectedOptionsWeightsFormat_DEFAULT:
|
|
|
|
|
+ params->weights_format = kTfLiteFullyConnectedWeightsFormatDefault;
|
|
|
|
|
+ break;
|
|
|
|
|
+ case FullyConnectedOptionsWeightsFormat_SHUFFLED4x16INT8:
|
|
|
|
|
+ params->weights_format =
|
|
|
|
|
+ kTfLiteFullyConnectedWeightsFormatShuffled4x16Int8;
|
|
|
|
|
+ break;
|
|
|
|
|
+ default:
|
|
|
|
|
+ TF_LITE_REPORT_ERROR(error_reporter,
|
|
|
|
|
+ "Unhandled fully-connected weights format.");
|
|
|
|
|
+ return kTfLiteError;
|
|
|
|
|
+ }
|
|
|
|
|
+ } else {
|
|
|
|
|
+ // TODO(b/157480169): We should either return kTfLiteError or fill in some
|
|
|
|
|
+ // reasonable defaults in the params struct. We are not doing so until we
|
|
|
|
|
+ // better undertand the ramifications of changing the legacy behavior.
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ *builtin_data = params.release();
|
|
|
|
|
+ return kTfLiteOk;
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+// We have this parse function instead of directly returning kTfLiteOk from the
|
|
|
|
|
+// switch-case in ParseOpData because this function is used as part of the
|
|
|
|
|
+// selective registration for the OpResolver implementation in micro.
|
|
|
|
|
+TfLiteStatus ParseGreater(const Operator*, ErrorReporter*,
|
|
|
|
|
+ BuiltinDataAllocator*, void**) {
|
|
|
|
|
+ return kTfLiteOk;
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+// We have this parse function instead of directly returning kTfLiteOk from the
|
|
|
|
|
+// switch-case in ParseOpData because this function is used as part of the
|
|
|
|
|
+// selective registration for the OpResolver implementation in micro.
|
|
|
|
|
+TfLiteStatus ParseGreaterEqual(const Operator*, ErrorReporter*,
|
|
|
|
|
+ BuiltinDataAllocator*, void**) {
|
|
|
|
|
+ return kTfLiteOk;
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+// We have this parse function instead of directly returning kTfLiteOk from the
|
|
|
|
|
+// switch-case in ParseOpData because this function is used as part of the
|
|
|
|
|
+// selective registration for the OpResolver implementation in micro.
|
|
|
|
|
+TfLiteStatus ParseHardSwish(const Operator*, ErrorReporter*,
|
|
|
|
|
+ BuiltinDataAllocator*, void**) {
|
|
|
|
|
+ return kTfLiteOk;
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+TfLiteStatus ParseL2Normalization(const Operator* op,
|
|
|
|
|
+ ErrorReporter* error_reporter,
|
|
|
|
|
+ BuiltinDataAllocator* allocator,
|
|
|
|
|
+ void** builtin_data) {
|
|
|
|
|
+ CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
|
|
|
|
|
+
|
|
|
|
|
+ SafeBuiltinDataAllocator safe_allocator(allocator);
|
|
|
|
|
+ std::unique_ptr<TfLiteL2NormParams,
|
|
|
|
|
+ SafeBuiltinDataAllocator::BuiltinDataDeleter>
|
|
|
|
|
+ params = safe_allocator.Allocate<TfLiteL2NormParams>();
|
|
|
|
|
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
|
|
|
|
|
+
|
|
|
|
|
+ const L2NormOptions* schema_params = op->builtin_options_as_L2NormOptions();
|
|
|
|
|
+
|
|
|
|
|
+ if (schema_params != nullptr) {
|
|
|
|
|
+ params->activation =
|
|
|
|
|
+ ConvertActivation(schema_params->fused_activation_function());
|
|
|
|
|
+ } else {
|
|
|
|
|
+ // TODO(b/157480169): We should either return kTfLiteError or fill in some
|
|
|
|
|
+ // reasonable defaults in the params struct. We are not doing so until we
|
|
|
|
|
+ // better undertand the ramifications of changing the legacy behavior.
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ *builtin_data = params.release();
|
|
|
|
|
+ return kTfLiteOk;
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+// We have this parse function instead of directly returning kTfLiteOk from the
|
|
|
|
|
+// switch-case in ParseOpData because this function is used as part of the
|
|
|
|
|
+// selective registration for the OpResolver implementation in micro.
|
|
|
|
|
+TfLiteStatus ParseLess(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
|
|
|
|
|
+ void**) {
|
|
|
|
|
+ return kTfLiteOk;
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+// We have this parse function instead of directly returning kTfLiteOk from the
|
|
|
|
|
+// switch-case in ParseOpData because this function is used as part of the
|
|
|
|
|
+// selective registration for the OpResolver implementation in micro.
|
|
|
|
|
+TfLiteStatus ParseLessEqual(const Operator*, ErrorReporter*,
|
|
|
|
|
+ BuiltinDataAllocator*, void**) {
|
|
|
|
|
+ return kTfLiteOk;
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+// We have this parse function instead of directly returning kTfLiteOk from the
|
|
|
|
|
+// switch-case in ParseOpData because this function is used as part of the
|
|
|
|
|
+// selective registration for the OpResolver implementation in micro.
|
|
|
|
|
+TfLiteStatus ParseLog(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
|
|
|
|
|
+ void**) {
|
|
|
|
|
+ return kTfLiteOk;
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+// We have this parse function instead of directly returning kTfLiteOk from the
|
|
|
|
|
+// switch-case in ParseOpData because this function is used as part of the
|
|
|
|
|
+// selective registration for the OpResolver implementation in micro.
|
|
|
|
|
+TfLiteStatus ParseLogicalAnd(const Operator*, ErrorReporter*,
|
|
|
|
|
+ BuiltinDataAllocator*, void**) {
|
|
|
|
|
+ return kTfLiteOk;
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+// We have this parse function instead of directly returning kTfLiteOk from the
|
|
|
|
|
+// switch-case in ParseOpData because this function is used as part of the
|
|
|
|
|
+// selective registration for the OpResolver implementation in micro.
|
|
|
|
|
+TfLiteStatus ParseLogicalNot(const Operator*, ErrorReporter*,
|
|
|
|
|
+ BuiltinDataAllocator*, void**) {
|
|
|
|
|
+ return kTfLiteOk;
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+// We have this parse function instead of directly returning kTfLiteOk from the
|
|
|
|
|
+// switch-case in ParseOpData because this function is used as part of the
|
|
|
|
|
+// selective registration for the OpResolver implementation in micro.
|
|
|
|
|
+TfLiteStatus ParseLogicalOr(const Operator*, ErrorReporter*,
|
|
|
|
|
+ BuiltinDataAllocator*, void**) {
|
|
|
|
|
+ return kTfLiteOk;
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+// We have this parse function instead of directly returning kTfLiteOk from the
|
|
|
|
|
+// switch-case in ParseOpData because this function is used as part of the
|
|
|
|
|
+// selective registration for the OpResolver implementation in micro.
|
|
|
|
|
+TfLiteStatus ParseLogistic(const Operator*, ErrorReporter*,
|
|
|
|
|
+ BuiltinDataAllocator*, void**) {
|
|
|
|
|
+ return kTfLiteOk;
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+// We have this parse function instead of directly returning kTfLiteOk from the
|
|
|
|
|
+// switch-case in ParseOpData because this function is used as part of the
|
|
|
|
|
+// selective registration for the OpResolver implementation in micro.
|
|
|
|
|
+TfLiteStatus ParseMaximum(const Operator*, ErrorReporter*,
|
|
|
|
|
+ BuiltinDataAllocator*, void**) {
|
|
|
|
|
+ return kTfLiteOk;
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+// We have this parse function instead of directly returning kTfLiteOk from the
|
|
|
|
|
+// switch-case in ParseOpData because this function is used as part of the
|
|
|
|
|
+// selective registration for the OpResolver implementation in micro.
|
|
|
|
|
+TfLiteStatus ParseMinimum(const Operator*, ErrorReporter*,
|
|
|
|
|
+ BuiltinDataAllocator*, void**) {
|
|
|
|
|
+ return kTfLiteOk;
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+TfLiteStatus ParseMul(const Operator* op, ErrorReporter* error_reporter,
|
|
|
|
|
+ BuiltinDataAllocator* allocator, void** builtin_data) {
|
|
|
|
|
+ CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
|
|
|
|
|
+
|
|
|
|
|
+ SafeBuiltinDataAllocator safe_allocator(allocator);
|
|
|
|
|
+ std::unique_ptr<TfLiteMulParams, SafeBuiltinDataAllocator::BuiltinDataDeleter>
|
|
|
|
|
+ params = safe_allocator.Allocate<TfLiteMulParams>();
|
|
|
|
|
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
|
|
|
|
|
+
|
|
|
|
|
+ const MulOptions* schema_params = op->builtin_options_as_MulOptions();
|
|
|
|
|
+
|
|
|
|
|
+ if (schema_params != nullptr) {
|
|
|
|
|
+ params->activation =
|
|
|
|
|
+ ConvertActivation(schema_params->fused_activation_function());
|
|
|
|
|
+ } else {
|
|
|
|
|
+ // TODO(b/157480169): We should either return kTfLiteError or fill in some
|
|
|
|
|
+ // reasonable defaults in the params struct. We are not doing so until we
|
|
|
|
|
+ // better undertand the ramifications of changing the legacy behavior.
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ *builtin_data = params.release();
|
|
|
|
|
+ return kTfLiteOk;
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseNeg(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                      void**) {
  // NEG carries no builtin options; there is nothing to parse.
  return kTfLiteOk;
}
|
|
|
|
|
+
|
|
|
|
|
// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseNotEqual(const Operator*, ErrorReporter*,
                           BuiltinDataAllocator*, void**) {
  // NOT_EQUAL carries no builtin options; there is nothing to parse.
  return kTfLiteOk;
}
|
|
|
|
|
+
|
|
|
|
|
+TfLiteStatus ParsePack(const Operator* op, ErrorReporter* error_reporter,
|
|
|
|
|
+ BuiltinDataAllocator* allocator, void** builtin_data) {
|
|
|
|
|
+ CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
|
|
|
|
|
+
|
|
|
|
|
+ SafeBuiltinDataAllocator safe_allocator(allocator);
|
|
|
|
|
+ std::unique_ptr<TfLitePackParams,
|
|
|
|
|
+ SafeBuiltinDataAllocator::BuiltinDataDeleter>
|
|
|
|
|
+ params = safe_allocator.Allocate<TfLitePackParams>();
|
|
|
|
|
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
|
|
|
|
|
+
|
|
|
|
|
+ const PackOptions* schema_params = op->builtin_options_as_PackOptions();
|
|
|
|
|
+
|
|
|
|
|
+ if (schema_params != nullptr) {
|
|
|
|
|
+ params->values_count = schema_params->values_count();
|
|
|
|
|
+ params->axis = schema_params->axis();
|
|
|
|
|
+ } else {
|
|
|
|
|
+ // TODO(b/157480169): We should either return kTfLiteError or fill in some
|
|
|
|
|
+ // reasonable defaults in the params struct. We are not doing so until we
|
|
|
|
|
+ // better undertand the ramifications of changing the legacy behavior.
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ *builtin_data = params.release();
|
|
|
|
|
+ return kTfLiteOk;
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParsePad(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                      void**) {
  // PAD carries no builtin options; there is nothing to parse.
  return kTfLiteOk;
}
|
|
|
|
|
+
|
|
|
|
|
// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParsePadV2(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                        void**) {
  // PADV2 carries no builtin options; there is nothing to parse.
  return kTfLiteOk;
}
|
|
|
|
|
+
|
|
|
|
|
+TfLiteStatus ParsePool(const Operator* op, ErrorReporter* error_reporter,
|
|
|
|
|
+ BuiltinDataAllocator* allocator, void** builtin_data) {
|
|
|
|
|
+ CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
|
|
|
|
|
+
|
|
|
|
|
+ SafeBuiltinDataAllocator safe_allocator(allocator);
|
|
|
|
|
+ std::unique_ptr<TfLitePoolParams,
|
|
|
|
|
+ SafeBuiltinDataAllocator::BuiltinDataDeleter>
|
|
|
|
|
+ params = safe_allocator.Allocate<TfLitePoolParams>();
|
|
|
|
|
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
|
|
|
|
|
+
|
|
|
|
|
+ const Pool2DOptions* schema_params = op->builtin_options_as_Pool2DOptions();
|
|
|
|
|
+
|
|
|
|
|
+ if (schema_params != nullptr) {
|
|
|
|
|
+ params->padding = ConvertPadding(schema_params->padding());
|
|
|
|
|
+ params->stride_width = schema_params->stride_w();
|
|
|
|
|
+ params->stride_height = schema_params->stride_h();
|
|
|
|
|
+ params->filter_width = schema_params->filter_width();
|
|
|
|
|
+ params->filter_height = schema_params->filter_height();
|
|
|
|
|
+ params->activation =
|
|
|
|
|
+ ConvertActivation(schema_params->fused_activation_function());
|
|
|
|
|
+ } else {
|
|
|
|
|
+ // TODO(b/157480169): We should either return kTfLiteError or fill in some
|
|
|
|
|
+ // reasonable defaults in the params struct. We are not doing so until we
|
|
|
|
|
+ // better undertand the ramifications of changing the legacy behavior.
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ *builtin_data = params.release();
|
|
|
|
|
+ return kTfLiteOk;
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParsePrelu(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                        void**) {
  // PRELU carries no builtin options; there is nothing to parse.
  return kTfLiteOk;
}
|
|
|
|
|
+
|
|
|
|
|
// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseQuantize(const Operator*, ErrorReporter*,
                           BuiltinDataAllocator*, void**) {
  // QUANTIZE carries no builtin options; there is nothing to parse.
  return kTfLiteOk;
}
|
|
|
|
|
+
|
|
|
|
|
+TfLiteStatus ParseReducer(const Operator* op, ErrorReporter* error_reporter,
|
|
|
|
|
+ BuiltinDataAllocator* allocator,
|
|
|
|
|
+ void** builtin_data) {
|
|
|
|
|
+ CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
|
|
|
|
|
+
|
|
|
|
|
+ SafeBuiltinDataAllocator safe_allocator(allocator);
|
|
|
|
|
+
|
|
|
|
|
+ std::unique_ptr<TfLiteReducerParams,
|
|
|
|
|
+ SafeBuiltinDataAllocator::BuiltinDataDeleter>
|
|
|
|
|
+ params = safe_allocator.Allocate<TfLiteReducerParams>();
|
|
|
|
|
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
|
|
|
|
|
+
|
|
|
|
|
+ const ReducerOptions* schema_params = op->builtin_options_as_ReducerOptions();
|
|
|
|
|
+
|
|
|
|
|
+ if (schema_params != nullptr) {
|
|
|
|
|
+ params->keep_dims = schema_params->keep_dims();
|
|
|
|
|
+ } else {
|
|
|
|
|
+ // TODO(b/157480169): We should either return kTfLiteError or fill in some
|
|
|
|
|
+ // reasonable defaults in the params struct. We are not doing so until we
|
|
|
|
|
+ // better undertand the ramifications of changing the legacy behavior.
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ *builtin_data = params.release();
|
|
|
|
|
+ return kTfLiteOk;
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseRelu(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                       void**) {
  // RELU carries no builtin options; there is nothing to parse.
  return kTfLiteOk;
}
|
|
|
|
|
+
|
|
|
|
|
// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseRelu6(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                        void**) {
  // RELU6 carries no builtin options; there is nothing to parse.
  return kTfLiteOk;
}
|
|
|
|
|
+
|
|
|
|
|
// Parses the ReshapeOptions of `op` into a newly allocated
// TfLiteReshapeParams, transferring ownership to the caller via
// `*builtin_data`. The target shape is copied out of the flatbuffer only when
// it is present; FlatBufferIntVectorToArray also enforces that it fits in the
// fixed-size params->shape array.
TfLiteStatus ParseReshape(const Operator* op, ErrorReporter* error_reporter,
                          BuiltinDataAllocator* allocator,
                          void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);

  SafeBuiltinDataAllocator safe_allocator(allocator);

  std::unique_ptr<TfLiteReshapeParams,
                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
      params = safe_allocator.Allocate<TfLiteReshapeParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);

  const ReshapeOptions* schema_params = op->builtin_options_as_ReshapeOptions();

  if (schema_params != nullptr) {
    const flatbuffers::Vector<int32_t>* new_shape = schema_params->new_shape();
    // TODO(b/147203660): We need to figure out when dynamic reshape
    // (new_shape is a tensor) happens, why the option is not a nullptr.
    // But nonetheless, we should only copy when new_shape is not a nullptr.
    if (new_shape != nullptr) {
      TF_LITE_ENSURE_STATUS(
          FlatBufferIntVectorToArray(sizeof(params->shape), new_shape,
                                     params->shape, error_reporter, "reshape"));
      params->num_dimensions = new_shape->size();
    } else {
      // TODO(b/157480169) TODO(b/147203660): We should either return
      // kTfLiteError or fill in some reasonable defaults in the params struct.
      // We are not doing so until we better understand the ramifications of
      // changing the legacy behavior.
    }
  } else {
    // TODO(b/157480169): We should either return kTfLiteError or fill in some
    // reasonable defaults in the params struct. We are not doing so until we
    // better understand the ramifications of changing the legacy behavior.
  }

  *builtin_data = params.release();
  return kTfLiteOk;
}
|
|
|
|
|
+
|
|
|
|
|
+TfLiteStatus ParseResizeNearestNeighbor(const Operator* op,
|
|
|
|
|
+ ErrorReporter* error_reporter,
|
|
|
|
|
+ BuiltinDataAllocator* allocator,
|
|
|
|
|
+ void** builtin_data) {
|
|
|
|
|
+ CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
|
|
|
|
|
+
|
|
|
|
|
+ SafeBuiltinDataAllocator safe_allocator(allocator);
|
|
|
|
|
+ std::unique_ptr<TfLiteResizeNearestNeighborParams,
|
|
|
|
|
+ SafeBuiltinDataAllocator::BuiltinDataDeleter>
|
|
|
|
|
+ params = safe_allocator.Allocate<TfLiteResizeNearestNeighborParams>();
|
|
|
|
|
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
|
|
|
|
|
+
|
|
|
|
|
+ const ResizeNearestNeighborOptions* schema_params =
|
|
|
|
|
+ op->builtin_options_as_ResizeNearestNeighborOptions();
|
|
|
|
|
+
|
|
|
|
|
+ if (schema_params != nullptr) {
|
|
|
|
|
+ params->align_corners = schema_params->align_corners();
|
|
|
|
|
+ params->half_pixel_centers = schema_params->half_pixel_centers();
|
|
|
|
|
+ } else {
|
|
|
|
|
+ params->align_corners = false;
|
|
|
|
|
+ params->half_pixel_centers = false;
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ *builtin_data = params.release();
|
|
|
|
|
+ return kTfLiteOk;
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseRound(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                        void**) {
  // ROUND carries no builtin options; there is nothing to parse.
  return kTfLiteOk;
}
|
|
|
|
|
+
|
|
|
|
|
// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseRsqrt(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                        void**) {
  // RSQRT carries no builtin options; there is nothing to parse.
  return kTfLiteOk;
}
|
|
|
|
|
+
|
|
|
|
|
// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseSin(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                      void**) {
  // SIN carries no builtin options; there is nothing to parse.
  return kTfLiteOk;
}
|
|
|
|
|
+
|
|
|
|
|
+TfLiteStatus ParseSoftmax(const Operator* op, ErrorReporter* error_reporter,
|
|
|
|
|
+ BuiltinDataAllocator* allocator,
|
|
|
|
|
+ void** builtin_data) {
|
|
|
|
|
+ CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
|
|
|
|
|
+
|
|
|
|
|
+ SafeBuiltinDataAllocator safe_allocator(allocator);
|
|
|
|
|
+ std::unique_ptr<TfLiteSoftmaxParams,
|
|
|
|
|
+ SafeBuiltinDataAllocator::BuiltinDataDeleter>
|
|
|
|
|
+ params = safe_allocator.Allocate<TfLiteSoftmaxParams>();
|
|
|
|
|
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
|
|
|
|
|
+
|
|
|
|
|
+ const SoftmaxOptions* schema_params = op->builtin_options_as_SoftmaxOptions();
|
|
|
|
|
+
|
|
|
|
|
+ if (schema_params != nullptr) {
|
|
|
|
|
+ params->beta = schema_params->beta();
|
|
|
|
|
+ } else {
|
|
|
|
|
+ // TODO(b/157480169): We should either return kTfLiteError or fill in some
|
|
|
|
|
+ // reasonable defaults in the params struct. We are not doing so until we
|
|
|
|
|
+ // better undertand the ramifications of changing the legacy behavior.
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ *builtin_data = params.release();
|
|
|
|
|
+ return kTfLiteOk;
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+TfLiteStatus ParseSplit(const Operator* op, ErrorReporter* error_reporter,
|
|
|
|
|
+ BuiltinDataAllocator* allocator, void** builtin_data) {
|
|
|
|
|
+ CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
|
|
|
|
|
+
|
|
|
|
|
+ SafeBuiltinDataAllocator safe_allocator(allocator);
|
|
|
|
|
+ std::unique_ptr<TfLiteSplitParams,
|
|
|
|
|
+ SafeBuiltinDataAllocator::BuiltinDataDeleter>
|
|
|
|
|
+ params = safe_allocator.Allocate<TfLiteSplitParams>();
|
|
|
|
|
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
|
|
|
|
|
+
|
|
|
|
|
+ const SplitOptions* schema_params = op->builtin_options_as_SplitOptions();
|
|
|
|
|
+
|
|
|
|
|
+ if (schema_params != nullptr) {
|
|
|
|
|
+ params->num_splits = schema_params->num_splits();
|
|
|
|
|
+ } else {
|
|
|
|
|
+ // TODO(b/157480169): We should either return kTfLiteError or fill in some
|
|
|
|
|
+ // reasonable defaults in the params struct. We are not doing so until we
|
|
|
|
|
+ // better undertand the ramifications of changing the legacy behavior.
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ *builtin_data = params.release();
|
|
|
|
|
+ return kTfLiteOk;
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseSqrt(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                       void**) {
  // SQRT carries no builtin options; there is nothing to parse.
  return kTfLiteOk;
}
|
|
|
|
|
+
|
|
|
|
|
// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseSquare(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                         void**) {
  // SQUARE carries no builtin options; there is nothing to parse.
  return kTfLiteOk;
}
|
|
|
|
|
+
|
|
|
|
|
+TfLiteStatus ParseStridedSlice(const Operator* op,
|
|
|
|
|
+ ErrorReporter* error_reporter,
|
|
|
|
|
+ BuiltinDataAllocator* allocator,
|
|
|
|
|
+ void** builtin_data) {
|
|
|
|
|
+ CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
|
|
|
|
|
+
|
|
|
|
|
+ SafeBuiltinDataAllocator safe_allocator(allocator);
|
|
|
|
|
+ std::unique_ptr<TfLiteStridedSliceParams,
|
|
|
|
|
+ SafeBuiltinDataAllocator::BuiltinDataDeleter>
|
|
|
|
|
+ params = safe_allocator.Allocate<TfLiteStridedSliceParams>();
|
|
|
|
|
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
|
|
|
|
|
+
|
|
|
|
|
+ const StridedSliceOptions* schema_params =
|
|
|
|
|
+ op->builtin_options_as_StridedSliceOptions();
|
|
|
|
|
+
|
|
|
|
|
+ if (schema_params != nullptr) {
|
|
|
|
|
+ params->begin_mask = schema_params->begin_mask();
|
|
|
|
|
+ params->end_mask = schema_params->end_mask();
|
|
|
|
|
+ params->ellipsis_mask = schema_params->ellipsis_mask();
|
|
|
|
|
+ params->new_axis_mask = schema_params->new_axis_mask();
|
|
|
|
|
+ params->shrink_axis_mask = schema_params->shrink_axis_mask();
|
|
|
|
|
+ } else {
|
|
|
|
|
+ // TODO(b/157480169): We should either return kTfLiteError or fill in some
|
|
|
|
|
+ // reasonable defaults in the params struct. We are not doing so until we
|
|
|
|
|
+ // better undertand the ramifications of changing the legacy behavior.
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ *builtin_data = params.release();
|
|
|
|
|
+ return kTfLiteOk;
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+TfLiteStatus ParseSub(const Operator* op, ErrorReporter* error_reporter,
|
|
|
|
|
+ BuiltinDataAllocator* allocator, void** builtin_data) {
|
|
|
|
|
+ CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
|
|
|
|
|
+
|
|
|
|
|
+ SafeBuiltinDataAllocator safe_allocator(allocator);
|
|
|
|
|
+ std::unique_ptr<TfLiteSubParams, SafeBuiltinDataAllocator::BuiltinDataDeleter>
|
|
|
|
|
+ params = safe_allocator.Allocate<TfLiteSubParams>();
|
|
|
|
|
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
|
|
|
|
|
+
|
|
|
|
|
+ const SubOptions* schema_params = op->builtin_options_as_SubOptions();
|
|
|
|
|
+
|
|
|
|
|
+ if (schema_params != nullptr) {
|
|
|
|
|
+ params->activation =
|
|
|
|
|
+ ConvertActivation(schema_params->fused_activation_function());
|
|
|
|
|
+ params->pot_scale_int16 = schema_params->pot_scale_int16();
|
|
|
|
|
+ } else {
|
|
|
|
|
+ // TODO(b/157480169): We should either return kTfLiteError or fill in some
|
|
|
|
|
+ // reasonable defaults in the params struct. We are not doing so until we
|
|
|
|
|
+ // better undertand the ramifications of changing the legacy behavior.
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ *builtin_data = params.release();
|
|
|
|
|
+ return kTfLiteOk;
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
+TfLiteStatus ParseSvdf(const Operator* op, ErrorReporter* error_reporter,
|
|
|
|
|
+ BuiltinDataAllocator* allocator, void** builtin_data) {
|
|
|
|
|
+ CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
|
|
|
|
|
+
|
|
|
|
|
+ SafeBuiltinDataAllocator safe_allocator(allocator);
|
|
|
|
|
+ std::unique_ptr<TfLiteSVDFParams,
|
|
|
|
|
+ SafeBuiltinDataAllocator::BuiltinDataDeleter>
|
|
|
|
|
+ params = safe_allocator.Allocate<TfLiteSVDFParams>();
|
|
|
|
|
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
|
|
|
|
|
+
|
|
|
|
|
+ const SVDFOptions* schema_params = op->builtin_options_as_SVDFOptions();
|
|
|
|
|
+ if (schema_params != nullptr) {
|
|
|
|
|
+ params->rank = schema_params->rank();
|
|
|
|
|
+ params->activation =
|
|
|
|
|
+ ConvertActivation(schema_params->fused_activation_function());
|
|
|
|
|
+ params->asymmetric_quantize_inputs =
|
|
|
|
|
+ schema_params->asymmetric_quantize_inputs();
|
|
|
|
|
+ } else {
|
|
|
|
|
+ // TODO(b/157480169): We should either return kTfLiteError or fill in some
|
|
|
|
|
+ // reasonable defaults in the params struct. We are not doing so until we
|
|
|
|
|
+ // better undertand the ramifications of changing the legacy behavior.
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ *builtin_data = params.release();
|
|
|
|
|
+ return kTfLiteOk;
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseTanh(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                       void**) {
  // TANH carries no builtin options; there is nothing to parse.
  return kTfLiteOk;
}
|
|
|
|
|
+
|
|
|
|
|
+TfLiteStatus ParseUnpack(const Operator* op, ErrorReporter* error_reporter,
|
|
|
|
|
+ BuiltinDataAllocator* allocator, void** builtin_data) {
|
|
|
|
|
+ CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
|
|
|
|
|
+
|
|
|
|
|
+ SafeBuiltinDataAllocator safe_allocator(allocator);
|
|
|
|
|
+ std::unique_ptr<TfLiteUnpackParams,
|
|
|
|
|
+ SafeBuiltinDataAllocator::BuiltinDataDeleter>
|
|
|
|
|
+ params = safe_allocator.Allocate<TfLiteUnpackParams>();
|
|
|
|
|
+ TF_LITE_ENSURE(error_reporter, params != nullptr);
|
|
|
|
|
+
|
|
|
|
|
+ const UnpackOptions* schema_params = op->builtin_options_as_UnpackOptions();
|
|
|
|
|
+
|
|
|
|
|
+ if (schema_params != nullptr) {
|
|
|
|
|
+ params->num = schema_params->num();
|
|
|
|
|
+ params->axis = schema_params->axis();
|
|
|
|
|
+ } else {
|
|
|
|
|
+ // TODO(b/157480169): We should either return kTfLiteError or fill in some
|
|
|
|
|
+ // reasonable defaults in the params struct. We are not doing so until we
|
|
|
|
|
+ // better undertand the ramifications of changing the legacy behavior.
|
|
|
|
|
+ }
|
|
|
|
|
+
|
|
|
|
|
+ *builtin_data = params.release();
|
|
|
|
|
+ return kTfLiteOk;
|
|
|
|
|
+}
|
|
|
|
|
+
|
|
|
|
|
// Top-level dispatch that converts the builtin options of `op` (identified by
// `op_type`) into the corresponding TfLite*Params struct via `allocator`,
// storing the result in `*builtin_data`. On TF Lite Micro builds
// (TF_LITE_STATIC_MEMORY) this entry point is compiled out and reports an
// error; the per-operator Parse* functions above are used instead for
// selective registration.
TfLiteStatus ParseOpData(const Operator* op, BuiltinOperator op_type,
                         ErrorReporter* error_reporter,
                         BuiltinDataAllocator* allocator, void** builtin_data) {
// TODO(b/145762662): It would be preferable to have the build graph for TF Lite
// Micro not have the ParseOpData function at all. This would require splitting
// the current file into two separate files, one of which defines the
// ParseOpData function and the other that defines the operator specific parse
// functions (e.g. ParseAdd).
//
// Such a split was attempted but was not worth the effort at the time because
// of the following reasons:
// * We could either duplicate the functions and the SafeBuiltinDataAllocator
// class in the anonymous namespace of this file, or attempt to make a common
// library with these helper functions and class.
// * Making a common library with a separate build target was not feasible as
// it introduced circular dependencies due to the ErrorReporter and a common
// .cc and .h within the same api build target that also cause circular
// dependencies due to the BuiltinDataAllocator class.
// * If all the builtin operators were to have their own parse functions, or we
// were ok with some amount of code duplication, then this split of the .cc
// files would be a lot more feasible.
#ifdef TF_LITE_STATIC_MEMORY
  TF_LITE_REPORT_ERROR(
      error_reporter,
      "ParseOpData is unsupported on TfLiteMicro, please use the operator "
      "specific parse functions (e.g. ParseAdd etc.).\n");
  return kTfLiteError;
#else
  return ParseOpDataTfLite(op, op_type, error_reporter, allocator,
                           builtin_data);
#endif
}
|
|
|
|
|
|
|
|
} // namespace tflite
|
|
} // namespace tflite
|