reduce.cc

/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/lite/kernels/internal/reference/reduce.h"

#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/kernels/internal/quantization_util.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/micro/kernels/kernel_util.h"
#include "tensorflow/lite/micro/micro_utils.h"

namespace tflite {
namespace ops {
namespace micro {
namespace reduce {
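
// Static limits used to size the scratch arrays below: inputs with up to
// four dimensions and at most two reduced axes are supported by this kernel.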
constexpr int kMaxNumberOfAxis = 4;
constexpr int kMaxNumberOfReducedAxis = 2;

TfLiteStatus PrepareSimple(TfLiteContext* context, TfLiteNode* node) {
  // Inputs Tensor (dtype depends on quantization):
  //          [0] = Input
  //          [1] = Axis
  // Outputs Tensor (dtype depends on quantization):
  //          [0] = Output

  // Validate number of inputs and outputs
  TF_LITE_ENSURE_EQ(context, node->inputs->size, 2);
  TF_LITE_ENSURE_EQ(context, node->outputs->size, 1);

  // Validate axis type
  const TfLiteTensor* axis = GetInput(context, node, 1);
  TF_LITE_ENSURE_TYPES_EQ(context, axis->type, kTfLiteInt32);
  return kTfLiteOk;
}
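
// Prepare handler for MEAN; it delegates to the shared input/output and axis
// validation in PrepareSimple. Quantized (uint8_t/int8_t) support is still
// pending per the TODO below.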
TfLiteStatus PrepareMeanOrSum(TfLiteContext* context, TfLiteNode* node) {
  TF_LITE_ENSURE_OK(context, PrepareSimple(context, node));
  // TODO(b/144955155): Support uint8_t(b/144955155) and int8_t(b/144955018)
  return kTfLiteOk;
}
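
// Copies the requested reduction axes from the int32 axis tensor into the
// int16_t axis array of MeanParams and records the axis count. Unused axis
// slots are padded with 1 so the array is fully initialized.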
void ResolveAxis(const int* axis_data, int axis_count,
                 tflite::MeanParams* op_params) {
  int i = 0;
  for (; i < axis_count; ++i) {
    op_params->axis[i] = static_cast<int16_t>(axis_data[i]);
  }
  for (; i < 4; ++i) {
    op_params->axis[i] = 1;
  }
  op_params->axis_count = axis_count;
}
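
// Computes the mean of the input over the axes given by the axis tensor.
// Only float32 inputs with 4 dimensions and axes {1, 2} (in either order) are
// accepted: when keep_dims is set, the specialized reference Mean is used;
// otherwise the generic reducer is called.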
TfLiteStatus EvalMean(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteEvalTensor* input = tflite::micro::GetEvalInput(context, node, 0);
  const TfLiteEvalTensor* axis = tflite::micro::GetEvalInput(context, node, 1);
  TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(context, node, 0);
  TfLiteReducerParams* params =
      reinterpret_cast<TfLiteReducerParams*>(node->builtin_data);

  int num_axis = static_cast<int>(ElementCount(*axis->dims));
  int temp_index[kMaxNumberOfAxis];
  int resolved_axis[kMaxNumberOfReducedAxis];

  switch (input->type) {
    case kTfLiteFloat32: {
      tflite::MeanParams op_params;
      ResolveAxis(tflite::micro::GetTensorData<int>(axis), num_axis,
                  &op_params);
      // TODO(b/146571391): Support only 4D Input and 2D Axis for Mean until
      // scratch tensor allocation has been implemented in (b/132070898)
      bool is_valid_inputs =
          (input->dims->size == 4 && op_params.axis_count == 2 &&
           ((op_params.axis[0] == 1 && op_params.axis[1] == 2) ||
            (op_params.axis[0] == 2 && op_params.axis[1] == 1)));
      TF_LITE_ENSURE_MSG(
          context, is_valid_inputs == true,
          "Number of Input "
          "dimensions != 4 OR the Axis is not either [1, 2] or [2, 1]");
      // TODO(b/139102329): Handle the below special case in the combined
      // reference method.
      // Defer to specialized implementation for 4D Mean across axes 1 & 2.
      if (params->keep_dims) {
        reference_ops::Mean(op_params, tflite::micro::GetTensorShape(input),
                            tflite::micro::GetTensorData<float>(input),
                            tflite::micro::GetTensorShape(output),
                            tflite::micro::GetTensorData<float>(output));
      } else {
        TF_LITE_ENSURE(
            context,
            reference_ops::Mean(
                tflite::micro::GetTensorData<float>(input), input->dims->data,
                input->dims->size, tflite::micro::GetTensorData<float>(output),
                output->dims->data, output->dims->size,
                tflite::micro::GetTensorData<int>(axis), num_axis,
                params->keep_dims, temp_index, resolved_axis,
                tflite::micro::GetTensorData<float>(output)));
      }
    } break;
    default:
      // TODO(b/144955155): Support uint8_t(b/144955155) and int8_t(b/144955018)
      TF_LITE_ENSURE_MSG(context, false,
                         "Currently, only float32 input type "
                         "is supported.");
  }

  return kTfLiteOk;
}

}  // namespace reduce

TfLiteRegistration Register_MEAN() {
  return {/*init=*/nullptr,
          /*free=*/nullptr,
          /*prepare=*/reduce::PrepareMeanOrSum,
          /*invoke=*/reduce::EvalMean,
          /*profiling_string=*/nullptr,
          /*builtin_code=*/0,
          /*custom_name=*/nullptr,
          /*version=*/0};
}

}  // namespace micro
}  // namespace ops
}  // namespace tflite
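
// Usage sketch (not part of this file): an application makes this kernel
// available by registering Register_MEAN() with its op resolver before
// constructing the interpreter, along the lines of
//
//   resolver.AddBuiltin(tflite::BuiltinOperator_MEAN,
//                       tflite::ops::micro::Register_MEAN());
//
// The exact AddBuiltin signature varies across TFLM versions, so treat this
// as illustrative rather than exact.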