reduce.cc

/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
  12. #include "tensorflow/lite/kernels/internal/reference/reduce.h"
  13. #include "tensorflow/lite/c/builtin_op_data.h"
  14. #include "tensorflow/lite/c/common.h"
  15. #include "tensorflow/lite/kernels/internal/quantization_util.h"
  16. #include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
  17. #include "tensorflow/lite/kernels/internal/types.h"
  18. #include "tensorflow/lite/kernels/kernel_util.h"

namespace tflite {
namespace ops {
namespace micro {
namespace reduce {
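
// Upper bounds for the scratch index arrays used in EvalMean: inputs are
// limited to 4 dimensions and reductions to at most two axes.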
constexpr int kMaxNumberOfAxis = 4;
constexpr int kMaxNumberOfReducedAxis = 2;

TfLiteStatus PrepareSimple(TfLiteContext* context, TfLiteNode* node) {
  // Inputs Tensor (dtype depends on quantization):
  // [0] = Input
  // [1] = Axis
  // Outputs Tensor (dtype depends on quantization):
  // [0] = Output

  // Validate number of inputs and outputs
  TF_LITE_ENSURE_EQ(context, node->inputs->size, 2);
  TF_LITE_ENSURE_EQ(context, node->outputs->size, 1);

  // Validate axis type
  const TfLiteTensor* axis = GetInput(context, node, 1);
  TF_LITE_ENSURE_TYPES_EQ(context, axis->type, kTfLiteInt32);
  return kTfLiteOk;
}
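
// Prepare step for mean/sum reductions; beyond the shared checks in
// PrepareSimple, nothing extra is required yet.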
TfLiteStatus PrepareMeanOrSum(TfLiteContext* context, TfLiteNode* node) {
  TF_LITE_ENSURE_OK(context, PrepareSimple(context, node));
  // TODO(b/144955155): Support uint8(b/144955155) and int8(b/144955018)
  return kTfLiteOk;
}
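
// Copies up to axis_count axis indices from axis_data into op_params->axis,
// pads the remaining of the four slots with 1, and records axis_count.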
void ResolveAxis(const int* axis_data, int axis_count,
                 tflite::MeanParams* op_params) {
  int i = 0;
  for (; i < axis_count; ++i) {
    op_params->axis[i] = static_cast<int16>(axis_data[i]);
  }
  for (; i < 4; ++i) {
    op_params->axis[i] = 1;
  }
  op_params->axis_count = axis_count;
}
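
// Computes the mean of the input tensor along the axes given by the second
// input. Only float32 inputs are supported; quantized types are covered by
// the TODOs below.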
TfLiteStatus EvalMean(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* input = GetInput(context, node, 0);
  const TfLiteTensor* axis = GetInput(context, node, 1);
  TfLiteTensor* output = GetOutput(context, node, 0);
  TfLiteReducerParams* params =
      reinterpret_cast<TfLiteReducerParams*>(node->builtin_data);

  int num_axis = static_cast<int>(NumElements(axis));
  int temp_index[kMaxNumberOfAxis];
  int resolved_axis[kMaxNumberOfReducedAxis];

  switch (input->type) {
    case kTfLiteFloat32: {
      tflite::MeanParams op_params;
      ResolveAxis(GetTensorData<int>(axis), num_axis, &op_params);
      // TODO(b/146571391): Support only 4D Input and 2D Axis for Mean until
      // scratch tensor allocation has been implemented in (b/132070898)
      bool is_valid_inputs =
          (NumDimensions(input) == 4 && op_params.axis_count == 2 &&
           ((op_params.axis[0] == 1 && op_params.axis[1] == 2) ||
            (op_params.axis[0] == 2 && op_params.axis[1] == 1)));
      TF_LITE_ENSURE_MSG(
          context, is_valid_inputs == true,
          "Number of Input "
          "dimensions != 4 OR the Axis is not either [1, 2] or [2, 1]");
      // TODO(b/139102329): Handle the below special case in the combined
      // reference method.
      // Defer to specialized implementation for 4D Mean across axes 1 & 2.
      if (params->keep_dims) {
        reference_ops::Mean(op_params, GetTensorShape(input),
                            GetTensorData<float>(input),
                            GetTensorShape(output),
                            GetTensorData<float>(output));
      } else {
        TF_LITE_ENSURE(
            context,
            reference_ops::Mean(GetTensorData<float>(input), input->dims->data,
                                input->dims->size, GetTensorData<float>(output),
                                output->dims->data, output->dims->size,
                                GetTensorData<int>(axis), num_axis,
                                params->keep_dims, temp_index, resolved_axis,
                                GetTensorData<float>(output)));
      }
    } break;
    default:
      // TODO(b/144955155): Support uint8(b/144955155) and int8(b/144955018)
      TF_LITE_ENSURE_MSG(context, false,
                         "Currently, only float32 input type "
                         "is supported.");
  }
  return kTfLiteOk;
}
}  // namespace reduce
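
// Registration for the built-in MEAN operator; exposes the prepare and
// invoke entry points declared above to the op resolver.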
TfLiteRegistration* Register_MEAN() {
  static TfLiteRegistration r = {/*init=*/nullptr,
                                 /*free=*/nullptr,
                                 /*prepare=*/reduce::PrepareMeanOrSum,
                                 /*invoke=*/reduce::EvalMean,
                                 /*profiling_string=*/nullptr,
                                 /*builtin_code=*/0,
                                 /*custom_name=*/nullptr,
                                 /*version=*/0};
  return &r;
}

}  // namespace micro
}  // namespace ops
}  // namespace tflite