quantize.cc

/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/kernels/internal/reference/quantize.h"

#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/kernels/internal/quantization_util.h"
#include "tensorflow/lite/kernels/internal/reference/requantize.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/micro/micro_utils.h"

namespace tflite {
namespace ops {
namespace micro {
namespace quantize {

struct OpData {
  // The scaling factor from input to output (aka the 'real multiplier') can
  // be represented as a fixed point multiplier plus a left shift.
  int32_t output_multiplier;
  int output_shift;
};
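// Illustrative sketch (not part of the kernel): QuantizeMultiplier, declared
// in quantization_util.h, produces the pair stored above. For example, a real
// multiplier of 0.5 decomposes as
//
//   int32_t multiplier;
//   int shift;
//   QuantizeMultiplier(0.5, &multiplier, &shift);
//   // multiplier == 1073741824, i.e. round(0.5 * 2^31), and shift == 0,
//   // so the scale can later be applied with integer-only arithmetic.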
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
  TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr);
  void* data = nullptr;
  if (context->AllocatePersistentBuffer(context, sizeof(OpData), &data) ==
      kTfLiteError) {
    return nullptr;
  }
  return data;
}
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  TFLITE_DCHECK(node->user_data != nullptr);
  OpData* data = static_cast<OpData*>(node->user_data);

  TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
  const TfLiteTensor* input = GetInput(context, node, 0);
  TfLiteTensor* output = GetOutput(context, node, 0);

  // TODO(b/128934713): Add support for fixed-point per-channel quantization.
  // Currently this only supports affine per-layer quantization.
  TF_LITE_ENSURE_EQ(context, output->quantization.type,
                    kTfLiteAffineQuantization);
  const auto* affine_quantization =
      reinterpret_cast<TfLiteAffineQuantization*>(output->quantization.params);
  TF_LITE_ENSURE(context, affine_quantization);
  TF_LITE_ENSURE(context, affine_quantization->scale);
  TF_LITE_ENSURE(context, affine_quantization->scale->size == 1);

  TF_LITE_ENSURE(context, input->type == kTfLiteFloat32 ||
                              input->type == kTfLiteInt16 ||
                              input->type == kTfLiteInt8);
  TF_LITE_ENSURE(context,
                 output->type == kTfLiteUInt8 || output->type == kTfLiteInt8);

  if ((input->type == kTfLiteInt16 || input->type == kTfLiteInt8) &&
      output->type == kTfLiteInt8) {
    // Cast each scale to double before dividing so the ratio is computed at
    // double precision rather than in float.
    double effective_scale = static_cast<double>(input->params.scale) /
                             static_cast<double>(output->params.scale);
    QuantizeMultiplier(effective_scale, &data->output_multiplier,
                       &data->output_shift);
  }
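  // Worked example (illustrative): requantizing an int8 input with scale 0.05
  // to an int8 output with scale 0.1 gives effective_scale = 0.5, which the
  // multiplier/shift pair encodes so that Eval needs no floating point.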
  return kTfLiteOk;
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
  TFLITE_DCHECK(node->user_data != nullptr);
  OpData* data = static_cast<OpData*>(node->user_data);
  const TfLiteTensor* input = GetInput(context, node, 0);
  TfLiteTensor* output = GetOutput(context, node, 0);

  tflite::QuantizationParams op_params;
  op_params.zero_point = output->params.zero_point;
  op_params.scale = static_cast<double>(output->params.scale);
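  // For reference, AffineQuantize computes, per element,
  //   q = clamp(zero_point + round(x / scale), min, max)
  // with min/max the limits of the output type. E.g. with scale = 0.1 and
  // zero_point = -128, the float 1.0f maps to -128 + 10 = -118 in int8.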
  if (input->type == kTfLiteFloat32) {
    switch (output->type) {
      case kTfLiteInt8:
        reference_ops::AffineQuantize(
            op_params, GetTensorShape(input), GetTensorData<float>(input),
            GetTensorShape(output), GetTensorData<int8_t>(output));
        break;
      case kTfLiteUInt8:
        reference_ops::AffineQuantize(
            op_params, GetTensorShape(input), GetTensorData<float>(input),
            GetTensorShape(output), GetTensorData<uint8_t>(output));
        break;
      default:
        TF_LITE_KERNEL_LOG(context, "Input %s, output %s not supported.",
                           TfLiteTypeGetName(input->type),
                           TfLiteTypeGetName(output->type));
        return kTfLiteError;
    }
  } else if (input->type == kTfLiteInt16) {
    size_t size = ElementCount(*input->dims);
    switch (output->type) {
      case kTfLiteInt8:
        reference_ops::Requantize(
            GetTensorData<int16_t>(input), size, data->output_multiplier,
            data->output_shift, input->params.zero_point,
            output->params.zero_point, GetTensorData<int8_t>(output));
        break;
      default:
        TF_LITE_KERNEL_LOG(context, "Input %s, output %s not supported.",
                           TfLiteTypeGetName(input->type),
                           TfLiteTypeGetName(output->type));
        return kTfLiteError;
    }
  } else if (input->type == kTfLiteInt8) {
    // Int8 to Int8 requantization, required if the input and output tensors
    // have different scales and/or zero points.
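    // For reference, Requantize computes, per element,
    //   q_out = clamp(zero_point_out +
    //                 round((q_in - zero_point_in) * effective_scale), min, max)
    // using only the integer multiplier/shift pair stored in Prepare.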
    size_t size = ElementCount(*input->dims);
    switch (output->type) {
      case kTfLiteInt8:
        reference_ops::Requantize(
            GetTensorData<int8_t>(input), size, data->output_multiplier,
            data->output_shift, input->params.zero_point,
            output->params.zero_point, GetTensorData<int8_t>(output));
        break;
      default:
        TF_LITE_KERNEL_LOG(context, "Input %s, output %s not supported.",
                           TfLiteTypeGetName(input->type),
                           TfLiteTypeGetName(output->type));
        return kTfLiteError;
    }
  } else {
    TF_LITE_KERNEL_LOG(context, "Input %s, output %s not supported.",
                       TfLiteTypeGetName(input->type),
                       TfLiteTypeGetName(output->type));
    return kTfLiteError;
  }

  return kTfLiteOk;
}
}  // namespace quantize

// This Op (QUANTIZE) quantizes the input and produces quantized output.
// AffineQuantize takes scale and zero point and quantizes the float value to
// quantized output, in int8 or uint8 format.
TfLiteRegistration* Register_QUANTIZE() {
  static TfLiteRegistration r = {/*init=*/quantize::Init,
                                 /*free=*/nullptr,
                                 /*prepare=*/quantize::Prepare,
                                 /*invoke=*/quantize::Eval,
                                 /*profiling_string=*/nullptr,
                                 /*builtin_code=*/0,
                                 /*custom_name=*/nullptr,
                                 /*version=*/0};
  return &r;
}
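// Usage sketch (illustrative; assumes the MicroMutableOpResolver API of this
// TensorFlow Lite Micro version):
//
//   static tflite::MicroMutableOpResolver resolver;
//   resolver.AddBuiltin(tflite::BuiltinOperator_QUANTIZE,
//                       tflite::ops::micro::Register_QUANTIZE());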
}  // namespace micro
}  // namespace ops
}  // namespace tflite