test_utils.h

/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#ifndef TENSORFLOW_LITE_MICRO_TESTING_TEST_UTILS_H_
#define TENSORFLOW_LITE_MICRO_TESTING_TEST_UTILS_H_

#include <cmath>
#include <cstdint>
#include <initializer_list>
#include <limits>

#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/core/api/tensor_utils.h"
#include "tensorflow/lite/micro/micro_utils.h"
#include "tensorflow/lite/micro/test_helpers.h"
#include "tensorflow/lite/micro/testing/micro_test.h"

namespace tflite {
namespace testing {

// Note: These methods are deprecated, do not use. See b/141332970.

// TODO(kreeger): Don't use this anymore in our tests. Optimized compiler
// settings can play with pointer placement on the stack (b/140130236).
inline TfLiteIntArray* IntArrayFromInitializer(
    std::initializer_list<int> int_initializer) {
  return IntArrayFromInts(int_initializer.begin());
}

// Derives the quantization range max from scaling factor and zero point.
template <typename T>
inline float MaxFromZeroPointScale(const int zero_point, const float scale) {
  return (std::numeric_limits<T>::max() - zero_point) * scale;
}

// Derives the quantization range min from scaling factor and zero point.
template <typename T>
inline float MinFromZeroPointScale(const int zero_point, const float scale) {
  return (std::numeric_limits<T>::min() - zero_point) * scale;
}

// Derives the quantization scaling factor from a min and max range.
template <typename T>
inline float ScaleFromMinMax(const float min, const float max) {
  return (max - min) / ((std::numeric_limits<T>::max() * 1.0) -
                        std::numeric_limits<T>::min());
}

// Derives the quantization zero point from a min and max range.
template <typename T>
inline int ZeroPointFromMinMax(const float min, const float max) {
  return static_cast<int>(std::numeric_limits<T>::min()) +
         static_cast<int>(-min / ScaleFromMinMax<T>(min, max) + 0.5f);
}
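
// Example (illustrative only, not part of the upstream header): for a uint8
// tensor covering the float range [-1.0f, 1.0f], the helpers above give
// approximately:
//   const float scale = ScaleFromMinMax<uint8_t>(-1.0f, 1.0f);         // 2/255 ~= 0.00784
//   const int zero_point = ZeroPointFromMinMax<uint8_t>(-1.0f, 1.0f);  // 128
//   const float real_max = MaxFromZeroPointScale<uint8_t>(zero_point, scale);  // ~0.996
//   const float real_min = MinFromZeroPointScale<uint8_t>(zero_point, scale);  // ~-1.004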

// Converts a float value into an unsigned eight-bit quantized value.
uint8_t F2Q(float value, float min, float max);

// Converts a float value into a signed eight-bit quantized value.
int8_t F2QS(const float value, const float min, const float max);

// Converts a float value into a signed thirty-two-bit quantized value. Note
// that values close to max int and min int may see significant error due to
// a lack of floating point granularity for large values.
int32_t F2Q32(const float value, const float scale);
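
// Example (illustrative only; assumes the usual affine mapping
// quantized = zero_point + round(value / scale) used by these helpers):
//   F2Q(0.0f, -1.0f, 1.0f);   // 128, the uint8 zero point for [-1, 1]
//   F2QS(0.0f, -1.0f, 1.0f);  // 0, the int8 zero point for [-1, 1]
//   F2Q32(1.0f, 0.5f);        // 2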

// TODO(b/141330728): Move this method elsewhere as part of clean up.
void PopulateContext(TfLiteTensor* tensors, int tensors_size,
                     ErrorReporter* error_reporter, TfLiteContext* context);

TfLiteTensor CreateFloatTensor(std::initializer_list<float> data,
                               TfLiteIntArray* dims, const char* name,
                               bool is_variable = false);

TfLiteTensor CreateBoolTensor(std::initializer_list<bool> data,
                              TfLiteIntArray* dims, const char* name,
                              bool is_variable = false);

TfLiteTensor CreateQuantizedTensor(const uint8_t* data, TfLiteIntArray* dims,
                                   const char* name, float min, float max,
                                   bool is_variable = false);

TfLiteTensor CreateQuantizedTensor(std::initializer_list<uint8_t> data,
                                   TfLiteIntArray* dims, const char* name,
                                   float min, float max,
                                   bool is_variable = false);

TfLiteTensor CreateQuantizedTensor(const int8_t* data, TfLiteIntArray* dims,
                                   const char* name, float min, float max,
                                   bool is_variable = false);

TfLiteTensor CreateQuantizedTensor(std::initializer_list<int8_t> data,
                                   TfLiteIntArray* dims, const char* name,
                                   float min, float max,
                                   bool is_variable = false);

TfLiteTensor CreateQuantizedTensor(float* data, uint8_t* quantized_data,
                                   TfLiteIntArray* dims, const char* name,
                                   bool is_variable = false);

TfLiteTensor CreateQuantizedTensor(float* data, int8_t* quantized_data,
                                   TfLiteIntArray* dims, const char* name,
                                   bool is_variable = false);

TfLiteTensor CreateQuantizedTensor(float* data, int16_t* quantized_data,
                                   TfLiteIntArray* dims, const char* name,
                                   bool is_variable = false);

TfLiteTensor CreateQuantized32Tensor(const int32_t* data, TfLiteIntArray* dims,
                                     const char* name, float scale,
                                     bool is_variable = false);

TfLiteTensor CreateQuantized32Tensor(std::initializer_list<int32_t> data,
                                     TfLiteIntArray* dims, const char* name,
                                     float scale, bool is_variable = false);
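
// Illustrative sketch (not part of the upstream header) of how these helpers
// are typically combined when setting up a kernel unit test; the data arrays
// and `error_reporter` are placeholders the test would need to define:
//   int dims_data[] = {2, 1, 4};  // 2 dimensions: {1, 4}
//   TfLiteIntArray* dims = IntArrayFromInts(dims_data);
//   const uint8_t input_data[] = {0, 64, 128, 255};
//   uint8_t output_data[4];
//   TfLiteTensor tensors[] = {
//       CreateQuantizedTensor(input_data, dims, "input", -1.0f, 1.0f),
//       CreateQuantizedTensor(output_data, dims, "output", -1.0f, 1.0f),
//   };
//   TfLiteContext context;
//   PopulateContext(tensors, 2, error_reporter, &context);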

template <typename input_type = int32_t,
          TfLiteType tensor_input_type = kTfLiteInt32>
inline TfLiteTensor CreateTensor(const input_type* data, TfLiteIntArray* dims,
                                 const char* name, bool is_variable = false) {
  TfLiteTensor result;
  result.type = tensor_input_type;
  result.data.raw = reinterpret_cast<char*>(const_cast<input_type*>(data));
  result.dims = dims;
  result.allocation_type = kTfLiteMemNone;
  result.bytes = ElementCount(*dims) * sizeof(input_type);
  result.allocation = nullptr;
  result.name = name;
  result.is_variable = is_variable;
  return result;
}

template <typename input_type = int32_t,
          TfLiteType tensor_input_type = kTfLiteInt32>
inline TfLiteTensor CreateTensor(std::initializer_list<input_type> data,
                                 TfLiteIntArray* dims, const char* name,
                                 bool is_variable = false) {
  return CreateTensor<input_type, tensor_input_type>(data.begin(), dims, name,
                                                     is_variable);
}
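
// Illustrative usage (not part of the upstream header): creating a plain
// int32 tensor for a test; the data and dimension arrays are placeholders:
//   const int32_t bias_data[] = {1, 2, 3, 4};
//   int bias_dims_data[] = {1, 4};  // 1 dimension of size 4
//   TfLiteTensor bias = CreateTensor<int32_t, kTfLiteInt32>(
//       bias_data, IntArrayFromInts(bias_dims_data), "bias_tensor");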

}  // namespace testing
}  // namespace tflite

#endif  // TENSORFLOW_LITE_MICRO_TESTING_TEST_UTILS_H_