/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_COMMON_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_COMMON_H_

#ifndef ALLOW_SLOW_GENERIC_DEPTHWISECONV_FALLBACK
#ifdef GEMMLOWP_ALLOW_SLOW_SCALAR_FALLBACK
#define ALLOW_SLOW_GENERIC_DEPTHWISECONV_FALLBACK
#endif
#endif

#include <functional>

#include "fixedpoint/fixedpoint.h"
#include "tensorflow/lite/kernels/internal/cppmath.h"
#include "tensorflow/lite/kernels/internal/optimized/neon_check.h"
#include "tensorflow/lite/kernels/internal/types.h"

namespace tflite {

constexpr int kReverseShift = -1;

inline void GetActivationMinMax(FusedActivationFunctionType ac,
                                float* output_activation_min,
                                float* output_activation_max) {
  switch (ac) {
    case FusedActivationFunctionType::kNone:
      *output_activation_min = std::numeric_limits<float>::lowest();
      *output_activation_max = std::numeric_limits<float>::max();
      break;
    case FusedActivationFunctionType::kRelu:
      *output_activation_min = 0.f;
      *output_activation_max = std::numeric_limits<float>::max();
      break;
    case FusedActivationFunctionType::kRelu1:
      *output_activation_min = -1.f;
      *output_activation_max = 1.f;
      break;
    case FusedActivationFunctionType::kRelu6:
      *output_activation_min = 0.f;
      *output_activation_max = 6.f;
      break;
  }
}

template <typename T>
inline T ActivationFunctionWithMinMax(T x, T output_activation_min,
                                      T output_activation_max) {
  using std::max;
  using std::min;
  return min(max(x, output_activation_min), output_activation_max);
}

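// Example usage (illustrative only): for a fused ReLU6, GetActivationMinMax
// yields the range [0, 6], so an out-of-range value is clamped:
//
//   float act_min, act_max;
//   GetActivationMinMax(FusedActivationFunctionType::kRelu6, &act_min,
//                       &act_max);
//   float y = ActivationFunctionWithMinMax(7.5f, act_min, act_max);  // 6.f
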
// Legacy function, left for compatibility only.
template <FusedActivationFunctionType Ac>
float ActivationFunction(float x) {
  float output_activation_min, output_activation_max;
  GetActivationMinMax(Ac, &output_activation_min, &output_activation_max);
  return ActivationFunctionWithMinMax(x, output_activation_min,
                                      output_activation_max);
}

inline void BiasAndClamp(float clamp_min, float clamp_max, int bias_size,
                         const float* bias_data, int array_size,
                         float* array_data) {
  // Note: see b/132215220: in May 2019 we thought it would be OK to replace
  // this with the Eigen one-liner:
  //   return (array.colwise() + bias).cwiseMax(clamp_min).cwiseMin(clamp_max).
  // This turned out to severely regress performance: +4ms (i.e. 8%) on
  // MobileNet v2 / 1.0 / 224. So we keep custom NEON code for now.
  TFLITE_DCHECK_EQ((array_size % bias_size), 0);
#ifdef USE_NEON
  float* array_ptr = array_data;
  float* array_end_ptr = array_ptr + array_size;
  const auto clamp_min_vec = vdupq_n_f32(clamp_min);
  const auto clamp_max_vec = vdupq_n_f32(clamp_max);
  for (; array_ptr != array_end_ptr; array_ptr += bias_size) {
    int i = 0;
    for (; i <= bias_size - 16; i += 16) {
      auto b0 = vld1q_f32(bias_data + i);
      auto b1 = vld1q_f32(bias_data + i + 4);
      auto b2 = vld1q_f32(bias_data + i + 8);
      auto b3 = vld1q_f32(bias_data + i + 12);
      auto a0 = vld1q_f32(array_ptr + i);
      auto a1 = vld1q_f32(array_ptr + i + 4);
      auto a2 = vld1q_f32(array_ptr + i + 8);
      auto a3 = vld1q_f32(array_ptr + i + 12);
      auto x0 = vaddq_f32(a0, b0);
      auto x1 = vaddq_f32(a1, b1);
      auto x2 = vaddq_f32(a2, b2);
      auto x3 = vaddq_f32(a3, b3);
      x0 = vmaxq_f32(clamp_min_vec, x0);
      x1 = vmaxq_f32(clamp_min_vec, x1);
      x2 = vmaxq_f32(clamp_min_vec, x2);
      x3 = vmaxq_f32(clamp_min_vec, x3);
      x0 = vminq_f32(clamp_max_vec, x0);
      x1 = vminq_f32(clamp_max_vec, x1);
      x2 = vminq_f32(clamp_max_vec, x2);
      x3 = vminq_f32(clamp_max_vec, x3);
      vst1q_f32(array_ptr + i, x0);
      vst1q_f32(array_ptr + i + 4, x1);
      vst1q_f32(array_ptr + i + 8, x2);
      vst1q_f32(array_ptr + i + 12, x3);
    }
    for (; i <= bias_size - 4; i += 4) {
      auto b = vld1q_f32(bias_data + i);
      auto a = vld1q_f32(array_ptr + i);
      auto x = vaddq_f32(a, b);
      x = vmaxq_f32(clamp_min_vec, x);
      x = vminq_f32(clamp_max_vec, x);
      vst1q_f32(array_ptr + i, x);
    }
    for (; i < bias_size; i++) {
      array_ptr[i] = ActivationFunctionWithMinMax(array_ptr[i] + bias_data[i],
                                                  clamp_min, clamp_max);
    }
  }
#else  // not NEON
  for (int array_offset = 0; array_offset < array_size;
       array_offset += bias_size) {
    for (int i = 0; i < bias_size; i++) {
      array_data[array_offset + i] = ActivationFunctionWithMinMax(
          array_data[array_offset + i] + bias_data[i], clamp_min, clamp_max);
    }
  }
#endif
}

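// Example usage (illustrative; 'channels', 'num_elements', 'bias_data' and
// 'output_data' are placeholders): add a per-channel bias to an activation
// buffer laid out as [batch * spatial, channels] and apply a fused ReLU6.
// The bias is repeated every 'bias_size' elements of the output array:
//
//   BiasAndClamp(/*clamp_min=*/0.f, /*clamp_max=*/6.f,
//                /*bias_size=*/channels, bias_data,
//                /*array_size=*/num_elements, output_data);
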
inline int32_t MultiplyByQuantizedMultiplierSmallerThanOneExp(
    int32_t x, int32_t quantized_multiplier, int left_shift) {
  using gemmlowp::RoundingDivideByPOT;
  using gemmlowp::SaturatingRoundingDoublingHighMul;
  return RoundingDivideByPOT(
      SaturatingRoundingDoublingHighMul(x, quantized_multiplier), -left_shift);
}

inline int32_t MultiplyByQuantizedMultiplierGreaterThanOne(
    int32_t x, int32_t quantized_multiplier, int left_shift) {
  using gemmlowp::SaturatingRoundingDoublingHighMul;
  return SaturatingRoundingDoublingHighMul(x * (1 << left_shift),
                                           quantized_multiplier);
}

inline int32_t MultiplyByQuantizedMultiplier(int32_t x,
                                             int32_t quantized_multiplier,
                                             int shift) {
  using gemmlowp::RoundingDivideByPOT;
  using gemmlowp::SaturatingRoundingDoublingHighMul;
  int left_shift = shift > 0 ? shift : 0;
  int right_shift = shift > 0 ? 0 : -shift;
  return RoundingDivideByPOT(SaturatingRoundingDoublingHighMul(
                                 x * (1 << left_shift), quantized_multiplier),
                             right_shift);
}

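// Illustrative worked example: the pair (quantized_multiplier, shift) encodes
// the real factor quantized_multiplier * 2^(shift - 31). A factor of ~0.375
// could be encoded as quantized_multiplier = 1610612736 (0.75 in Q0.31) with
// shift = -1, so MultiplyByQuantizedMultiplier(x, 1610612736, -1) returns
// approximately 0.375 * x with rounding.
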
inline int32_t MultiplyByQuantizedMultiplier(int64_t x,
                                             int32_t quantized_multiplier,
                                             int shift) {
  // Inputs:
  // - quantized_multiplier has fixed point at bit 31
  // - shift is -31 to +7 (negative for right shift)
  //
  // Assumptions: The following input ranges are assumed
  // - quantized_multiplier >= 0 (the usual range is (1<<30) to (1<<31)-1)
  // - scaling is chosen so final scaled result fits in int32_t
  // - input x is in the range -(1<<47) <= x < (1<<47)
  assert(quantized_multiplier >= 0);
  assert(shift >= -31 && shift < 8);

  int32_t reduced_multiplier = (quantized_multiplier + (1 << 15)) >> 16;
  int total_shift = 15 - shift;
  x = (x * (int64_t)reduced_multiplier) + ((int64_t)1 << (total_shift - 1));
  int32_t result = x >> total_shift;
  return result;
}

template <typename T>
int CountLeadingZeros(T integer_input) {
  static_assert(std::is_unsigned<T>::value,
                "Only unsigned integer types handled.");
#if defined(__GNUC__)
  return integer_input ? __builtin_clz(integer_input)
                       : std::numeric_limits<T>::digits;
#else
  if (integer_input == 0) {
    return std::numeric_limits<T>::digits;
  }

  const T one_in_leading_positive = static_cast<T>(1)
                                    << (std::numeric_limits<T>::digits - 1);
  int leading_zeros = 0;
  while (integer_input < one_in_leading_positive) {
    integer_input <<= 1;
    ++leading_zeros;
  }
  return leading_zeros;
#endif
}

template <typename T>
inline int CountLeadingSignBits(T integer_input) {
  static_assert(std::is_signed<T>::value, "Only signed integer types handled.");
#if defined(__GNUC__) && !defined(__clang__)
  return integer_input ? __builtin_clrsb(integer_input)
                       : std::numeric_limits<T>::digits;
#else
  using U = typename std::make_unsigned<T>::type;
  return integer_input >= 0
             ? CountLeadingZeros(static_cast<U>(integer_input)) - 1
         : integer_input != std::numeric_limits<T>::min()
             ? CountLeadingZeros(2 * static_cast<U>(-integer_input) - 1)
             : 0;
#endif
}

// Use "count leading zeros" helper functions to do a fast Floor(log_2(x)).
template <typename Integer>
inline Integer FloorLog2(Integer n) {
  static_assert(std::is_integral<Integer>::value, "");
  static_assert(std::is_signed<Integer>::value, "");
  static_assert(sizeof(Integer) == 4 || sizeof(Integer) == 8, "");
  TFLITE_CHECK_GT(n, 0);
  if (sizeof(Integer) == 4) {
    return 30 - CountLeadingSignBits(n);
  } else {
    return 62 - CountLeadingSignBits(n);
  }
}

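// Illustrative examples of the helpers above, for int32_t inputs:
// CountLeadingSignBits(4) == 28, so FloorLog2(4) == 30 - 28 == 2; likewise
// FloorLog2(1) == 0 and FloorLog2(5) == 2, i.e. the result is floor(log2(n)).
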
// generate INT16 LUT for function(), e.g., table exp(x) and 1/(1+x) used in
// softmax
inline void gen_lut(const std::function<double(double)>& func, double min,
                    double max, int16_t* table, const int num) {
  // size of table should equal to num + 1
  // last element only for slope calculation
  double step = (max - min) / (num - 1);
  double half_step = step / 2.0;
  for (int i = 0; i < num - 1; i++) {
    double sample_val = TfLiteRound(func(min + i * step) * 32768.0);
    double midpoint_interp_val =
        TfLiteRound((func(min + (i + 1) * step) * 32768.0 +
                     TfLiteRound(func(min + i * step) * 32768.0)) /
                    2.0);
    double midpoint_val =
        TfLiteRound(func(min + i * step + half_step) * 32768.0);
    double midpoint_err = midpoint_interp_val - midpoint_val;
    double bias = TfLiteRound(midpoint_err / 2.0);
    table[i] = std::min(std::max(sample_val - bias, -32768.0), 32767.0);
  }
  table[num - 1] =
      std::min(std::max(TfLiteRound(func(max) * 32768.0), -32768.0), 32767.0);
}

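// Example usage (illustrative sketch only; the range and table size below are
// placeholders, not the values any particular kernel uses): building a Q0.15
// lookup table for exp(x), as the softmax-style int16 kernels do.
//
//   int16_t exp_lut[513];
//   gen_lut([](double x) { return std::exp(x); }, /*min=*/-10.0, /*max=*/0.0,
//           exp_lut, /*num=*/513);
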
// int16_t func table lookup, e.g., lookup exp() and 1/(1+x) used in softmax
inline int16_t generic_int16_table_lookup(int16_t value, const int16_t* lut) {
  // 512 base values; lut[512] is only used to calculate the slope.
  uint16_t index = static_cast<uint16_t>(256 + (value >> 7));
  assert(index < 512 && "LUT index out of range.");
  int16_t offset = value & 0x7f;

  // base and slope are Q0.15
  int16_t base = lut[index];
  int16_t slope = lut[index + 1] - lut[index];

  // Q0.15 * Q0.7 = Q0.22
  // Round and convert from Q0.22 to Q0.15
  int32_t delta = (static_cast<int32_t>(slope) * offset + 64) >> 7;

  // Q0.15 + Q0.15
  return base + delta;
}

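// Illustrative numeric walk-through: for value = 130, index = 256 + (130 >> 7)
// == 257 and offset = 130 & 0x7f == 2, so the result interpolates 2/128 of the
// way from lut[257] towards lut[258].
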
// Table of sigmoid(i/24) at 0.16 format - 256 elements.
// We use combined sigmoid and tanh look-up table, since
// tanh(x) = 2*sigmoid(2*x) -1.
// Both functions are symmetric, so the LUT table is only needed
// for the absolute value of the input.
static const uint16_t sigmoid_table_uint16[256] = {
    32768, 33451, 34133, 34813, 35493, 36169, 36843, 37513, 38180, 38841, 39498,
    40149, 40794, 41432, 42064, 42688, 43304, 43912, 44511, 45102, 45683, 46255,
    46817, 47369, 47911, 48443, 48964, 49475, 49975, 50464, 50942, 51409, 51865,
    52311, 52745, 53169, 53581, 53983, 54374, 54755, 55125, 55485, 55834, 56174,
    56503, 56823, 57133, 57433, 57724, 58007, 58280, 58544, 58800, 59048, 59288,
    59519, 59743, 59959, 60168, 60370, 60565, 60753, 60935, 61110, 61279, 61441,
    61599, 61750, 61896, 62036, 62172, 62302, 62428, 62549, 62666, 62778, 62886,
    62990, 63090, 63186, 63279, 63368, 63454, 63536, 63615, 63691, 63765, 63835,
    63903, 63968, 64030, 64090, 64148, 64204, 64257, 64308, 64357, 64405, 64450,
    64494, 64536, 64576, 64614, 64652, 64687, 64721, 64754, 64786, 64816, 64845,
    64873, 64900, 64926, 64950, 64974, 64997, 65019, 65039, 65060, 65079, 65097,
    65115, 65132, 65149, 65164, 65179, 65194, 65208, 65221, 65234, 65246, 65258,
    65269, 65280, 65291, 65301, 65310, 65319, 65328, 65337, 65345, 65352, 65360,
    65367, 65374, 65381, 65387, 65393, 65399, 65404, 65410, 65415, 65420, 65425,
    65429, 65433, 65438, 65442, 65445, 65449, 65453, 65456, 65459, 65462, 65465,
    65468, 65471, 65474, 65476, 65479, 65481, 65483, 65485, 65488, 65489, 65491,
    65493, 65495, 65497, 65498, 65500, 65501, 65503, 65504, 65505, 65507, 65508,
    65509, 65510, 65511, 65512, 65513, 65514, 65515, 65516, 65517, 65517, 65518,
    65519, 65520, 65520, 65521, 65522, 65522, 65523, 65523, 65524, 65524, 65525,
    65525, 65526, 65526, 65526, 65527, 65527, 65528, 65528, 65528, 65529, 65529,
    65529, 65529, 65530, 65530, 65530, 65530, 65531, 65531, 65531, 65531, 65531,
    65532, 65532, 65532, 65532, 65532, 65532, 65533, 65533, 65533, 65533, 65533,
    65533, 65533, 65533, 65534, 65534, 65534, 65534, 65534, 65534, 65534, 65534,
    65534, 65534, 65535};

// TODO(b/77858996): Add these to gemmlowp.
template <typename IntegerType>
IntegerType SaturatingAddNonGemmlowp(IntegerType a, IntegerType b) {
  static_assert(std::is_same<IntegerType, void>::value, "unimplemented");
  return a;
}

template <>
inline std::int32_t SaturatingAddNonGemmlowp(std::int32_t a, std::int32_t b) {
  std::int64_t a64 = a;
  std::int64_t b64 = b;
  std::int64_t sum = a64 + b64;
  return static_cast<std::int32_t>(std::min(
      static_cast<std::int64_t>(std::numeric_limits<std::int32_t>::max()),
      std::max(
          static_cast<std::int64_t>(std::numeric_limits<std::int32_t>::min()),
          sum)));
}

template <typename tRawType, int tIntegerBits>
gemmlowp::FixedPoint<tRawType, tIntegerBits> SaturatingAddNonGemmlowp(
    gemmlowp::FixedPoint<tRawType, tIntegerBits> a,
    gemmlowp::FixedPoint<tRawType, tIntegerBits> b) {
  return gemmlowp::FixedPoint<tRawType, tIntegerBits>::FromRaw(
      SaturatingAddNonGemmlowp(a.raw(), b.raw()));
}

template <typename IntegerType>
IntegerType SaturatingSub(IntegerType a, IntegerType b) {
  static_assert(std::is_same<IntegerType, void>::value, "unimplemented");
  return a;
}

template <>
inline std::int16_t SaturatingSub(std::int16_t a, std::int16_t b) {
  std::int32_t a32 = a;
  std::int32_t b32 = b;
  std::int32_t diff = a32 - b32;
  return static_cast<std::int16_t>(
      std::min(static_cast<int32_t>(32767),
               std::max(static_cast<int32_t>(-32768), diff)));
}

template <>
inline std::int32_t SaturatingSub(std::int32_t a, std::int32_t b) {
  std::int64_t a64 = a;
  std::int64_t b64 = b;
  std::int64_t diff = a64 - b64;
  return static_cast<std::int32_t>(std::min(
      static_cast<std::int64_t>(std::numeric_limits<std::int32_t>::max()),
      std::max(
          static_cast<std::int64_t>(std::numeric_limits<std::int32_t>::min()),
          diff)));
}

template <typename tRawType, int tIntegerBits>
gemmlowp::FixedPoint<tRawType, tIntegerBits> SaturatingSub(
    gemmlowp::FixedPoint<tRawType, tIntegerBits> a,
    gemmlowp::FixedPoint<tRawType, tIntegerBits> b) {
  return gemmlowp::FixedPoint<tRawType, tIntegerBits>::FromRaw(
      SaturatingSub(a.raw(), b.raw()));
}
// End section to be moved to gemmlowp.

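// Illustrative example: the saturating helpers clamp instead of wrapping,
// e.g. SaturatingSub(std::int16_t(-30000), std::int16_t(10000)) == -32768,
// whereas plain int16_t subtraction would overflow.
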
template <typename IntegerType>
IntegerType SaturatingRoundingMultiplyByPOTParam(IntegerType x, int exponent) {
  if (exponent == 0) {
    return x;
  }
  using ScalarIntegerType =
      typename gemmlowp::FixedPointRawTypeTraits<IntegerType>::ScalarRawType;
  const IntegerType min =
      gemmlowp::Dup<IntegerType>(std::numeric_limits<ScalarIntegerType>::min());
  const IntegerType max =
      gemmlowp::Dup<IntegerType>(std::numeric_limits<ScalarIntegerType>::max());
  const int ScalarIntegerTypeBits = 8 * sizeof(ScalarIntegerType);

  const std::int32_t threshold =
      ((1 << (ScalarIntegerTypeBits - 1 - exponent)) - 1);
  const IntegerType positive_mask =
      gemmlowp::MaskIfGreaterThan(x, gemmlowp::Dup<IntegerType>(threshold));
  const IntegerType negative_mask =
      gemmlowp::MaskIfLessThan(x, gemmlowp::Dup<IntegerType>(-threshold));

  IntegerType result = gemmlowp::ShiftLeft(x, exponent);
  result = gemmlowp::SelectUsingMask(positive_mask, max, result);
  result = gemmlowp::SelectUsingMask(negative_mask, min, result);
  return result;
}

// If we want to leave IntegerBits fixed, then multiplication
// by a power of two has to be saturating/rounding, not exact anymore.
template <typename tRawType, int tIntegerBits>
gemmlowp::FixedPoint<tRawType, tIntegerBits>
SaturatingRoundingMultiplyByPOTParam(
    gemmlowp::FixedPoint<tRawType, tIntegerBits> a, int exponent) {
  return gemmlowp::FixedPoint<tRawType, tIntegerBits>::FromRaw(
      SaturatingRoundingMultiplyByPOTParam(a.raw(), exponent));
}

// Convert int32_t multiplier to int16_t with rounding.
inline void DownScaleInt32ToInt16Multiplier(int32_t multiplier_int32_t,
                                            int16_t* multiplier_int16_t) {
  TFLITE_DCHECK_GE(multiplier_int32_t, 0);
  static constexpr int32_t kRoundingOffset = 1 << 15;
  if (multiplier_int32_t >=
      std::numeric_limits<int32_t>::max() - kRoundingOffset) {
    *multiplier_int16_t = std::numeric_limits<int16_t>::max();
    return;
  }
  const int32_t result = (multiplier_int32_t + kRoundingOffset) >> 16;
  TFLITE_DCHECK_LE(result << 16, multiplier_int32_t + kRoundingOffset);
  TFLITE_DCHECK_GT(result << 16, multiplier_int32_t - kRoundingOffset);
  *multiplier_int16_t = result;
  TFLITE_DCHECK_EQ(*multiplier_int16_t, result);
}

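// Illustrative example: a Q0.31-style multiplier of 1073741824 (0.5) maps to
// the Q0.15 value (1073741824 + 32768) >> 16 == 16384, i.e. still 0.5 at the
// reduced precision.
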
// Minimum output bits to accommodate log of maximum input range. It actually
// does not matter if one considers, say, [-64,64] or [-64,64).
//
// For example, run this through Octave:
// [0:127; ...
//  ceil(log(abs( log(2.^(0:127))+1 ))/log(2)); ...
//  ceil(log(abs( log(2.^(0:127))+1 ))/log(2))]
constexpr int min_log_x_output_bits(int input_bits) {
  return input_bits > 90   ? 7
         : input_bits > 44 ? 6
         : input_bits > 21 ? 5
         : input_bits > 10 ? 4
         : input_bits > 4  ? 3
         : input_bits > 1  ? 2
                           : 1;
}

// Although currently the name of this function says that it cannot handle
// values less than 1, in practice it can handle as low as 1/x_max, where
// x_max is the largest representable input. In other words, the output range
// is symmetric.
template <int OutputIntegerBits, int InputIntegerBits>
inline gemmlowp::FixedPoint<int32_t, OutputIntegerBits>
log_x_for_x_greater_than_or_equal_to_1_impl(
    gemmlowp::FixedPoint<int32_t, InputIntegerBits> input_val) {
  // assert(__builtin_clz(0u) >= std::numeric_limits<uint32_t>::digits - 1);
  // assert(__builtin_clz(0u) <= std::numeric_limits<uint32_t>::digits);
  using FixedPoint0 = gemmlowp::FixedPoint<int32_t, 0>;
  // The reason for accumulating the result with an extra bit of headroom is
  // that z_pow_2_adj * log_2 might be saturated, and adding num_scaled *
  // recip_denom will otherwise introduce an error.
  static constexpr int kAccumIntegerBits = OutputIntegerBits + 1;
  using FixedPointAccum = gemmlowp::FixedPoint<int32_t, kAccumIntegerBits>;

  const FixedPoint0 log_2 = GEMMLOWP_CHECKED_FIXEDPOINT_CONSTANT(
      FixedPoint0, 1488522236, std::log(2.0));
  const FixedPoint0 sqrt_sqrt_half = GEMMLOWP_CHECKED_FIXEDPOINT_CONSTANT(
      FixedPoint0, 1805811301, std::sqrt(std::sqrt(0.5)));
  const FixedPoint0 sqrt_half = GEMMLOWP_CHECKED_FIXEDPOINT_CONSTANT(
      FixedPoint0, 1518500250, std::sqrt(0.5));
  const FixedPoint0 one_quarter =
      GEMMLOWP_CHECKED_FIXEDPOINT_CONSTANT(FixedPoint0, 536870912, 1.0 / 4.0);

  const FixedPoint0 alpha_n = GEMMLOWP_CHECKED_FIXEDPOINT_CONSTANT(
      FixedPoint0, 117049297, 11.0 / 240.0 * std::sqrt(std::sqrt(2.0)));
  const FixedPoint0 alpha_d = GEMMLOWP_CHECKED_FIXEDPOINT_CONSTANT(
      FixedPoint0, 127690142, 1.0 / 20.0 * std::sqrt(std::sqrt(2.0)));
  const FixedPoint0 alpha_i = GEMMLOWP_CHECKED_FIXEDPOINT_CONSTANT(
      FixedPoint0, 1057819769,
      2.0 / std::sqrt(std::sqrt(2.0)) - std::sqrt(std::sqrt(2.0)));
  const FixedPoint0 alpha_f = GEMMLOWP_CHECKED_FIXEDPOINT_CONSTANT(
      FixedPoint0, 638450708, 1.0 / 4.0 * std::sqrt(std::sqrt(2.0)));

  const FixedPointAccum shifted_quarter =
      gemmlowp::Rescale<kAccumIntegerBits>(one_quarter);

  // Reinterpret the input value as Q0.31, because we will figure out the
  // required shift "ourselves" instead of using, say, Rescale.
  FixedPoint0 z_a = FixedPoint0::FromRaw(input_val.raw());
  // z_a_pow_2 = input_integer_bits - z_a_headroom;
  int z_a_headroom_plus_1 = CountLeadingZeros(static_cast<uint32_t>(z_a.raw()));
  FixedPoint0 r_a_tmp =
      SaturatingRoundingMultiplyByPOTParam(z_a, (z_a_headroom_plus_1 - 1));
  const int32_t r_a_raw =
      SaturatingRoundingMultiplyByPOTParam((r_a_tmp * sqrt_half).raw(), 1);
  // z_pow_2_adj = max(z_pow_2_a - 0.75, z_pow_2_b - 0.25);
  // z_pow_2_adj = max(InputIntegerBits - z_a_headroom_plus_1 + 0.25,
  //                   InputIntegerBits - z_b_headroom - 0.25);
  const FixedPointAccum z_a_pow_2_adj = SaturatingAddNonGemmlowp(
      FixedPointAccum::FromRaw(SaturatingRoundingMultiplyByPOTParam(
          InputIntegerBits - z_a_headroom_plus_1, 31 - kAccumIntegerBits)),
      shifted_quarter);

  // z_b is treated like z_a, but premultiplying by sqrt(0.5).
  FixedPoint0 z_b = z_a * sqrt_half;
  int z_b_headroom = CountLeadingZeros(static_cast<uint32_t>(z_b.raw())) - 1;
  const int32_t r_b_raw =
      SaturatingRoundingMultiplyByPOTParam(z_a.raw(), z_b_headroom);
  const FixedPointAccum z_b_pow_2_adj = SaturatingSub(
      FixedPointAccum::FromRaw(SaturatingRoundingMultiplyByPOTParam(
          InputIntegerBits - z_b_headroom, 31 - kAccumIntegerBits)),
      shifted_quarter);

  const FixedPoint0 r = FixedPoint0::FromRaw(std::min(r_a_raw, r_b_raw));
  const FixedPointAccum z_pow_2_adj = FixedPointAccum::FromRaw(
      std::max(z_a_pow_2_adj.raw(), z_b_pow_2_adj.raw()));

  const FixedPoint0 p = gemmlowp::RoundingHalfSum(r, sqrt_sqrt_half);
  FixedPoint0 q = r - sqrt_sqrt_half;
  q = q + q;

  const FixedPoint0 common_sq = q * q;
  const FixedPoint0 num = q * r + q * common_sq * alpha_n;
  const FixedPoint0 denom_minus_one_0 =
      p * (alpha_i + q + alpha_d * common_sq) + alpha_f * q;
  const FixedPoint0 recip_denom =
      one_over_one_plus_x_for_x_in_0_1(denom_minus_one_0);

  const FixedPointAccum num_scaled = gemmlowp::Rescale<kAccumIntegerBits>(num);
  return gemmlowp::Rescale<OutputIntegerBits>(z_pow_2_adj * log_2 +
                                              num_scaled * recip_denom);
}

template <int OutputIntegerBits, int InputIntegerBits>
inline gemmlowp::FixedPoint<int32_t, OutputIntegerBits>
log_x_for_x_greater_than_or_equal_to_1(
    gemmlowp::FixedPoint<int32_t, InputIntegerBits> input_val) {
  static_assert(
      OutputIntegerBits >= min_log_x_output_bits(InputIntegerBits),
      "Output integer bits must be sufficient to accommodate logs of inputs.");
  return log_x_for_x_greater_than_or_equal_to_1_impl<OutputIntegerBits,
                                                     InputIntegerBits>(
      input_val);
}

inline int32_t GetReciprocal(int32_t x, int x_integer_digits,
                             int* num_bits_over_unit) {
  int headroom_plus_one = CountLeadingZeros(static_cast<uint32_t>(x));
  // This is the number of bits to the left of the binary point above 1.0.
  // Consider x=1.25. In that case shifted_scale=0.8 and
  // no later adjustment will be needed.
  *num_bits_over_unit = x_integer_digits - headroom_plus_one;
  const int32_t shifted_sum_minus_one =
      static_cast<int32_t>((static_cast<uint32_t>(x) << headroom_plus_one) -
                           (static_cast<uint32_t>(1) << 31));

  gemmlowp::FixedPoint<int32_t, 0> shifted_scale =
      gemmlowp::one_over_one_plus_x_for_x_in_0_1(
          gemmlowp::FixedPoint<int32_t, 0>::FromRaw(shifted_sum_minus_one));
  return shifted_scale.raw();
}

inline void GetInvSqrtQuantizedMultiplierExp(int32_t input, int reverse_shift,
                                             int32_t* output_inv_sqrt,
                                             int* output_shift) {
  TFLITE_DCHECK_GE(input, 0);
  if (input <= 1) {
    // Handle the input value 1 separately to avoid overflow in that case
    // in the general computation below (b/143972021). Also handle 0 as if it
    // were a 1. 0 is an invalid input here (divide by zero) and 1 is a valid
    // but rare/unrealistic input value. We can expect both to occur in some
    // incompletely trained models, but probably not in fully trained models.
    *output_inv_sqrt = std::numeric_limits<std::int32_t>::max();
    *output_shift = 0;
    return;
  }
  TFLITE_DCHECK_GT(input, 1);
  *output_shift = 11;
  while (input >= (1 << 29)) {
    input /= 4;
    ++*output_shift;
  }
  const unsigned max_left_shift_bits =
      CountLeadingZeros(static_cast<uint32_t>(input)) - 1;
  const unsigned max_left_shift_bit_pairs = max_left_shift_bits / 2;
  const unsigned left_shift_bit_pairs = max_left_shift_bit_pairs - 1;
  *output_shift -= left_shift_bit_pairs;
  input <<= 2 * left_shift_bit_pairs;
  TFLITE_DCHECK_GE(input, (1 << 27));
  TFLITE_DCHECK_LT(input, (1 << 29));
  using gemmlowp::FixedPoint;
  using gemmlowp::Rescale;
  using gemmlowp::SaturatingRoundingMultiplyByPOT;
  // Using 3 integer bits gives us enough room for the internal arithmetic in
  // this Newton-Raphson iteration.
  using F3 = FixedPoint<int32_t, 3>;
  using F0 = FixedPoint<int32_t, 0>;
  const F3 fixedpoint_input = F3::FromRaw(input >> 1);
  const F3 fixedpoint_half_input =
      SaturatingRoundingMultiplyByPOT<-1>(fixedpoint_input);
  const F3 fixedpoint_half_three =
      GEMMLOWP_CHECKED_FIXEDPOINT_CONSTANT(F3, (1 << 28) + (1 << 27), 1.5);
  // Newton-Raphson iteration
  // Naive unoptimized starting guess: x = 1
  F3 x = F3::One();
  // Naive unoptimized number of iterations: 5
  for (int i = 0; i < 5; i++) {
    const F3 x3 = Rescale<3>(x * x * x);
    x = Rescale<3>(fixedpoint_half_three * x - fixedpoint_half_input * x3);
  }
  const F0 fixedpoint_half_sqrt_2 =
      GEMMLOWP_CHECKED_FIXEDPOINT_CONSTANT(F0, 1518500250, std::sqrt(2.) / 2.);
  x = x * fixedpoint_half_sqrt_2;
  *output_inv_sqrt = x.raw();
  if (*output_shift < 0) {
    *output_inv_sqrt <<= -*output_shift;
    *output_shift = 0;
  }
  // Convert right shift (right is positive) to left shift.
  *output_shift *= reverse_shift;
}

// DO NOT USE THIS STRUCT FOR NEW FUNCTIONALITY BEYOND IMPLEMENTING
// BROADCASTING.
//
// NdArrayDesc<N> describes the shape and memory layout of an N-dimensional
// rectangular array of numbers.
//
// NdArrayDesc<N> is basically identical to Dims<N> defined in types.h.
// However, as Dims<N> is to be deprecated, this class exists as an adaptor
// to enable simple unoptimized implementations of element-wise broadcasting
// operations.
template <int N>
struct NdArrayDesc {
  // The "extent" of each dimension. Indices along dimension d must be in the
  // half-open interval [0, extents[d]).
  int extents[N];

  // The number of *elements* (not bytes) between consecutive indices of each
  // dimension.
  int strides[N];
};

// DO NOT USE THIS FUNCTION FOR NEW FUNCTIONALITY BEYOND IMPLEMENTING
// BROADCASTING.
//
// Same as Offset(), except takes as NdArrayDesc<N> instead of Dims<N>.
inline int SubscriptToIndex(const NdArrayDesc<4>& desc, int i0, int i1, int i2,
                            int i3) {
  TFLITE_DCHECK(i0 >= 0 && i0 < desc.extents[0]);
  TFLITE_DCHECK(i1 >= 0 && i1 < desc.extents[1]);
  TFLITE_DCHECK(i2 >= 0 && i2 < desc.extents[2]);
  TFLITE_DCHECK(i3 >= 0 && i3 < desc.extents[3]);
  return i0 * desc.strides[0] + i1 * desc.strides[1] + i2 * desc.strides[2] +
         i3 * desc.strides[3];
}

inline int SubscriptToIndex(const NdArrayDesc<5>& desc, int indexes[5]) {
  return indexes[0] * desc.strides[0] + indexes[1] * desc.strides[1] +
         indexes[2] * desc.strides[2] + indexes[3] * desc.strides[3] +
         indexes[4] * desc.strides[4];
}

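// Illustrative example: for a fully-packed NHWC descriptor with extents
// {2, 4, 4, 8}, the strides are {128, 32, 8, 1}, so
// SubscriptToIndex(desc, 1, 2, 3, 4) == 1 * 128 + 2 * 32 + 3 * 8 + 4 == 220.
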
// Given the dimensions of the operands for an element-wise binary broadcast,
// adjusts them so that they can be directly iterated over with simple loops.
// Returns the adjusted dims as instances of NdArrayDesc in 'desc0_out' and
// 'desc1_out'. 'desc0_out' and 'desc1_out' cannot be nullptr.
//
// This function assumes that the two input shapes are compatible up to
// broadcasting and the shorter one has already been prepended with 1s to be
// the same length. E.g., if shape0 is (1, 16, 16, 64) and shape1 is (1, 64),
// shape1 must already have been prepended to be (1, 1, 1, 64). Recall that
// Dims<N> refer to shapes in reverse order. In this case, input0_dims will be
// (64, 16, 16, 1) and input1_dims will be (64, 1, 1, 1).
//
// When two shapes are compatible up to broadcasting, for each dimension d,
// the input extents are either equal, or one of them is 1.
//
// This function performs the following for each dimension d:
// - If the extents are equal, then do nothing since the loop that walks over
//   both of the input arrays is correct.
// - Otherwise, one (and only one) of the extents must be 1. Say extent0 is 1
//   and extent1 is e1. Then set extent0 to e1 and stride0 *to 0*. This allows
//   array0 to be referenced *at any index* in dimension d and still access the
//   same slice.
template <int N>
inline void NdArrayDescsForElementwiseBroadcast(const Dims<N>& input0_dims,
                                                const Dims<N>& input1_dims,
                                                NdArrayDesc<N>* desc0_out,
                                                NdArrayDesc<N>* desc1_out) {
  TFLITE_DCHECK(desc0_out != nullptr);
  TFLITE_DCHECK(desc1_out != nullptr);

  // Copy dims to desc.
  for (int i = 0; i < N; ++i) {
    desc0_out->extents[i] = input0_dims.sizes[i];
    desc0_out->strides[i] = input0_dims.strides[i];
    desc1_out->extents[i] = input1_dims.sizes[i];
    desc1_out->strides[i] = input1_dims.strides[i];
  }

  // Walk over each dimension. If the extents are equal do nothing.
  // Otherwise, set the desc with extent 1 to have extent equal to the other
  // and stride 0.
  for (int i = 0; i < N; ++i) {
    const int extent0 = ArraySize(input0_dims, i);
    const int extent1 = ArraySize(input1_dims, i);
    if (extent0 != extent1) {
      if (extent0 == 1) {
        desc0_out->strides[i] = 0;
        desc0_out->extents[i] = extent1;
      } else {
        TFLITE_DCHECK_EQ(extent1, 1);
        desc1_out->strides[i] = 0;
        desc1_out->extents[i] = extent0;
      }
    }
  }
}

// Copies dims to desc, calculating strides.
template <int N>
inline void CopyDimsToDesc(const RuntimeShape& input_shape,
                           NdArrayDesc<N>* desc_out) {
  int desc_stride = 1;
  for (int i = N - 1; i >= 0; --i) {
    desc_out->extents[i] = input_shape.Dims(i);
    desc_out->strides[i] = desc_stride;
    desc_stride *= input_shape.Dims(i);
  }
}

template <int N>
inline void NdArrayDescsForElementwiseBroadcast(
    const RuntimeShape& input0_shape, const RuntimeShape& input1_shape,
    NdArrayDesc<N>* desc0_out, NdArrayDesc<N>* desc1_out) {
  TFLITE_DCHECK(desc0_out != nullptr);
  TFLITE_DCHECK(desc1_out != nullptr);

  auto extended_input0_shape = RuntimeShape::ExtendedShape(N, input0_shape);
  auto extended_input1_shape = RuntimeShape::ExtendedShape(N, input1_shape);

  // Copy dims to desc, calculating strides.
  CopyDimsToDesc<N>(extended_input0_shape, desc0_out);
  CopyDimsToDesc<N>(extended_input1_shape, desc1_out);

  // Walk over each dimension. If the extents are equal do nothing.
  // Otherwise, set the desc with extent 1 to have extent equal to the other
  // and stride 0.
  for (int i = 0; i < N; ++i) {
    const int extent0 = extended_input0_shape.Dims(i);
    const int extent1 = extended_input1_shape.Dims(i);
    if (extent0 != extent1) {
      if (extent0 == 1) {
        desc0_out->strides[i] = 0;
        desc0_out->extents[i] = extent1;
      } else {
        TFLITE_DCHECK_EQ(extent1, 1);
        desc1_out->strides[i] = 0;
        desc1_out->extents[i] = extent0;
      }
    }
  }
}

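// Illustrative example: broadcasting a per-channel vector against an NHWC
// tensor. With input0_shape = (1, 16, 16, 64) and input1_shape = (64), the
// second descriptor ends up with extents {1, 16, 16, 64} and strides
// {64, 0, 0, 1}, so SubscriptToIndex(desc1, 0, y, x, c) == c for every (y, x),
// i.e. the vector is reused across the spatial dimensions.
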
template <int N>
inline void NdArrayDescsForElementwiseBroadcast(
    const RuntimeShape& input0_shape, const RuntimeShape& input1_shape,
    const RuntimeShape& input2_shape, NdArrayDesc<N>* desc0_out,
    NdArrayDesc<N>* desc1_out, NdArrayDesc<N>* desc2_out) {
  TFLITE_DCHECK(desc0_out != nullptr);
  TFLITE_DCHECK(desc1_out != nullptr);
  TFLITE_DCHECK(desc2_out != nullptr);

  auto extended_input0_shape = RuntimeShape::ExtendedShape(N, input0_shape);
  auto extended_input1_shape = RuntimeShape::ExtendedShape(N, input1_shape);
  auto extended_input2_shape = RuntimeShape::ExtendedShape(N, input2_shape);

  // Copy dims to desc, calculating strides.
  CopyDimsToDesc<N>(extended_input0_shape, desc0_out);
  CopyDimsToDesc<N>(extended_input1_shape, desc1_out);
  CopyDimsToDesc<N>(extended_input2_shape, desc2_out);

  // Walk over each dimension. If the extents are equal do nothing.
  // Otherwise, set the desc with extent 1 to have extent equal to the other
  // and stride 0.
  for (int i = 0; i < N; ++i) {
    const int extent0 = extended_input0_shape.Dims(i);
    const int extent1 = extended_input1_shape.Dims(i);
    const int extent2 = extended_input2_shape.Dims(i);

    int extent = extent0;
    if (extent1 != 1) extent = extent1;
    if (extent2 != 1) extent = extent2;

    TFLITE_DCHECK(extent0 == 1 || extent0 == extent);
    TFLITE_DCHECK(extent1 == 1 || extent1 == extent);
    TFLITE_DCHECK(extent2 == 1 || extent2 == extent);

    if (!(extent0 == extent1 && extent1 == extent2)) {
      if (extent0 == 1) {
        desc0_out->strides[i] = 0;
        desc0_out->extents[i] = extent;
      }
      if (extent1 == 1) {
        desc1_out->strides[i] = 0;
        desc1_out->extents[i] = extent;
      }
      if (extent2 == 1) {
        desc2_out->strides[i] = 0;
        desc2_out->extents[i] = extent;
      }
    }
  }
}

// Detailed implementation of NDOpsHelper, the indexes must be a zero array.
// This implementation is equivalent to N nested loops. E.g., if N=4, it can be
// re-written as:
// for (int b = 0; b < output.extents[0]; ++b) {
//   for (int y = 0; y < output.extents[1]; ++y) {
//     for (int x = 0; x < output.extents[2]; ++x) {
//       for (int c = 0; c < output.extents[3]; ++c) {
//         calc({b,y,x,c});
//       }
//     }
//   }
// }
template <int N, int DIM, typename Calc>
typename std::enable_if<DIM != N - 1, void>::type NDOpsHelperImpl(
    const NdArrayDesc<N>& output, const Calc& calc, int indexes[N]) {
  for (indexes[DIM] = 0; indexes[DIM] < output.extents[DIM]; ++indexes[DIM]) {
    NDOpsHelperImpl<N, DIM + 1, Calc>(output, calc, indexes);
  }
}

template <int N, int DIM, typename Calc>
typename std::enable_if<DIM == N - 1, void>::type NDOpsHelperImpl(
    const NdArrayDesc<N>& output, const Calc& calc, int indexes[N]) {
  for (indexes[DIM] = 0; indexes[DIM] < output.extents[DIM]; ++indexes[DIM]) {
    calc(indexes);
  }
}

// Execute the calc function in the innermost iteration based on the shape of
// the output. The calc function should take a single argument of type int[N].
template <int N, typename Calc>
inline void NDOpsHelper(const NdArrayDesc<N>& output, const Calc& calc) {
  int indexes[N] = {0};
  NDOpsHelperImpl<N, 0, Calc>(output, calc, indexes);
}

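// Example usage (illustrative; 'desc' and 'data' are placeholders): summing
// every element of a 4-D array whose layout is described by 'desc', using the
// broadcast-aware index helper:
//
//   float total = 0.f;
//   NDOpsHelper<4>(desc, [&](int* idx) {
//     total += data[SubscriptToIndex(desc, idx[0], idx[1], idx[2], idx[3])];
//   });
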
// Copied from gemmlowp::RoundDown when we dropped direct dependency on
// gemmlowp.
//
// Returns the runtime argument rounded down to the nearest multiple of
// the fixed Modulus.
template <unsigned Modulus, typename Integer>
Integer RoundDown(Integer i) {
  return i - (i % Modulus);
}

// Copied from gemmlowp::RoundUp when we dropped direct dependency on
// gemmlowp.
//
// Returns the runtime argument rounded up to the nearest multiple of
// the fixed Modulus.
template <unsigned Modulus, typename Integer>
Integer RoundUp(Integer i) {
  return RoundDown<Modulus>(i + Modulus - 1);
}

// Copied from gemmlowp::CeilQuotient when we dropped direct dependency on
// gemmlowp.
//
// Returns the quotient a / b rounded up ('ceil') to the nearest integer.
template <typename Integer>
Integer CeilQuotient(Integer a, Integer b) {
  return (a + b - 1) / b;
}

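// Illustrative examples: RoundDown<8>(13) == 8, RoundUp<8>(13) == 16, and
// CeilQuotient(13, 4) == 4.
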
// This function is a copy of gemmlowp::HowManyThreads, copied when we dropped
// the direct dependency of internal/optimized/ on gemmlowp.
//
// It computes a reasonable number of threads to use for a GEMM of shape
// (rows, cols, depth).
//
// TODO(b/131910176): get rid of this function by switching each call site
// to its own more sensible logic for its own workload.
template <int KernelRows>
inline int LegacyHowManyThreads(int max_num_threads, int rows, int cols,
                                int depth) {
  // Early-exit in the default case where multi-threading is disabled.
  if (max_num_threads == 1) {
    return 1;
  }

  // Ensure that each thread has KernelRows rows to process, if at all possible.
  int thread_count = std::min(max_num_threads, rows / KernelRows);

  // Limit the number of threads according to the overall size of the problem.
  if (thread_count > 1) {
    // Empirically determined value.
    static constexpr std::uint64_t min_cubic_size_per_thread = 64 * 1024;

    // We can only multiply two out of three sizes without risking overflow
    const std::uint64_t cubic_size =
        std::uint64_t(rows) * std::uint64_t(cols) * std::uint64_t(depth);

    thread_count = std::min(
        thread_count, static_cast<int>(cubic_size / min_cubic_size_per_thread));
  }

  if (thread_count < 1) {
    thread_count = 1;
  }

  assert(thread_count > 0 && thread_count <= max_num_threads);
  return thread_count;
}

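// Illustrative worked example: with KernelRows = 4, max_num_threads = 4 and a
// 256 x 256 x 256 GEMM, rows / KernelRows = 64 so the row criterion allows 4
// threads, and cubic_size / min_cubic_size_per_thread = 2^24 / 2^16 = 256, so
// all 4 threads are used.
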
template <typename T>
void optimized_ops_preload_l1_stream(const T* ptr) {
#ifdef __GNUC__
  // builtin offered by GCC-compatible compilers including clang
  __builtin_prefetch(ptr, /* 0 means read */ 0, /* 0 means no locality */ 0);
#else
  (void)ptr;
#endif
}

template <typename T>
void optimized_ops_preload_l1_keep(const T* ptr) {
#ifdef __GNUC__
  // builtin offered by GCC-compatible compilers including clang
  __builtin_prefetch(ptr, /* 0 means read */ 0, /* 3 means high locality */ 3);
#else
  (void)ptr;
#endif
}

template <typename T>
void optimized_ops_prefetch_write_l1_keep(const T* ptr) {
#ifdef __GNUC__
  // builtin offered by GCC-compatible compilers including clang
  __builtin_prefetch(ptr, /* 1 means write */ 1, /* 3 means high locality */ 3);
#else
  (void)ptr;
#endif
}

}  // namespace tflite

#endif  // TENSORFLOW_LITE_KERNELS_INTERNAL_COMMON_H_