/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ADD_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ADD_H_

#include <algorithm>
#include <cstdint>
#include <limits>

#include "fixedpoint/fixedpoint.h"
#include "tensorflow/lite/kernels/internal/common.h"

namespace tflite {
namespace reference_ops {

template <typename T>
inline void Add(const ArithmeticParams& params,
                const RuntimeShape& input1_shape, const T* input1_data,
                const RuntimeShape& input2_shape, const T* input2_data,
                const RuntimeShape& output_shape, T* output_data) {
  const int flat_size =
      MatchingElementsSize(input1_shape, input2_shape, output_shape);
  for (int i = 0; i < flat_size; ++i) {
    output_data[i] = ActivationFunctionWithMinMax(
        input1_data[i] + input2_data[i], params.quantized_activation_min,
        params.quantized_activation_max);
  }
}
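
// Note: the templated overload above clamps with the quantized activation
// bounds (quantized_activation_min/max) and does no rescaling, so it is
// intended for integer element types whose inputs are assumed to already
// share the output scale (for example, a plain int32 add). The float
// overload below uses float_activation_min/max instead.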

inline void Add(const ArithmeticParams& params,
                const RuntimeShape& input1_shape, const float* input1_data,
                const RuntimeShape& input2_shape, const float* input2_data,
                const RuntimeShape& output_shape, float* output_data) {
  const int flat_size =
      MatchingElementsSize(input1_shape, input2_shape, output_shape);
  for (int i = 0; i < flat_size; i++) {
    auto x = input1_data[i] + input2_data[i];
    output_data[i] = ActivationFunctionWithMinMax(
        x, params.float_activation_min, params.float_activation_max);
  }
}

// Element-wise add that can often be used for the inner loop of broadcast add
// as well as the non-broadcast add.
// This function is used for 8-bit as well as for 16-bit, but the accumulator
// is 32-bit for both cases. Overflow cannot happen thanks to the choice of
// shift (20 or 15, respectively - see add.cc for more comments).
template <typename T>
inline void AddElementwise(int size, const ArithmeticParams& params,
                           const T* input1_data, const T* input2_data,
                           T* output_data) {
  TFLITE_DCHECK_GT(params.input1_offset, -std::numeric_limits<T>::max());
  TFLITE_DCHECK_GT(params.input2_offset, -std::numeric_limits<T>::max());
  TFLITE_DCHECK_LT(params.input1_offset, std::numeric_limits<T>::max());
  TFLITE_DCHECK_LT(params.input2_offset, std::numeric_limits<T>::max());
  for (int i = 0; i < size; ++i) {
    const int32_t input1_val = params.input1_offset + input1_data[i];
    const int32_t input2_val = params.input2_offset + input2_data[i];
    const int32_t shifted_input1_val = input1_val * (1 << params.left_shift);
    const int32_t shifted_input2_val = input2_val * (1 << params.left_shift);
    const int32_t scaled_input1_val =
        MultiplyByQuantizedMultiplierSmallerThanOneExp(
            shifted_input1_val, params.input1_multiplier, params.input1_shift);
    const int32_t scaled_input2_val =
        MultiplyByQuantizedMultiplierSmallerThanOneExp(
            shifted_input2_val, params.input2_multiplier, params.input2_shift);
    const int32_t raw_sum = scaled_input1_val + scaled_input2_val;
    const int32_t raw_output =
        MultiplyByQuantizedMultiplierSmallerThanOneExp(
            raw_sum, params.output_multiplier, params.output_shift) +
        params.output_offset;
    const int32_t clamped_output =
        std::min(params.quantized_activation_max,
                 std::max(params.quantized_activation_min, raw_output));
    output_data[i] = static_cast<T>(clamped_output);
  }
}
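
// Worked illustration of the loop above (hypothetical values, not taken from
// any particular model): with 8-bit data, params.left_shift is typically 20,
// so an offset-corrected input in [-255, 255] occupies at most 28 bits after
// the shift and still fits the 32-bit accumulator. The two per-input
// multipliers bring both operands to a common scale, the sum is rescaled to
// the output scale, re-centred by output_offset, and finally clamped to
// [quantized_activation_min, quantized_activation_max] - e.g. [0, 255] for
// uint8 with no fused activation.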

// Scalar-broadcast add that can be used for the inner loop of a more general
// broadcast add, so that, for example, scalar-broadcast with batch will still
// be fast.
inline void AddScalarBroadcast(int size, const ArithmeticParams& params,
                               uint8_t input1_data, const uint8_t* input2_data,
                               uint8_t* output_data) {
  TFLITE_DCHECK_GT(params.input1_offset, -256);
  TFLITE_DCHECK_GT(params.input2_offset, -256);
  TFLITE_DCHECK_LT(params.input1_offset, 256);
  TFLITE_DCHECK_LT(params.input2_offset, 256);
  const int32_t input1_val = params.input1_offset + input1_data;
  const int32_t shifted_input1_val = input1_val * (1 << params.left_shift);
  const int32_t scaled_input1_val =
      MultiplyByQuantizedMultiplierSmallerThanOneExp(
          shifted_input1_val, params.input1_multiplier, params.input1_shift);
  for (int i = 0; i < size; ++i) {
    const int32_t input2_val = params.input2_offset + input2_data[i];
    const int32_t shifted_input2_val = input2_val * (1 << params.left_shift);
    const int32_t scaled_input2_val =
        MultiplyByQuantizedMultiplierSmallerThanOneExp(
            shifted_input2_val, params.input2_multiplier, params.input2_shift);
    const int32_t raw_sum = scaled_input1_val + scaled_input2_val;
    const int32_t raw_output =
        MultiplyByQuantizedMultiplierSmallerThanOneExp(
            raw_sum, params.output_multiplier, params.output_shift) +
        params.output_offset;
    const int32_t clamped_output =
        std::min(params.quantized_activation_max,
                 std::max(params.quantized_activation_min, raw_output));
    output_data[i] = static_cast<uint8_t>(clamped_output);
  }
}
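
// Note that the scalar operand (input1) is offset, shifted and rescaled once
// before the loop above, so broadcasting a scalar across a large tensor costs
// roughly one rescale per output element instead of two.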

inline void Add(const ArithmeticParams& params,
                const RuntimeShape& input1_shape, const uint8_t* input1_data,
                const RuntimeShape& input2_shape, const uint8_t* input2_data,
                const RuntimeShape& output_shape, uint8_t* output_data) {
  TFLITE_DCHECK_LE(params.quantized_activation_min,
                   params.quantized_activation_max);
  const int flat_size =
      MatchingElementsSize(input1_shape, input2_shape, output_shape);
  TFLITE_DCHECK_GT(params.input1_offset, -256);
  TFLITE_DCHECK_GT(params.input2_offset, -256);
  TFLITE_DCHECK_LT(params.input1_offset, 256);
  TFLITE_DCHECK_LT(params.input2_offset, 256);
  AddElementwise(flat_size, params, input1_data, input2_data, output_data);
}
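
// Usage sketch (illustrative only - the concrete offsets, multipliers and
// shifts below are hypothetical; in practice they are computed during kernel
// preparation from the input/output quantization parameters):
//
//   ArithmeticParams params;
//   params.left_shift = 20;
//   params.input1_offset = -input1_zero_point;  // negated input zero point
//   params.input2_offset = -input2_zero_point;
//   params.output_offset = output_zero_point;
//   params.input1_multiplier = /* Q31 multiplier */;  // with input1_shift
//   params.input2_multiplier = /* Q31 multiplier */;  // with input2_shift
//   params.output_multiplier = /* Q31 multiplier */;  // with output_shift
//   params.quantized_activation_min = 0;
//   params.quantized_activation_max = 255;
//   Add(params, shape, input1, shape, input2, shape, output);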

inline void AddGeneralParamScale(const ArithmeticParams& params,
                                 const RuntimeShape& input1_shape,
                                 const int16_t* input1_data,
                                 const RuntimeShape& input2_shape,
                                 const int16_t* input2_data,
                                 const RuntimeShape& output_shape,
                                 int16_t* output_data) {
  TFLITE_DCHECK_LE(params.quantized_activation_min,
                   params.quantized_activation_max);
  const int flat_size =
      MatchingElementsSize(input1_shape, input2_shape, output_shape);

  int max_value = std::numeric_limits<int16_t>::max();

  TFLITE_DCHECK_GT(params.input1_offset, -max_value);
  TFLITE_DCHECK_GT(params.input2_offset, -max_value);
  TFLITE_DCHECK_LT(params.input1_offset, max_value);
  TFLITE_DCHECK_LT(params.input2_offset, max_value);
  AddElementwise(flat_size, params, input1_data, input2_data, output_data);
}

inline void Add(const ArithmeticParams& params,
                const RuntimeShape& input1_shape, const int16_t* input1_data,
                const RuntimeShape& input2_shape, const int16_t* input2_data,
                const RuntimeShape& output_shape, int16_t* output_data,
                bool pot_scale = true) {
  if (!pot_scale) {
    AddGeneralParamScale(params, input1_shape, input1_data, input2_shape,
                         input2_data, output_shape, output_data);
    return;
  }

  TFLITE_DCHECK_LE(params.quantized_activation_min,
                   params.quantized_activation_max);

  const int input1_shift = params.input1_shift;
  const int flat_size =
      MatchingElementsSize(input1_shape, input2_shape, output_shape);
  const int16_t output_activation_min = params.quantized_activation_min;
  const int16_t output_activation_max = params.quantized_activation_max;

  TFLITE_DCHECK(input1_shift == 0 || params.input2_shift == 0);
  TFLITE_DCHECK_LE(input1_shift, 0);
  TFLITE_DCHECK_LE(params.input2_shift, 0);
  const int16_t* not_shift_input =
      input1_shift == 0 ? input1_data : input2_data;
  const int16_t* shift_input = input1_shift == 0 ? input2_data : input1_data;
  const int input_right_shift =
      input1_shift == 0 ? -params.input2_shift : -input1_shift;

  for (int i = 0; i < flat_size; i++) {
    // F0 uses 0 integer bits, range [-1, 1].
    using F0 = gemmlowp::FixedPoint<std::int16_t, 0>;

    F0 input_ready_scaled = F0::FromRaw(not_shift_input[i]);
    F0 scaled_input = F0::FromRaw(
        gemmlowp::RoundingDivideByPOT(shift_input[i], input_right_shift));
    F0 result = gemmlowp::SaturatingAdd(scaled_input, input_ready_scaled);
    const int16_t raw_output = result.raw();
    const int16_t clamped_output = std::min(
        output_activation_max, std::max(output_activation_min, raw_output));
    output_data[i] = clamped_output;
  }
}
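
// Illustration of the power-of-two path above (hypothetical raw values): with
// params.input1_shift == 0 and params.input2_shift == -1, input2 is the
// operand that gets pre-shifted, so each of its raw Q0.15 values is divided
// by 2 with rounding before the saturating add. Raw inputs 16384 (0.5) and
// 16384 (halved to 8192, i.e. 0.25) then produce 24576 (0.75); sums that
// would exceed 32767 saturate there instead of wrapping.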

// TODO(jiawen): We can implement BroadcastAdd on buffers of arbitrary
// dimensionality if the runtime code does a single loop over one dimension
// that handles broadcasting as the base case. The code generator would then
// generate max(D1, D2) nested for loops.
// TODO(benoitjacob): BroadcastAdd is intentionally duplicated from
// reference_ops.h. Once an optimized version is implemented and NdArrayDesc<T>
// is no longer referenced in this file, move NdArrayDesc<T> from types.h to
// reference_ops.h.
inline void BroadcastAdd4DSlow(const ArithmeticParams& params,
                               const RuntimeShape& input1_shape,
                               const float* input1_data,
                               const RuntimeShape& input2_shape,
                               const float* input2_data,
                               const RuntimeShape& output_shape,
                               float* output_data) {
  NdArrayDesc<4> desc1;
  NdArrayDesc<4> desc2;
  NdArrayDescsForElementwiseBroadcast(input1_shape, input2_shape, &desc1,
                                      &desc2);
  const RuntimeShape extended_output_shape =
      RuntimeShape::ExtendedShape(4, output_shape);

  // In TensorFlow, the dimensions are canonically named (batch_number, row,
  // col, channel), with extents (batches, height, width, depth), with the
  // trailing dimension changing most rapidly (channels has the smallest
  // stride, typically 1 element).
  //
  // In generated C code, we store arrays with the dimensions reversed. The
  // first dimension has the smallest stride.
  //
  // We name our variables by their TensorFlow convention, but generate C code
  // nesting loops such that the innermost loop has the smallest stride for
  // the best cache behavior.
  for (int b = 0; b < extended_output_shape.Dims(0); ++b) {
    for (int y = 0; y < extended_output_shape.Dims(1); ++y) {
      for (int x = 0; x < extended_output_shape.Dims(2); ++x) {
        for (int c = 0; c < extended_output_shape.Dims(3); ++c) {
          output_data[Offset(extended_output_shape, b, y, x, c)] =
              ActivationFunctionWithMinMax(
                  input1_data[SubscriptToIndex(desc1, b, y, x, c)] +
                      input2_data[SubscriptToIndex(desc2, b, y, x, c)],
                  params.float_activation_min, params.float_activation_max);
        }
      }
    }
  }
}
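
// Broadcasting illustration (hypothetical shapes): adding a [2, 1, 3, 1]
// tensor to a [1, 4, 3, 5] tensor yields a [2, 4, 3, 5] output.
// NdArrayDescsForElementwiseBroadcast gives each broadcast (size-1) dimension
// a stride of zero, so SubscriptToIndex maps every output coordinate back
// into the smaller operand and each operand element is simply re-read for
// every broadcast repetition.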

inline void BroadcastAdd4DSlow(const ArithmeticParams& params,
                               const RuntimeShape& input1_shape,
                               const int32_t* input1_data,
                               const RuntimeShape& input2_shape,
                               const int32_t* input2_data,
                               const RuntimeShape& output_shape,
                               int32_t* output_data) {
  NdArrayDesc<4> desc1;
  NdArrayDesc<4> desc2;
  NdArrayDescsForElementwiseBroadcast(input1_shape, input2_shape, &desc1,
                                      &desc2);
  const RuntimeShape extended_output_shape =
      RuntimeShape::ExtendedShape(4, output_shape);

  // In TensorFlow, the dimensions are canonically named (batch_number, row,
  // col, channel), with extents (batches, height, width, depth), with the
  // trailing dimension changing most rapidly (channels has the smallest
  // stride, typically 1 element).
  //
  // In generated C code, we store arrays with the dimensions reversed. The
  // first dimension has the smallest stride.
  //
  // We name our variables by their TensorFlow convention, but generate C code
  // nesting loops such that the innermost loop has the smallest stride for
  // the best cache behavior.
  for (int b = 0; b < extended_output_shape.Dims(0); ++b) {
    for (int y = 0; y < extended_output_shape.Dims(1); ++y) {
      for (int x = 0; x < extended_output_shape.Dims(2); ++x) {
        for (int c = 0; c < extended_output_shape.Dims(3); ++c) {
          output_data[Offset(extended_output_shape, b, y, x, c)] =
              ActivationFunctionWithMinMax(
                  input1_data[SubscriptToIndex(desc1, b, y, x, c)] +
                      input2_data[SubscriptToIndex(desc2, b, y, x, c)],
                  params.quantized_activation_min,
                  params.quantized_activation_max);
        }
      }
    }
  }
}

// This function is used for 8-bit as well as for 16-bit, but the accumulator
// is 32-bit for both cases. Overflow cannot happen thanks to the choice of
// shift (20 or 15, respectively - see add.cc for more comments).
template <typename T>
inline void BroadcastAdd4DSlow(
    const ArithmeticParams& params, const RuntimeShape& input1_shape,
    const T* input1_data, const RuntimeShape& input2_shape,
    const T* input2_data, const RuntimeShape& output_shape, T* output_data) {
  NdArrayDesc<4> desc1;
  NdArrayDesc<4> desc2;
  NdArrayDescsForElementwiseBroadcast(input1_shape, input2_shape, &desc1,
                                      &desc2);
  const RuntimeShape extended_output_shape =
      RuntimeShape::ExtendedShape(4, output_shape);

  // In TensorFlow, the dimensions are canonically named (batch_number, row,
  // col, channel), with extents (batches, height, width, depth), with the
  // trailing dimension changing most rapidly (channels has the smallest
  // stride, typically 1 element).
  //
  // In generated C code, we store arrays with the dimensions reversed. The
  // first dimension has the smallest stride.
  //
  // We name our variables by their TensorFlow convention, but generate C code
  // nesting loops such that the innermost loop has the smallest stride for
  // the best cache behavior.
  for (int b = 0; b < extended_output_shape.Dims(0); ++b) {
    for (int y = 0; y < extended_output_shape.Dims(1); ++y) {
      for (int x = 0; x < extended_output_shape.Dims(2); ++x) {
        for (int c = 0; c < extended_output_shape.Dims(3); ++c) {
          const int32_t input1_val =
              params.input1_offset +
              input1_data[SubscriptToIndex(desc1, b, y, x, c)];
          const int32_t input2_val =
              params.input2_offset +
              input2_data[SubscriptToIndex(desc2, b, y, x, c)];
          const int32_t shifted_input1_val =
              input1_val * (1 << params.left_shift);
          const int32_t shifted_input2_val =
              input2_val * (1 << params.left_shift);
          const int32_t scaled_input1_val =
              MultiplyByQuantizedMultiplierSmallerThanOneExp(
                  shifted_input1_val, params.input1_multiplier,
                  params.input1_shift);
          const int32_t scaled_input2_val =
              MultiplyByQuantizedMultiplierSmallerThanOneExp(
                  shifted_input2_val, params.input2_multiplier,
                  params.input2_shift);
          const int32_t raw_sum = scaled_input1_val + scaled_input2_val;
          const int32_t raw_output =
              MultiplyByQuantizedMultiplierSmallerThanOneExp(
                  raw_sum, params.output_multiplier, params.output_shift) +
              params.output_offset;
          const int32_t clamped_output =
              std::min(params.quantized_activation_max,
                       std::max(params.quantized_activation_min, raw_output));
          output_data[Offset(extended_output_shape, b, y, x, c)] =
              static_cast<T>(clamped_output);
        }
      }
    }
  }
}

inline void BroadcastAddFivefold(const ArithmeticParams& unswitched_params,
                                 const RuntimeShape& unswitched_input1_shape,
                                 const uint8_t* unswitched_input1_data,
                                 const RuntimeShape& unswitched_input2_shape,
                                 const uint8_t* unswitched_input2_data,
                                 const RuntimeShape& output_shape,
                                 uint8_t* output_data) {
  ArithmeticParams switched_params = unswitched_params;
  switched_params.input1_offset = unswitched_params.input2_offset;
  switched_params.input1_multiplier = unswitched_params.input2_multiplier;
  switched_params.input1_shift = unswitched_params.input2_shift;
  switched_params.input2_offset = unswitched_params.input1_offset;
  switched_params.input2_multiplier = unswitched_params.input1_multiplier;
  switched_params.input2_shift = unswitched_params.input1_shift;

  const bool use_unswitched =
      unswitched_params.broadcast_category ==
      tflite::BroadcastableOpCategory::kFirstInputBroadcastsFast;

  const ArithmeticParams& params =
      use_unswitched ? unswitched_params : switched_params;
  const uint8_t* input1_data =
      use_unswitched ? unswitched_input1_data : unswitched_input2_data;
  const uint8_t* input2_data =
      use_unswitched ? unswitched_input2_data : unswitched_input1_data;

  // Fivefold nested loops. The second input resets its position for each
  // iteration of the second loop. The first input resets its position at the
  // beginning of the fourth loop. The innermost loop is an elementwise add of
  // sections of the arrays.
  uint8_t* output_data_ptr = output_data;
  const uint8_t* input1_data_ptr = input1_data;
  const uint8_t* input2_data_reset = input2_data;
  // In the fivefold pattern, y0, y2 and y4 are not broadcast, and so are
  // shared between the input shapes. y3 for input 1 is always broadcast, and
  // so the dimension there is 1, whereas optionally y1 might be broadcast for
  // input 2. Put another way,
  // input1.shape.FlatSize = y0 * y1 * y2 * y4,
  // input2.shape.FlatSize = y0 * y2 * y3 * y4.
  int y0 = params.broadcast_shape[0];
  int y1 = params.broadcast_shape[1];
  int y2 = params.broadcast_shape[2];
  int y3 = params.broadcast_shape[3];
  int y4 = params.broadcast_shape[4];
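  // Hypothetical example: input1 shape [2, 3, 1, 4] against input2 shape
  // [2, 1, 5, 4] decomposes as y0 = 2, y1 = 3, y2 = 1, y3 = 5, y4 = 4, so
  // that input1.FlatSize() == y0 * y1 * y2 * y4 == 24 and
  // input2.FlatSize() == y0 * y2 * y3 * y4 == 40.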
  if (y4 > 1) {
    // General fivefold pattern, with y4 > 1 so there is a non-broadcast inner
    // dimension.
    for (int i0 = 0; i0 < y0; ++i0) {
      const uint8_t* input2_data_ptr;
      for (int i1 = 0; i1 < y1; ++i1) {
        input2_data_ptr = input2_data_reset;
        for (int i2 = 0; i2 < y2; ++i2) {
          for (int i3 = 0; i3 < y3; ++i3) {
            AddElementwise(y4, params, input1_data_ptr, input2_data_ptr,
                           output_data_ptr);
            input2_data_ptr += y4;
            output_data_ptr += y4;
          }
          // We have broadcast y4 of input1 data y3 times, and now move on.
          input1_data_ptr += y4;
        }
      }
      // We have broadcast y2*y3*y4 of input2 data y1 times, and now move on.
      input2_data_reset = input2_data_ptr;
    }
  } else {
    // Special case of y4 == 1, in which the innermost loop is a single
    // element and can be combined with the next (y3) as an inner broadcast.
    //
    // Note that this handles the case of pure scalar broadcast when
    // y0 == y1 == y2 == 1. With low overhead it handles cases such as scalar
    // broadcast with batch (as y2 > 1).
    //
    // NOTE The process is the same as the above general case except
    // simplified for y4 == 1 and the loop over y3 is contained within the
    // AddScalarBroadcast function.
    for (int i0 = 0; i0 < y0; ++i0) {
      const uint8_t* input2_data_ptr;
      for (int i1 = 0; i1 < y1; ++i1) {
        input2_data_ptr = input2_data_reset;
        for (int i2 = 0; i2 < y2; ++i2) {
          AddScalarBroadcast(y3, params, *input1_data_ptr, input2_data_ptr,
                             output_data_ptr);
          input2_data_ptr += y3;
          output_data_ptr += y3;
          input1_data_ptr += 1;
        }
      }
      input2_data_reset = input2_data_ptr;
    }
  }
}

}  // namespace reference_ops
}  // namespace tflite

#endif  // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ADD_H_