/*
 * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved.
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the License); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an AS IS BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/* ----------------------------------------------------------------------
 * Project:      CMSIS NN Library
 * Title:        arm_convolve_s8.c
 * Description:  s8 version of convolution using symmetric quantization.
 *
 * $Date:        March 1, 2020
 * $Revision:    V.1.0.0
 *
 * Target Processor:  Cortex-M cores
 *
 * -------------------------------------------------------------------- */

#include "arm_math.h"
#include "arm_nnfunctions.h"
#include "arm_nnsupportfunctions.h"

/**
 * @ingroup groupNN
 */

/**
 * @addtogroup NNConv
 * @{
 */

/*
 * Basic s8 convolution function.
 *
 * Refer to the header file for details. The optimal use case for the DSP/MVE implementation is when the input and
 * output channels are multiples of 4, or at least greater than 4.
 *
 */
arm_status arm_convolve_s8(const q7_t *input,
                           const uint16_t input_x,
                           const uint16_t input_y,
                           const uint16_t input_ch,
                           const uint16_t input_batches,
                           const q7_t *kernel,
                           const uint16_t output_ch,
                           const uint16_t kernel_x,
                           const uint16_t kernel_y,
                           const uint16_t pad_x,
                           const uint16_t pad_y,
                           const uint16_t stride_x,
                           const uint16_t stride_y,
                           const int32_t *bias,
                           q7_t *output,
                           const int32_t *output_shift,
                           const int32_t *output_mult,
                           const int32_t out_offset,
                           const int32_t input_offset,
                           const int32_t out_activation_min,
                           const int32_t out_activation_max,
                           const uint16_t output_x,
                           const uint16_t output_y,
                           q15_t *buffer_a)
{
    int i_batch;
    for (i_batch = 0; i_batch < input_batches; i_batch++)
    {
#if defined(ARM_MATH_MVEI)
        /* Generate up to four columns from the input tensor for a GEMM computation */
        q7_t *im2col_buf = (q7_t *)buffer_a;
        q7_t *out = output;
        int32_t buffer_fill_cnt = 0;
        int32_t padded = 0;
        const int32_t num_elem = kernel_x * kernel_y * input_ch;

        /* This part implements the im2col function */
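        /* Each output position gathers one kernel_y x kernel_x x input_ch patch of the input
         * (out-of-bound positions are filled with -input_offset) into a contiguous column of
         * buffer_a, so the convolution reduces to multiplying the kernel rows with up to four
         * such columns at a time. */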
        for (int i_out_y = 0; i_out_y < output_y; i_out_y++)
        {
            for (int i_out_x = 0; i_out_x < output_x; i_out_x++)
            {
                for (int i_ker_y = i_out_y * stride_y - pad_y; i_ker_y < i_out_y * stride_y - pad_y + kernel_y; i_ker_y++)
                {
                    for (int i_ker_x = i_out_x * stride_x - pad_x; i_ker_x < i_out_x * stride_x - pad_x + kernel_x; i_ker_x++)
                    {
                        if (i_ker_y < 0 || i_ker_y >= input_y || i_ker_x < 0 || i_ker_x >= input_x)
                        {
                            memset(im2col_buf, (int8_t)-input_offset, sizeof(q7_t) * input_ch);
                            padded = 1;
                        }
                        else
                        {
                            arm_memcpy_q7(im2col_buf, input + (i_ker_y * input_x + i_ker_x) * input_ch, input_ch);
                        }
                        im2col_buf += input_ch;
                    }
                }
                buffer_fill_cnt++;

                /* Computation is performed for every 4 columns */
                if (buffer_fill_cnt == 4 && (padded == 0))
                {
                    buffer_fill_cnt = 0;
                    for (int i_out_ch = 0; i_out_ch < output_ch; i_out_ch++)
                    {
                        int32_t sum_row;
                        int32_t acc[4];
                        (void)arm_nn_mat_mul_core_4x_s8(num_elem,
                                                        num_elem,
                                                        (q7_t *)buffer_a,
                                                        kernel + num_elem * i_out_ch,
                                                        &sum_row,
                                                        acc);

                        /* Add the input offset contribution (input_offset * sum_row) and the bias,
                         * requantize, add the output offset and clamp to the activation range */
                        int32x4_t s_offset = vdupq_n_s32(sum_row);
                        int32x4_t res = vldrwq_s32(acc);
                        s_offset = vmulq_n_s32(s_offset, input_offset);
                        res = vaddq_n_s32(res, bias[i_out_ch]);
                        res = vaddq_s32(res, s_offset);
                        res = arm_requantize_mve(res, output_mult[i_out_ch], output_shift[i_out_ch]);
                        res = vaddq_n_s32(res, out_offset);
                        res = vmaxq_s32(res, vdupq_n_s32(out_activation_min));
                        res = vminq_s32(res, vdupq_n_s32(out_activation_max));

                        /* Store the four results for this channel at a stride of output_ch */
                        const uint32x4_t scatter_offset = {0, output_ch, output_ch * 2, output_ch * 3};
                        vstrbq_scatter_offset_s32(out, scatter_offset, res);
                        out++;
                    }
                    out += (3 * output_ch);
                    im2col_buf = (q7_t *)buffer_a;
                }
                else if (buffer_fill_cnt == 4 && (padded != 0))
                {
                    buffer_fill_cnt = 0;
                    out = arm_nn_mat_mult_s8(kernel,
                                             (q7_t *)buffer_a,
                                             output_ch,
                                             4,
                                             output_shift,
                                             output_mult,
                                             out_offset,
                                             input_offset,
                                             0,
                                             out_activation_min,
                                             out_activation_max,
                                             num_elem,
                                             bias,
                                             out);
                    im2col_buf = (q7_t *)buffer_a;
                    padded = 0;
                }
            }
        }

        /* Handle left-over columns */
        if (buffer_fill_cnt != 0)
        {
            out = arm_nn_mat_mult_s8(kernel,
                                     (q7_t *)buffer_a,
                                     output_ch,
                                     buffer_fill_cnt,
                                     output_shift,
                                     output_mult,
                                     out_offset,
                                     input_offset,
                                     0,
                                     out_activation_min,
                                     out_activation_max,
                                     num_elem,
                                     bias,
                                     out);
        }
#elif defined(ARM_MATH_DSP)
        int32_t i_out_y, i_out_x, i_ker_y, i_ker_x;

        /* Generate two columns from the input tensor for a GEMM computation */
        q15_t *two_column_buf = buffer_a;
        q7_t *out = output;

        /* This part implements the im2col function */
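        /* arm_q7_to_q15_with_offset() folds input_offset into the q15 column data as it converts,
         * so out-of-bound (padded) positions can simply be written as 0, which corresponds to an
         * input value of -input_offset. */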
        for (i_out_y = 0; i_out_y < output_y; i_out_y++)
        {
            for (i_out_x = 0; i_out_x < output_x; i_out_x++)
            {
                for (i_ker_y = i_out_y * stride_y - pad_y; i_ker_y < i_out_y * stride_y - pad_y + kernel_y; i_ker_y++)
                {
                    for (i_ker_x = i_out_x * stride_x - pad_x; i_ker_x < i_out_x * stride_x - pad_x + kernel_x; i_ker_x++)
                    {
                        if (i_ker_y < 0 || i_ker_y >= input_y || i_ker_x < 0 || i_ker_x >= input_x)
                        {
                            /* Filling 0 for out-of-bound paddings */
                            memset(two_column_buf, 0, sizeof(q15_t) * input_ch);
                        }
                        else
                        {
                            /* Copying the pixel data to column */
                            arm_q7_to_q15_with_offset(input + (i_ker_y * input_x + i_ker_x) * input_ch,
                                                      two_column_buf,
                                                      input_ch,
                                                      input_offset);
                        }
                        two_column_buf += input_ch;
                    }
                }

                /* Computation is performed for every 2 columns */
                if (two_column_buf == buffer_a + 2 * input_ch * kernel_y * kernel_x)
                {
                    out = arm_nn_mat_mult_kernel_s8_s16(kernel,
                                                        buffer_a,
                                                        output_ch,
                                                        output_shift,
                                                        output_mult,
                                                        out_offset,
                                                        out_activation_min,
                                                        out_activation_max,
                                                        input_ch * kernel_y * kernel_x,
                                                        bias,
                                                        out);
                    /* counter reset */
                    two_column_buf = buffer_a;
                }
            }
        }

        /* Left-over column because of an odd number of output pixels */
        if (two_column_buf != buffer_a)
        {
            const q7_t *ker_a = kernel;
            int i;

            for (i = 0; i < output_ch; i++)
            {
                /* Load the accumulator with bias first */
                q31_t sum = bias[i];

                /* Point to the beginning of the im2col buffer where the input is available as a rearranged column */
                const q15_t *ip_as_col = buffer_a;

                /* Four multiply-and-accumulate operations are done in each loop iteration. */
                uint16_t col_count = (input_ch * kernel_y * kernel_x) >> 2;

                while (col_count)
                {
                    q31_t ker_a1, ker_a2;
                    q31_t ip_b1, ip_b2;

                    ker_a = read_and_pad(ker_a, &ker_a1, &ker_a2);

                    ip_b1 = arm_nn_read_q15x2_ia(&ip_as_col);
                    sum = __SMLAD(ker_a1, ip_b1, sum);
                    ip_b2 = arm_nn_read_q15x2_ia(&ip_as_col);
                    sum = __SMLAD(ker_a2, ip_b2, sum);

                    col_count--;
                }

                /* Handle the left-over MAC operations */
                col_count = input_ch * kernel_y * kernel_x & 0x3;
                while (col_count)
                {
                    q7_t ker_a1 = *ker_a++;
                    q15_t ip_b1 = *ip_as_col++;
                    sum += ker_a1 * ip_b1;
                    col_count--;
                }

                /* Requantize the accumulator, add the output offset and clamp to the activation range */
                sum = arm_nn_requantize(sum, output_mult[i], output_shift[i]);
                sum += out_offset;
                sum = MAX(sum, out_activation_min);
                sum = MIN(sum, out_activation_max);
                *out++ = (q7_t)sum;
            }
        }
#else
        /* Run the following code as reference implementation for Cortex-M0 and Cortex-M3 */
        (void)buffer_a;
        int32_t i_out_ch, i_out_y, i_out_x, i_input_ch, i_ker_y, i_ker_x;
        int32_t conv_out;

        for (i_out_ch = 0; i_out_ch < output_ch; i_out_ch++)
        {
            for (i_out_y = 0; i_out_y < output_y; i_out_y++)
            {
                for (i_out_x = 0; i_out_x < output_x; i_out_x++)
                {
                    conv_out = bias[i_out_ch];

                    const int32_t base_idx_y = stride_y * i_out_y - pad_y;
                    const int32_t base_idx_x = stride_x * i_out_x - pad_x;

                    /* Limit the kernel window to the part that overlaps the input; padded positions
                     * contribute zero and are skipped */
                    const int32_t ker_y_start = MAX(0, -base_idx_y);
                    const int32_t ker_x_start = MAX(0, -base_idx_x);
                    const int32_t ker_y_end = MIN(kernel_y, input_y - base_idx_y);
                    const int32_t ker_x_end = MIN(kernel_x, input_x - base_idx_x);

                    for (i_ker_y = ker_y_start; i_ker_y < ker_y_end; i_ker_y++)
                    {
                        for (i_ker_x = ker_x_start; i_ker_x < ker_x_end; i_ker_x++)
                        {
                            const int32_t in_row = base_idx_y + i_ker_y;
                            const int32_t in_col = base_idx_x + i_ker_x;
                            for (i_input_ch = 0; i_input_ch < input_ch; i_input_ch++)
                            {
                                conv_out +=
                                    (input[(in_row * input_x + in_col) * input_ch + i_input_ch] + input_offset) *
                                    kernel[i_out_ch * input_ch * kernel_y * kernel_x +
                                           (i_ker_y * kernel_x + i_ker_x) * input_ch + i_input_ch];
                            }
                        }
                    }
                    conv_out = arm_nn_requantize(conv_out, output_mult[i_out_ch], output_shift[i_out_ch]);
                    conv_out += out_offset;
                    conv_out = MAX(conv_out, out_activation_min);
                    conv_out = MIN(conv_out, out_activation_max);
                    output[i_out_ch + (i_out_y * output_x + i_out_x) * output_ch] = (int8_t)conv_out;
                }
            }
        }
#endif

        /* Advance to the next batch */
        input += (input_x * input_y * input_ch);
        output += (output_x * output_y * output_ch);
    }

    /* Return to application */
    return ARM_MATH_SUCCESS;
}

int32_t arm_convolve_s8_get_buffer_size(const uint16_t input_ch,
                                        const uint16_t kernel_x,
                                        const uint16_t kernel_y)
{
#if defined(ARM_MATH_DSP)
    return (2 * input_ch * kernel_x * kernel_y) * sizeof(int16_t);
#else
    (void)input_ch;
    (void)kernel_x;
    (void)kernel_y;
    return 0;
#endif
}
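
/*
 * Illustrative usage sketch (not part of the library, excluded from the build): shows how
 * arm_convolve_s8_get_buffer_size() is used to size the scratch buffer passed to
 * arm_convolve_s8(). The helper name, tensor dimensions and quantization parameters below
 * are hypothetical example values.
 */
#if 0
static arm_status example_run_s8_convolution(const q7_t *input,      /* 8 x 8 pixels, 4 channels, 1 batch, HWC layout */
                                             const q7_t *kernel,     /* 8 filters of 3 x 3 x 4 weights */
                                             const int32_t *bias,    /* one bias value per output channel */
                                             const int32_t *mult,    /* per-channel output multiplier */
                                             const int32_t *shift,   /* per-channel output shift */
                                             q7_t *output)           /* 8 x 8 pixels, 8 channels (pad 1, stride 1) */
{
    /* Scratch buffer sized to match arm_convolve_s8_get_buffer_size(4, 3, 3):
     * 2 * input_ch * kernel_x * kernel_y q15 elements on a DSP-extension build, 0 otherwise. */
    static q15_t buffer[2 * 4 * 3 * 3];

    return arm_convolve_s8(input,
                           8, 8,        /* input_x, input_y */
                           4,           /* input_ch */
                           1,           /* input_batches */
                           kernel,
                           8,           /* output_ch */
                           3, 3,        /* kernel_x, kernel_y */
                           1, 1,        /* pad_x, pad_y */
                           1, 1,        /* stride_x, stride_y */
                           bias,
                           output,
                           shift,       /* output_shift */
                           mult,        /* output_mult */
                           0,           /* out_offset */
                           0,           /* input_offset */
                           -128, 127,   /* out_activation_min, out_activation_max */
                           8, 8,        /* output_x, output_y */
                           buffer);
}
#endif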

/**
 * @} end of NNConv group
 */