arm_convolve_s8.c

/*
 * Copyright (C) 2010-2021 Arm Limited or its affiliates. All rights reserved.
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the License); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an AS IS BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/* ----------------------------------------------------------------------
 * Project:      CMSIS NN Library
 * Title:        arm_convolve_s8.c
 * Description:  s8 version of convolution using symmetric quantization.
 *
 * $Date:        January 26, 2021
 * $Revision:    V.2.0.4
 *
 * Target Processor:  Cortex-M cores
 *
 * -------------------------------------------------------------------- */

#include "arm_nnfunctions.h"
#include "arm_nnsupportfunctions.h"
/**
 * @ingroup groupNN
 */

/**
 * @addtogroup NNConv
 * @{
 */
/*
 * Basic s8 convolution function.
 *
 * Refer to the header file for details. The optimal use case for the DSP/MVE implementation is when input and
 * output channels are multiples of 4, or at least greater than 4.
 *
 */
arm_status arm_convolve_s8(const cmsis_nn_context *ctx,
                           const cmsis_nn_conv_params *conv_params,
                           const cmsis_nn_per_channel_quant_params *quant_params,
                           const cmsis_nn_dims *input_dims,
                           const q7_t *input_data,
                           const cmsis_nn_dims *filter_dims,
                           const q7_t *filter_data,
                           const cmsis_nn_dims *bias_dims,
                           const int32_t *bias_data,
                           const cmsis_nn_dims *output_dims,
                           q7_t *output_data)
{
    (void)bias_dims;
    q15_t *buffer_a = (q15_t *)ctx->buf;

    const uint16_t input_batches = input_dims->n;
    const uint16_t input_x = input_dims->w;
    const uint16_t input_y = input_dims->h;
    const uint16_t input_ch = input_dims->c;
    const uint16_t kernel_x = filter_dims->w;
    const uint16_t kernel_y = filter_dims->h;
    const uint16_t output_x = output_dims->w;
    const uint16_t output_y = output_dims->h;
    const uint16_t output_ch = output_dims->c;

    const uint16_t pad_x = conv_params->padding.w;
    const uint16_t pad_y = conv_params->padding.h;
    const uint16_t stride_x = conv_params->stride.w;
    const uint16_t stride_y = conv_params->stride.h;

    const int32_t input_offset = conv_params->input_offset;
    const int32_t out_offset = conv_params->output_offset;
    const int32_t out_activation_min = conv_params->activation.min;
    const int32_t out_activation_max = conv_params->activation.max;
    int32_t *output_mult = quant_params->multiplier;
    int32_t *output_shift = quant_params->shift;

    int i_batch;
    for (i_batch = 0; i_batch < input_batches; i_batch++)
    {
#if defined(ARM_MATH_MVEI)
        /* Generate up to four columns from the input tensor for a GEMM computation */
        q7_t *im2col_buf = (q7_t *)buffer_a;
        q7_t *out = output_data;
        int32_t buffer_fill_cnt = 0;
        int32_t padded = 0;
        const int32_t num_elem = kernel_x * kernel_y * input_ch;

        /* This part implements the im2col function */
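        /* im2col copies each kernel_y * kernel_x * input_ch receptive field into one contiguous
         * column of num_elem bytes, so the convolution reduces to a matrix multiplication of the
         * filter matrix against the buffered columns. Out-of-bounds taps are filled with
         * -input_offset, which cancels to zero once the input offset is added back during the
         * matrix multiplication. */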
        for (int i_out_y = 0; i_out_y < output_y; i_out_y++)
        {
            for (int i_out_x = 0; i_out_x < output_x; i_out_x++)
            {
                for (int i_ker_y = i_out_y * stride_y - pad_y; i_ker_y < i_out_y * stride_y - pad_y + kernel_y;
                     i_ker_y++)
                {
                    for (int i_ker_x = i_out_x * stride_x - pad_x; i_ker_x < i_out_x * stride_x - pad_x + kernel_x;
                         i_ker_x++)
                    {
                        if (i_ker_y < 0 || i_ker_y >= input_y || i_ker_x < 0 || i_ker_x >= input_x)
                        {
                            memset(im2col_buf, (int8_t)-input_offset, sizeof(q7_t) * input_ch);
                            padded = 1;
                        }
                        else
                        {
                            arm_memcpy_q7(im2col_buf, input_data + (i_ker_y * input_x + i_ker_x) * input_ch, input_ch);
                        }
                        im2col_buf += input_ch;
                    }
                }
                buffer_fill_cnt++;
                /* Computation is performed for every 4 columns */
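                /* Four columns map onto the four 32-bit lanes of an MVE vector, so one pass of the
                 * 4x matrix-multiply kernel below produces four adjacent output pixels per channel.
                 * Column sets containing padding take the generic matrix-multiply path instead. */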
                if (buffer_fill_cnt == 4 && (padded == 0))
                {
                    buffer_fill_cnt = 0;
                    for (int i_out_ch = 0; i_out_ch < output_ch; i_out_ch++)
                    {
                        int32_t sum_row;
                        int32_t acc[4];
                        (void)arm_nn_mat_mul_core_4x_s8(
                            num_elem, num_elem, (q7_t *)buffer_a, filter_data + num_elem * i_out_ch, &sum_row, acc);

                        int32x4_t s_offset = vdupq_n_s32(sum_row);

                        int32x4_t res = vldrwq_s32(acc);
                        s_offset = vmulq_n_s32(s_offset, input_offset);
                        if (bias_data)
                        {
                            res = vaddq_n_s32(res, bias_data[i_out_ch]);
                        }
                        res = vaddq_s32(res, s_offset);
                        res = arm_requantize_mve(res, output_mult[i_out_ch], output_shift[i_out_ch]);
                        res = vaddq_n_s32(res, out_offset);
                        res = vmaxq_s32(res, vdupq_n_s32(out_activation_min));
                        res = vminq_s32(res, vdupq_n_s32(out_activation_max));
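                        /* The four results belong to the same channel of four consecutive output
                         * pixels, so they are scattered output_ch bytes apart in the NHWC output. */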
                        const uint32x4_t scatter_offset = {0, output_ch, output_ch * 2, output_ch * 3};
                        vstrbq_scatter_offset_s32(out, scatter_offset, res);
                        out++;
                    }
                    out += (3 * output_ch);
                    im2col_buf = (q7_t *)buffer_a;
                }
                else if (buffer_fill_cnt == 4 && (padded != 0))
                {
                    buffer_fill_cnt = 0;
                    out = arm_nn_mat_mult_s8(filter_data,
                                             (q7_t *)buffer_a,
                                             output_ch,
                                             4,
                                             output_shift,
                                             output_mult,
                                             out_offset,
                                             input_offset,
                                             0,
                                             out_activation_min,
                                             out_activation_max,
                                             num_elem,
                                             bias_data,
                                             out);
                    im2col_buf = (q7_t *)buffer_a;
                    padded = 0;
                }
            }
        }
        /* Handle left over columns */
        if (buffer_fill_cnt != 0)
        {
            out = arm_nn_mat_mult_s8(filter_data,
                                     (q7_t *)buffer_a,
                                     output_ch,
                                     buffer_fill_cnt,
                                     output_shift,
                                     output_mult,
                                     out_offset,
                                     input_offset,
                                     0,
                                     out_activation_min,
                                     out_activation_max,
                                     num_elem,
                                     bias_data,
                                     out);
        }

#elif defined(ARM_MATH_DSP)
        int32_t i_out_y, i_out_x, i_ker_y, i_ker_x;

        /* Generate two columns from the input tensor for a GEMM computation */
        q15_t *two_column_buf = buffer_a;
        q7_t *out = output_data;

        /* This part implements the im2col function */
        for (i_out_y = 0; i_out_y < output_y; i_out_y++)
        {
            for (i_out_x = 0; i_out_x < output_x; i_out_x++)
            {
                for (i_ker_y = i_out_y * stride_y - pad_y; i_ker_y < i_out_y * stride_y - pad_y + kernel_y; i_ker_y++)
                {
                    for (i_ker_x = i_out_x * stride_x - pad_x; i_ker_x < i_out_x * stride_x - pad_x + kernel_x;
                         i_ker_x++)
                    {
                        if (i_ker_y < 0 || i_ker_y >= input_y || i_ker_x < 0 || i_ker_x >= input_x)
                        {
                            /* Fill 0 for out-of-bounds padding */
                            memset(two_column_buf, 0, sizeof(q15_t) * input_ch);
                        }
                        else
                        {
                            /* Copy the pixel data to the column */
                            arm_q7_to_q15_with_offset(input_data + (i_ker_y * input_x + i_ker_x) * input_ch,
                                                      two_column_buf,
                                                      input_ch,
                                                      input_offset);
                        }
                        two_column_buf += input_ch;
                    }
                }
                /* Computation is performed for every 2 columns */
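                /* Two columns are buffered because the matrix-multiply kernel below consumes them
                 * in pairs; the pointer-equality test checks that the buffer holds exactly two
                 * full columns. Note that padding is written as 0 here (not -input_offset),
                 * because arm_q7_to_q15_with_offset has already folded input_offset into every
                 * copied sample. */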
                if (two_column_buf == buffer_a + 2 * input_ch * kernel_y * kernel_x)
                {
                    out = arm_nn_mat_mult_kernel_s8_s16(filter_data,
                                                        buffer_a,
                                                        output_ch,
                                                        output_shift,
                                                        output_mult,
                                                        out_offset,
                                                        out_activation_min,
                                                        out_activation_max,
                                                        input_ch * kernel_y * kernel_x,
                                                        bias_data,
                                                        out);

                    /* counter reset */
                    two_column_buf = buffer_a;
                }
            }
        }
        /* Handle the left-over column due to an odd number of output pixels */
        if (two_column_buf != buffer_a)
        {
            const q7_t *ker_a = filter_data;
            int i;

            for (i = 0; i < output_ch; i++)
            {
                /* Load the accumulator with bias first */
                q31_t sum = 0;
                if (bias_data)
                {
                    sum = bias_data[i];
                }

                /* Point to the beginning of the im2col buffer where the input is available as a rearranged column */
                const q15_t *ip_as_col = buffer_a;

                /* 4 multiply and accumulates are done in one loop. */
                uint16_t col_count = (input_ch * kernel_y * kernel_x) >> 2;
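                /* Each __SMLAD performs two 16-bit multiply-accumulates, so the two calls in the
                 * loop body cover four MACs per iteration; col_count counts groups of four. */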
                while (col_count)
                {
                    q31_t ker_a1, ker_a2;
                    q31_t ip_b1, ip_b2;

                    ker_a = read_and_pad(ker_a, &ker_a1, &ker_a2);

                    ip_b1 = arm_nn_read_q15x2_ia(&ip_as_col);
                    sum = __SMLAD(ker_a1, ip_b1, sum);
                    ip_b2 = arm_nn_read_q15x2_ia(&ip_as_col);
                    sum = __SMLAD(ker_a2, ip_b2, sum);

                    col_count--;
                }

                /* Handle left-over MACs */
                col_count = input_ch * kernel_y * kernel_x & 0x3;
                while (col_count)
                {
                    q7_t ker_a1 = *ker_a++;
                    q15_t ip_b1 = *ip_as_col++;
                    sum += ker_a1 * ip_b1;
                    col_count--;
                }

                sum = arm_nn_requantize(sum, output_mult[i], output_shift[i]);
                sum += out_offset;
                sum = MAX(sum, out_activation_min);
                sum = MIN(sum, out_activation_max);
                *out++ = (q7_t)sum;
            }
        }
#else
        /* Run the following code as reference implementation for Cortex-M0 and Cortex-M3 */
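        /* For each output element this computes
         *   acc = sum_{ky,kx,c} (input[y+ky][x+kx][c] + input_offset) * filter[ky][kx][c] + bias
         *   out = clamp(requantize(acc) + output_offset, activation_min, activation_max)
         * directly, skipping kernel taps that fall outside the input instead of padding. */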
        (void)buffer_a;
        int32_t i_out_ch, i_out_y, i_out_x, i_input_ch, i_ker_y, i_ker_x;
        int32_t conv_out;

        for (i_out_ch = 0; i_out_ch < output_ch; i_out_ch++)
        {
            for (i_out_y = 0; i_out_y < output_y; i_out_y++)
            {
                for (i_out_x = 0; i_out_x < output_x; i_out_x++)
                {
                    conv_out = 0;

                    const int32_t base_idx_y = stride_y * i_out_y - pad_y;
                    const int32_t base_idx_x = stride_x * i_out_x - pad_x;

                    const int32_t ker_y_start = MAX(0, -base_idx_y);
                    const int32_t ker_x_start = MAX(0, -base_idx_x);

                    const int32_t ker_y_end = MIN(kernel_y, input_y - base_idx_y);
                    const int32_t ker_x_end = MIN(kernel_x, input_x - base_idx_x);

                    for (i_ker_y = ker_y_start; i_ker_y < ker_y_end; i_ker_y++)
                    {
                        for (i_ker_x = ker_x_start; i_ker_x < ker_x_end; i_ker_x++)
                        {
                            const int32_t in_row = base_idx_y + i_ker_y;
                            const int32_t in_col = base_idx_x + i_ker_x;
                            for (i_input_ch = 0; i_input_ch < input_ch; i_input_ch++)
                            {
                                conv_out +=
                                    (input_data[(in_row * input_x + in_col) * input_ch + i_input_ch] + input_offset) *
                                    filter_data[i_out_ch * input_ch * kernel_y * kernel_x +
                                                (i_ker_y * kernel_x + i_ker_x) * input_ch + i_input_ch];
                            }
                        }
                    }
                    if (bias_data)
                    {
                        conv_out += bias_data[i_out_ch];
                    }
                    conv_out = arm_nn_requantize(conv_out, output_mult[i_out_ch], output_shift[i_out_ch]);
                    conv_out += out_offset;
                    conv_out = MAX(conv_out, out_activation_min);
                    conv_out = MIN(conv_out, out_activation_max);
                    output_data[i_out_ch + (i_out_y * output_x + i_out_x) * output_ch] = (int8_t)conv_out;
                }
            }
        }
#endif
        /* Advance to the next batch */
        input_data += (input_x * input_y * input_ch);
        output_data += (output_x * output_y * output_ch);
    }

    /* Return to application */
    return ARM_MATH_SUCCESS;
}
int32_t arm_convolve_s8_get_buffer_size(const cmsis_nn_dims *input_dims, const cmsis_nn_dims *filter_dims)
{
#if defined(ARM_MATH_DSP)
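    /* The DSP path buffers two im2col columns of input_ch * kernel_w * kernel_h q15 values each.
     * On MVE targets ARM_MATH_DSP is typically also defined, and the same size covers the MVE
     * path's four q7 columns, since two q15 columns occupy as many bytes as four q7 columns. */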
    return (2 * input_dims->c * filter_dims->w * filter_dims->h) * (int32_t)sizeof(int16_t);
#else
    (void)input_dims;
    (void)filter_dims;
    return 0;
#endif
}
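
/*
 * Example usage (an illustrative sketch, not part of the original file): the scratch buffer must
 * be sized with arm_convolve_s8_get_buffer_size(). All dimension, offset, and quantization values
 * below are made-up placeholders for a 3x3 convolution over a 16x16x8 input; `mult` and `shift`
 * stand for per-output-channel arrays of output_ch int32_t entries, and bias_dims is ignored by
 * arm_convolve_s8.
 *
 *   cmsis_nn_dims input_dims = {.n = 1, .h = 16, .w = 16, .c = 8};
 *   cmsis_nn_dims filter_dims = {.h = 3, .w = 3};
 *   cmsis_nn_dims bias_dims = {0};
 *   cmsis_nn_dims output_dims = {.n = 1, .h = 16, .w = 16, .c = 16};
 *
 *   cmsis_nn_conv_params conv_params = {.input_offset = 128,
 *                                       .output_offset = -128,
 *                                       .stride = {.w = 1, .h = 1},
 *                                       .padding = {.w = 1, .h = 1},
 *                                       .activation = {.min = -128, .max = 127}};
 *   cmsis_nn_per_channel_quant_params quant_params = {.multiplier = mult, .shift = shift};
 *
 *   cmsis_nn_context ctx;
 *   ctx.size = arm_convolve_s8_get_buffer_size(&input_dims, &filter_dims);
 *   ctx.buf = malloc(ctx.size);
 *
 *   arm_status status = arm_convolve_s8(&ctx, &conv_params, &quant_params,
 *                                       &input_dims, input_data,
 *                                       &filter_dims, filter_data,
 *                                       &bias_dims, bias_data,
 *                                       &output_dims, output_data);
 */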

/**
 * @} end of NNConv group
 */