/*
 * Copyright (C) 2010-2021 Arm Limited or its affiliates.
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the License); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an AS IS BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/* ----------------------------------------------------------------------
 * Project:      CMSIS NN Library
 * Title:        arm_convolve_fast_s16.c
 * Description:  Optimized s16 version of convolution.
 *
 * $Date:        12 August 2021
 * $Revision:    V.1.1.0
 *
 * Target Processor:  Cortex-M cores
 *
 * -------------------------------------------------------------------- */

#include "arm_nnfunctions.h"
#include "arm_nnsupportfunctions.h"

/**
 *  @ingroup groupNN
 */

/**
 * @addtogroup NNConv
 * @{
 */

/*
 * Optimized s16 convolution function.
 *
 * Refer to the header file for details. The optimal use case for the DSP/MVE implementation is when the input and
 * output channels are multiples of 4, or at least greater than 4.
 *
 */
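/*
 * Example call sequence (illustrative sketch; buffer allocation is application-specific):
 *
 *   cmsis_nn_context ctx;
 *   ctx.size = arm_convolve_fast_s16_get_buffer_size(&input_dims, &filter_dims);
 *   ctx.buf = malloc(ctx.size);
 *   arm_status status = arm_convolve_fast_s16(&ctx, &conv_params, &quant_params,
 *                                             &input_dims, input_data,
 *                                             &filter_dims, filter_data,
 *                                             &bias_dims, bias_data,
 *                                             &output_dims, output_data);
 */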
arm_status arm_convolve_fast_s16(const cmsis_nn_context *ctx,
                                 const cmsis_nn_conv_params *conv_params,
                                 const cmsis_nn_per_channel_quant_params *quant_params,
                                 const cmsis_nn_dims *input_dims,
                                 const q15_t *input_data,
                                 const cmsis_nn_dims *filter_dims,
                                 const q7_t *filter_data,
                                 const cmsis_nn_dims *bias_dims,
                                 const int64_t *bias_data,
                                 const cmsis_nn_dims *output_dims,
                                 q15_t *output_data)
{
    (void)bias_dims;
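    /* Limit the number of MACs per output so that the q7 * q15 products (each bounded by 2^22 in
     * magnitude) cannot overflow the 32-bit SMLAD accumulator used below (512 * 2^22 = 2^31). */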
    if (filter_dims->w * filter_dims->h * input_dims->c >= 512)
    {
        return ARM_MATH_SIZE_MISMATCH;
    }

    if (ctx->buf == NULL && arm_convolve_s8_get_buffer_size(input_dims, filter_dims) > 0)
    {
        return ARM_MATH_ARGUMENT_ERROR;
    }
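    /* im2col scratch buffer supplied by the caller through ctx->buf
     * (see arm_convolve_fast_s16_get_buffer_size()). */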
    q15_t *buffer_a = (q15_t *)ctx->buf;

    const int32_t input_batches = input_dims->n;
    const int32_t input_x = input_dims->w;
    const int32_t input_y = input_dims->h;
    const int32_t input_ch = input_dims->c;
    const int32_t kernel_x = filter_dims->w;
    const int32_t kernel_y = filter_dims->h;
    const int32_t output_x = output_dims->w;
    const int32_t output_y = output_dims->h;
    const int32_t output_ch = output_dims->c;

    const int32_t pad_x = conv_params->padding.w;
    const int32_t pad_y = conv_params->padding.h;
    const int32_t stride_x = conv_params->stride.w;
    const int32_t stride_y = conv_params->stride.h;

    const int16_t out_activation_min = conv_params->activation.min;
    const int16_t out_activation_max = conv_params->activation.max;
    int32_t *output_mult = quant_params->multiplier;
    int32_t *output_shift = quant_params->shift;

    for (int i_batch = 0; i_batch < input_batches; i_batch++)
    {
#if defined(ARM_MATH_DSP) && !defined(ARM_MATH_MVEI)
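        /* DSP path: rearrange the input with im2col into a two-column buffer, then hand the buffer
         * to a matrix-multiplication kernel that produces two output pixels per call. An odd
         * trailing column is handled with a scalar multiply-accumulate loop further below. */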
        /* Generate two columns from the input tensor for a GEMM computation */
        q15_t *two_column_buf = buffer_a;
        q15_t *out = output_data;

        /* This part implements the im2col function */
        for (int32_t i_out_y = 0; i_out_y < output_y; i_out_y++)
        {
            for (int32_t i_out_x = 0; i_out_x < output_x; i_out_x++)
            {
                for (int32_t i_ker_y = i_out_y * stride_y - pad_y; i_ker_y < i_out_y * stride_y - pad_y + kernel_y;
                     i_ker_y++)
                {
                    for (int32_t i_ker_x = i_out_x * stride_x - pad_x;
                         i_ker_x < i_out_x * stride_x - pad_x + kernel_x;
                         i_ker_x++)
                    {
                        if (i_ker_y < 0 || i_ker_y >= input_y || i_ker_x < 0 || i_ker_x >= input_x)
                        {
                            /* Fill with 0 for out-of-bound padding */
                            arm_memset_q7((q7_t *)two_column_buf, 0, sizeof(q15_t) * input_ch);
                        }
                        else
                        {
                            arm_memcpy_q7((q7_t *)two_column_buf,
                                          (const q7_t *)(input_data + (i_ker_y * input_x + i_ker_x) * input_ch),
                                          input_ch * sizeof(q15_t));
                        }
                        two_column_buf += input_ch;
                    }
                }

                /* Computation is performed once two columns have been gathered */
                if (two_column_buf == buffer_a + 2 * input_ch * kernel_y * kernel_x)
                {
                    out = arm_nn_mat_mult_kernel_s16(filter_data,
                                                     buffer_a,
                                                     output_ch,
                                                     output_shift,
                                                     output_mult,
                                                     out_activation_min,
                                                     out_activation_max,
                                                     (input_ch * kernel_y * kernel_x),
                                                     bias_data,
                                                     out);

                    /* Counter reset */
                    two_column_buf = buffer_a;
                }
            }
        }
        /* Handle the left-over column when the number of output pixels is odd */
        if (two_column_buf != buffer_a)
        {
            const q7_t *ker_a = filter_data;
            int i;

            for (i = 0; i < output_ch; i++)
            {
                /* Initialize the accumulator */
                q31_t sum = 0;

                /* Point to the beginning of the im2col buffer where the input is available as a rearranged column */
                const q15_t *ip_as_col = buffer_a;

                /* 4 multiply-accumulates are done per loop iteration */
                uint16_t col_count = (input_ch * kernel_y * kernel_x) >> 2;

                while (col_count)
                {
                    q31_t ker_a1, ker_a2;
                    q31_t ip_b1, ip_b2;

                    ker_a = read_and_pad(ker_a, &ker_a1, &ker_a2);

                    ip_b1 = arm_nn_read_q15x2_ia(&ip_as_col);
                    sum = __SMLAD(ker_a1, ip_b1, sum);
                    ip_b2 = arm_nn_read_q15x2_ia(&ip_as_col);
                    sum = __SMLAD(ker_a2, ip_b2, sum);

                    col_count--;
                }

                /* Handle the left-over MACs */
                col_count = (input_ch * kernel_y * kernel_x) & 0x3;

                while (col_count)
                {
                    q7_t ker_a1 = *ker_a++;
                    q15_t ip_b1 = *ip_as_col++;
                    sum += ker_a1 * ip_b1;
                    col_count--;
                }
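                /* Requantize to q15 using the per-channel multiplier and shift; when a bias is
                 * present the accumulator is first widened to 64 bits before requantization. */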
                if (bias_data)
                {
                    q31_t reduced_multiplier = REDUCE_MULTIPLIER(output_mult[i]);
                    q63_t acc_64 = sum + bias_data[i];
                    sum = arm_nn_requantize_s64(acc_64, reduced_multiplier, output_shift[i]);
                }
                else
                {
                    sum = arm_nn_requantize(sum, output_mult[i], output_shift[i]);
                }
                sum = MAX(sum, out_activation_min);
                sum = MIN(sum, out_activation_max);
                *out++ = (q15_t)sum;
            }
        }
#else
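        /* The fast s16 kernel is only implemented for cores with the DSP extension and without MVE;
         * other configurations return ARM_MATH_ARGUMENT_ERROR and are expected to fall back to the
         * basic arm_convolve_s16() implementation. */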
        (void)input_data;
        (void)output_data;
        (void)bias_data;
        (void)filter_data;
        (void)buffer_a;
        (void)kernel_x;
        (void)kernel_y;
        (void)pad_x;
        (void)pad_y;
        (void)stride_x;
        (void)stride_y;
        (void)out_activation_min;
        (void)out_activation_max;
        (void)output_mult;
        (void)output_shift;
        return ARM_MATH_ARGUMENT_ERROR;
#endif
        /* Advance to the next batch */
        input_data += (input_x * input_y * input_ch);
        output_data += (output_x * output_y * output_ch);
    }

    /* Return to application */
    return ARM_MATH_SUCCESS;
}

int32_t arm_convolve_fast_s16_get_buffer_size(const cmsis_nn_dims *input_dims, const cmsis_nn_dims *filter_dims)
{
#if defined(ARM_MATH_DSP) && !defined(ARM_MATH_MVEI)
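    /* The scratch buffer holds two im2col columns of (input channels * kernel width * kernel height)
     * int16 values. */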
    return (2 * input_dims->c * filter_dims->w * filter_dims->h) * (int32_t)sizeof(int16_t);
#else
    (void)input_dims;
    (void)filter_dims;
    return 0;
#endif
}

/**
 * @} end of NNConv group
 */