/*
 * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved.
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the License); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an AS IS BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/* ----------------------------------------------------------------------
 * Project:      CMSIS NN Library
 * Title:        arm_convolve_1x1_s8_fast.c
 * Description:  Fast s8 version of 1x1 convolution (non-square shape)
 *
 * $Date:        7 February 2020
 * $Revision:    V.1.0.2
 *
 * Target Processor:  Cortex-M cores
 *
 * -------------------------------------------------------------------- */
#include "arm_nnfunctions.h"

#define DIM_KER_X (1U)
#define DIM_KER_Y (1U)
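/* The kernel dimensions are fixed: this file only implements the 1x1 case. */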
/**
 * @ingroup groupNN
 */

/**
 * @addtogroup NNConv
 * @{
 */

/*
 * Fast s8 version for 1x1 convolution (non-square shape)
 *
 * Refer to the header file for details.
 *
 */
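/*
 * A minimal usage sketch (hypothetical shapes and quantization parameters;
 * the input/kernel/bias/output buffers and the per-output-channel
 * output_mult/output_shift arrays are assumed to be set up by the caller):
 *
 *     q15_t *buf = NULL; // arm_convolve_1x1_s8_fast_get_buffer_size() returns 0
 *     arm_status status = arm_convolve_1x1_s8_fast(input, 16, 16, 8, 1,
 *                                                  kernel, 4,
 *                                                  0, 0, 1, 1, // pad must be 0, stride must be 1
 *                                                  bias, output,
 *                                                  output_shift, output_mult,
 *                                                  out_offset, input_offset,
 *                                                  -128, 127,
 *                                                  16, 16, buf);
 */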
arm_status arm_convolve_1x1_s8_fast(const q7_t *input,
                                    const uint16_t input_x,
                                    const uint16_t input_y,
                                    const uint16_t input_ch,
                                    const uint16_t input_batches,
                                    const q7_t *kernel,
                                    const uint16_t output_ch,
                                    const uint16_t pad_x,
                                    const uint16_t pad_y,
                                    const uint16_t stride_x,
                                    const uint16_t stride_y,
                                    const int32_t *bias,
                                    q7_t *output,
                                    const int32_t *output_shift,
                                    const int32_t *output_mult,
                                    const int32_t out_offset,
                                    const int32_t input_offset,
                                    const int32_t out_activation_min,
                                    const int32_t out_activation_max,
                                    const uint16_t output_x,
                                    const uint16_t output_y,
                                    q15_t *buffer_a)
{
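    /* The fast path only supports cases where the 1x1 convolution reduces to a
       plain matrix multiplication: zero padding, unit strides, and an input
       channel count that is a multiple of four. */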
    if (input_ch % 4 != 0 ||
        pad_x != 0 || pad_y != 0 ||
        stride_x != 1 || stride_y != 1)
    {
        return ARM_MATH_SIZE_MISMATCH;
    }
#if defined(ARM_MATH_MVEI)
    (void)buffer_a;
    int32_t col_len = input_x * input_y * input_batches;

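    /* MVE path: every output pixel is a dot product of one input pixel's
       channel vector with one kernel row. Process four input pixels per
       iteration of the outer loop. */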
    for (int i_items = 0; i_items <= (col_len - 4); i_items += 4)
    {
        for (int i_out_ch = 0; i_out_ch < output_ch; i_out_ch++)
        {
            int32_t sum_row = 0;
            int32_t temp_out[4];
            (void)arm_nn_mat_mul_core_4x_s8(input_ch,
                                            input_ch,
                                            input + i_items * input_ch,
                                            kernel + i_out_ch * input_ch,
                                            &sum_row,
                                            temp_out);
            int32x4_t res = vldrwq_s32(temp_out);
            res = vaddq_n_s32(res, bias[i_out_ch]);
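            /* Fold in the input offset after the multiply:
               sum((x + input_offset) * w) = sum(x * w) + input_offset * sum(w),
               where sum_row holds sum(w) for this output channel. */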
            sum_row = sum_row * input_offset;
            res = vaddq_n_s32(res, sum_row);
            res = arm_requantize_mve(res, output_mult[i_out_ch], output_shift[i_out_ch]);
            res = vaddq_n_s32(res, out_offset);
            res = vmaxq_s32(res, vdupq_n_s32(out_activation_min));
            res = vminq_s32(res, vdupq_n_s32(out_activation_max));
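            /* The four results belong to four consecutive pixels, which sit
               output_ch bytes apart in the NHWC output; the byte scatter store
               also narrows the int32 accumulators to int8. */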
            const uint32x4_t scatter_offset = {0, output_ch, output_ch * 2, output_ch * 3};
            vstrbq_scatter_offset_s32(output, scatter_offset, res);
            output++;
        }
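        /* The channel loop advanced output by output_ch; skip past the three
           pixels that the scatter stores have already filled in. */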
        output += (3 * output_ch);
    }

    /* Handle left over elements */
    for (int i_items = (col_len & ~0x3); i_items < col_len; i_items++)
    {
        for (int i_out_ch = 0; i_out_ch < output_ch; i_out_ch++)
        {
            int32_t sum_row = 0;
            int32_t acc;
            (void)arm_nn_mat_mul_core_1x_s8(input_ch,
                                            input + i_items * input_ch,
                                            kernel + i_out_ch * input_ch,
                                            &sum_row,
                                            &acc);
            acc += bias[i_out_ch];
            sum_row = (sum_row * input_offset);
            acc += sum_row;
            acc = arm_nn_requantize(acc, output_mult[i_out_ch], output_shift[i_out_ch]);
            acc += out_offset;
            acc = MAX(acc, out_activation_min);
            acc = MIN(acc, out_activation_max);
            *output++ = acc;
        }
    }
#else
    /* Run the following code as reference implementation for Cortex-M processors with or without DSP extension */
    (void)input_x;
    (void)input_y;
    (void)output_x;
    (void)output_y;
    (void)buffer_a;
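    /* With a 1x1 kernel, unit strides and no padding, the convolution is
       exactly one matrix multiplication: (pixels x input_ch) times the
       transpose of the (output_ch x input_ch) kernel matrix. */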
    const int32_t lhs_rows = input_x * input_y * input_batches;
    const int32_t rhs_rows = output_ch;
    const int32_t rhs_cols = input_ch;

    arm_nn_mat_mult_nt_t_s8(input,
                            kernel,
                            bias,
                            output,
                            output_mult,
                            output_shift,
                            lhs_rows,
                            rhs_rows,
                            rhs_cols,
                            input_offset,
                            out_offset,
                            out_activation_min,
                            out_activation_max);
#endif

    /* Return to application */
    return ARM_MATH_SUCCESS;
}

int32_t arm_convolve_1x1_s8_fast_get_buffer_size(const uint16_t input_ch)
{
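    /* No scratch buffer is needed: with a 1x1 kernel and unit strides there is
       no im2col step, so the input is consumed in place. */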
    (void)input_ch;
    return 0;
}
/**
 * @} end of NNConv group
 */