arm_svm_linear_predict_f16.c 8.4 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313
  1. /* ----------------------------------------------------------------------
  2. * Project: CMSIS DSP Library
  3. * Title: arm_svm_linear_predict_f16.c
  4. * Description: SVM Linear Classifier
  5. *
  6. * $Date: 23 April 2021
  7. * $Revision: V1.9.0
  8. *
  9. * Target Processor: Cortex-M and Cortex-A cores
  10. * -------------------------------------------------------------------- */
  11. /*
  12. * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved.
  13. *
  14. * SPDX-License-Identifier: Apache-2.0
  15. *
  16. * Licensed under the Apache License, Version 2.0 (the License); you may
  17. * not use this file except in compliance with the License.
  18. * You may obtain a copy of the License at
  19. *
  20. * www.apache.org/licenses/LICENSE-2.0
  21. *
  22. * Unless required by applicable law or agreed to in writing, software
  23. * distributed under the License is distributed on an AS IS BASIS, WITHOUT
  24. * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  25. * See the License for the specific language governing permissions and
  26. * limitations under the License.
  27. */
  28. #include "dsp/svm_functions_f16.h"
  29. #if defined(ARM_FLOAT16_SUPPORTED)
  30. #include <limits.h>
  31. #include <math.h>
  32. /**
  33. * @addtogroup linearsvm
  34. * @{
  35. */
  36. /**
  37. * @brief SVM linear prediction
  38. * @param[in] S Pointer to an instance of the linear SVM structure.
  39. * @param[in] in Pointer to input vector
  40. * @param[out] pResult Decision value
  41. *
  42. */
  43. #if defined(ARM_MATH_MVE_FLOAT16) && !defined(ARM_MATH_AUTOVECTORIZE)
  44. #include "arm_helium_utils.h"
void arm_svm_linear_predict_f16(
    const arm_svm_linear_instance_f16 *S,
    const float16_t * in,
    int32_t * pResult)
{
    /*
     * Linear SVM decision: sum = intercept + sum_i dualCoef[i] * <in, SV_i>,
     * then *pResult = classes[STEP(sum)].
     *
     * Implemented as an inlined Matrix x Vector product (support-vector matrix
     * times input vector) interleaved with the dual-coefficient dot product,
     * using MVE f16 vectors (8 half-precision lanes per vector).
     */
    uint32_t numRows = S->nbOfSupportVectors;   /* one matrix row per support vector */
    uint32_t numCols = S->vectorDimension;      /* feature count = row length */
    const float16_t *pSupport = S->supportVectors;
    const float16_t *pSrcA = pSupport;          /* walks the support-vector matrix row by row */
    const float16_t *pInA0;
    const float16_t *pInA1;
    uint32_t row;
    uint32_t blkCnt;                            /* loop counters */
    const float16_t *pDualCoef = S->dualCoefficients;
    _Float16 sum = S->intercept;                /* accumulate decision value, seeded with bias */

    row = numRows;

    /*
     * compute 4 rows in parallel
     * (4 support-vector dot products share each load of the input vector)
     */
    while (row >= 4)
    {
        const float16_t *pInA2, *pInA3;
        float16_t const *pSrcA0Vec, *pSrcA1Vec, *pSrcA2Vec, *pSrcA3Vec, *pInVec;
        f16x8_t vecIn, acc0, acc1, acc2, acc3;
        float16_t const *pSrcVecPtr = in;

        /*
         * Initialize the pointers to 4 consecutive MatrixA rows
         */
        pInA0 = pSrcA;
        pInA1 = pInA0 + numCols;
        pInA2 = pInA1 + numCols;
        pInA3 = pInA2 + numCols;
        /*
         * Initialize the vector pointer
         */
        pInVec = pSrcVecPtr;
        /*
         * reset accumulators
         */
        acc0 = vdupq_n_f16(0.0f);
        acc1 = vdupq_n_f16(0.0f);
        acc2 = vdupq_n_f16(0.0f);
        acc3 = vdupq_n_f16(0.0f);

        pSrcA0Vec = pInA0;
        pSrcA1Vec = pInA1;
        pSrcA2Vec = pInA2;
        pSrcA3Vec = pInA3;

        /* main loop: 8 f16 elements per iteration */
        blkCnt = numCols >> 3;
        while (blkCnt > 0U) {
            f16x8_t vecA;

            /* input chunk is loaded once and reused for all 4 rows */
            vecIn = vld1q(pInVec);
            pInVec += 8;
            vecA = vld1q(pSrcA0Vec);
            pSrcA0Vec += 8;
            acc0 = vfmaq(acc0, vecIn, vecA);
            vecA = vld1q(pSrcA1Vec);
            pSrcA1Vec += 8;
            acc1 = vfmaq(acc1, vecIn, vecA);
            vecA = vld1q(pSrcA2Vec);
            pSrcA2Vec += 8;
            acc2 = vfmaq(acc2, vecIn, vecA);
            vecA = vld1q(pSrcA3Vec);
            pSrcA3Vec += 8;
            acc3 = vfmaq(acc3, vecIn, vecA);

            blkCnt--;
        }
        /*
         * tail: remaining numCols % 8 elements, handled with a zeroing
         * predicate so inactive lanes contribute nothing
         * (will be merged thru tail predication)
         */
        blkCnt = numCols & 7;
        if (blkCnt > 0U) {
            mve_pred16_t p0 = vctp16q(blkCnt);
            f16x8_t vecA;

            vecIn = vldrhq_z_f16(pInVec, p0);
            vecA = vldrhq_z_f16(pSrcA0Vec, p0);
            acc0 = vfmaq(acc0, vecIn, vecA);
            vecA = vldrhq_z_f16(pSrcA1Vec, p0);
            acc1 = vfmaq(acc1, vecIn, vecA);
            vecA = vldrhq_z_f16(pSrcA2Vec, p0);
            acc2 = vfmaq(acc2, vecIn, vecA);
            vecA = vldrhq_z_f16(pSrcA3Vec, p0);
            acc3 = vfmaq(acc3, vecIn, vecA);
        }
        /*
         * Sum the partial parts: scale each row accumulator by its dual
         * coefficient, fold everything into acc0, then reduce across lanes
         */
        acc0 = vmulq_n_f16(acc0, *pDualCoef++);
        acc0 = vfmaq_n_f16(acc0, acc1, *pDualCoef++);
        acc0 = vfmaq_n_f16(acc0, acc2, *pDualCoef++);
        acc0 = vfmaq_n_f16(acc0, acc3, *pDualCoef++);
        sum += (_Float16)vecAddAcrossF16Mve(acc0);

        /* advance to the next group of 4 support vectors */
        pSrcA += numCols * 4;
        /*
         * Decrement the row loop counter
         */
        row -= 4;
    }

    /*
     * compute 2 rows in parallel (at most one pass: row < 4 here)
     */
    if (row >= 2) {
        float16_t const *pSrcA0Vec, *pSrcA1Vec, *pInVec;
        f16x8_t vecIn, acc0, acc1;
        float16_t const *pSrcVecPtr = in;

        /*
         * Initialize the pointers to 2 consecutive MatrixA rows
         */
        pInA0 = pSrcA;
        pInA1 = pInA0 + numCols;
        /*
         * Initialize the vector pointer
         */
        pInVec = pSrcVecPtr;
        /*
         * reset accumulators
         */
        acc0 = vdupq_n_f16(0.0f);
        acc1 = vdupq_n_f16(0.0f);

        pSrcA0Vec = pInA0;
        pSrcA1Vec = pInA1;

        blkCnt = numCols >> 3;
        while (blkCnt > 0U) {
            f16x8_t vecA;

            vecIn = vld1q(pInVec);
            pInVec += 8;
            vecA = vld1q(pSrcA0Vec);
            pSrcA0Vec += 8;
            acc0 = vfmaq(acc0, vecIn, vecA);
            vecA = vld1q(pSrcA1Vec);
            pSrcA1Vec += 8;
            acc1 = vfmaq(acc1, vecIn, vecA);

            blkCnt--;
        }
        /*
         * tail
         * (will be merged thru tail predication)
         */
        blkCnt = numCols & 7;
        if (blkCnt > 0U) {
            mve_pred16_t p0 = vctp16q(blkCnt);
            f16x8_t vecA;

            vecIn = vldrhq_z_f16(pInVec, p0);
            vecA = vldrhq_z_f16(pSrcA0Vec, p0);
            acc0 = vfmaq(acc0, vecIn, vecA);
            vecA = vldrhq_z_f16(pSrcA1Vec, p0);
            acc1 = vfmaq(acc1, vecIn, vecA);
        }
        /*
         * Sum the partial parts
         */
        acc0 = vmulq_n_f16(acc0, *pDualCoef++);
        acc0 = vfmaq_n_f16(acc0, acc1, *pDualCoef++);
        sum += (_Float16)vecAddAcrossF16Mve(acc0);

        pSrcA += numCols * 2;
        row -= 2;
    }

    /* last remaining row, if the count was odd */
    if (row >= 1) {
        f16x8_t vecIn, acc0;
        float16_t const *pSrcA0Vec, *pInVec;
        float16_t const *pSrcVecPtr = in;

        /*
         * Initialize the pointers to last MatrixA row
         */
        pInA0 = pSrcA;
        /*
         * Initialize the vector pointer
         */
        pInVec = pSrcVecPtr;
        /*
         * reset accumulators
         */
        acc0 = vdupq_n_f16(0.0f);

        pSrcA0Vec = pInA0;

        blkCnt = numCols >> 3;
        while (blkCnt > 0U) {
            f16x8_t vecA;

            vecIn = vld1q(pInVec);
            pInVec += 8;
            vecA = vld1q(pSrcA0Vec);
            pSrcA0Vec += 8;
            acc0 = vfmaq(acc0, vecIn, vecA);

            blkCnt--;
        }
        /*
         * tail
         * (will be merged thru tail predication)
         */
        blkCnt = numCols & 7;
        if (blkCnt > 0U) {
            mve_pred16_t p0 = vctp16q(blkCnt);
            f16x8_t vecA;

            vecIn = vldrhq_z_f16(pInVec, p0);
            vecA = vldrhq_z_f16(pSrcA0Vec, p0);
            acc0 = vfmaq(acc0, vecIn, vecA);
        }
        /*
         * Sum the partial parts
         */
        sum += (_Float16)*pDualCoef++ * (_Float16)vecAddAcrossF16Mve(acc0);
    }

    /* map the decision value to a class label via the STEP() threshold */
    *pResult = S->classes[STEP(sum)];
}
  249. #else
  250. void arm_svm_linear_predict_f16(
  251. const arm_svm_linear_instance_f16 *S,
  252. const float16_t * in,
  253. int32_t * pResult)
  254. {
  255. _Float16 sum=S->intercept;
  256. _Float16 dot=0;
  257. uint32_t i,j;
  258. const float16_t *pSupport = S->supportVectors;
  259. for(i=0; i < S->nbOfSupportVectors; i++)
  260. {
  261. dot=0;
  262. for(j=0; j < S->vectorDimension; j++)
  263. {
  264. dot = (_Float16)dot + (_Float16)in[j]* (_Float16)*pSupport++;
  265. }
  266. sum += (_Float16)S->dualCoefficients[i] * (_Float16)dot;
  267. }
  268. *pResult=S->classes[STEP(sum)];
  269. }
#endif /* defined(ARM_MATH_MVE_FLOAT16) && !defined(ARM_MATH_AUTOVECTORIZE) */
  271. /**
  272. * @} end of linearsvm group
  273. */
  274. #endif /* #if defined(ARM_FLOAT16_SUPPORTED) */