arm_svm_linear_predict_f16.c

/* ----------------------------------------------------------------------
 * Project:      CMSIS DSP Library
 * Title:        arm_svm_linear_predict_f16.c
 * Description:  SVM Linear Classifier
 *
 * $Date:        23 April 2021
 * $Revision:    V1.9.0
 *
 * Target Processor: Cortex-M and Cortex-A cores
 * -------------------------------------------------------------------- */
/*
 * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved.
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "dsp/svm_functions_f16.h"

#if defined(ARM_FLOAT16_SUPPORTED)

#include <limits.h>
#include <math.h>

/**
 * @addtogroup linearsvm
 * @{
 */
/**
 * @brief SVM linear prediction
 * @param[in]    S        Pointer to an instance of the linear SVM structure.
 * @param[in]    in       Pointer to the input vector.
 * @param[out]   pResult  Predicted class label (one of S->classes).
 * @return none.
 *
 */
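/*
 * Usage sketch (illustrative, not part of the library): the instance is
 * normally filled in with arm_svm_linear_init_f16(). The model below
 * (2 support vectors of dimension 3) and all numeric values are
 * placeholders chosen for the example.
 *
 *     arm_svm_linear_instance_f16 svm;
 *     const float16_t dualCoefs[2]       = { (float16_t)0.5f, (float16_t)-0.5f };
 *     const float16_t supportVecs[2 * 3] = { (float16_t)1.0f, (float16_t)0.0f, (float16_t)0.0f,
 *                                            (float16_t)0.0f, (float16_t)1.0f, (float16_t)0.0f };
 *     const int32_t   classes[2]         = { 0, 1 };
 *     const float16_t input[3]           = { (float16_t)0.3f, (float16_t)0.7f, (float16_t)0.1f };
 *     int32_t         predictedClass;
 *
 *     arm_svm_linear_init_f16(&svm, 2, 3, (float16_t)-0.1f,
 *                             dualCoefs, supportVecs, classes);
 *     arm_svm_linear_predict_f16(&svm, input, &predictedClass);
 */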

#if defined(ARM_MATH_MVE_FLOAT16) && !defined(ARM_MATH_AUTOVECTORIZE)

#include "arm_helium_utils.h"

void arm_svm_linear_predict_f16(
    const arm_svm_linear_instance_f16 *S,
    const float16_t * in,
    int32_t * pResult)
{
    /* inlined Matrix x Vector function interleaved with dot prod */
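    /*
     * The value being accumulated is the SVM decision function:
     *   sum = intercept + sum_i dualCoefficients[i] * dot(supportVectors[i], in)
     * i.e. a support-vector-matrix x input-vector product in which each
     * row's dot product is weighted by its dual coefficient.
     */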
    uint32_t         numRows = S->nbOfSupportVectors;
    uint32_t         numCols = S->vectorDimension;
    const float16_t *pSupport = S->supportVectors;
    const float16_t *pSrcA = pSupport;
    const float16_t *pInA0;
    const float16_t *pInA1;
    uint32_t         row;
    uint32_t         blkCnt;    /* loop counters */
    const float16_t *pDualCoef = S->dualCoefficients;
    _Float16         sum = S->intercept;

    row = numRows;
    /*
     * compute 4 rows in parallel
     */
    while (row >= 4)
    {
        const float16_t *pInA2, *pInA3;
        float16_t const *pSrcA0Vec, *pSrcA1Vec, *pSrcA2Vec, *pSrcA3Vec, *pInVec;
        f16x8_t          vecIn, acc0, acc1, acc2, acc3;
        float16_t const *pSrcVecPtr = in;

        /*
         * Initialize the pointers to 4 consecutive MatrixA rows
         */
        pInA0 = pSrcA;
        pInA1 = pInA0 + numCols;
        pInA2 = pInA1 + numCols;
        pInA3 = pInA2 + numCols;
        /*
         * Initialize the vector pointer
         */
        pInVec = pSrcVecPtr;
        /*
         * reset accumulators
         */
        acc0 = vdupq_n_f16(0.0f);
        acc1 = vdupq_n_f16(0.0f);
        acc2 = vdupq_n_f16(0.0f);
        acc3 = vdupq_n_f16(0.0f);

        pSrcA0Vec = pInA0;
        pSrcA1Vec = pInA1;
        pSrcA2Vec = pInA2;
        pSrcA3Vec = pInA3;

        blkCnt = numCols >> 3;
        while (blkCnt > 0U) {
            f16x8_t vecA;

            vecIn = vld1q(pInVec);
            pInVec += 8;
            vecA = vld1q(pSrcA0Vec);
            pSrcA0Vec += 8;
            acc0 = vfmaq(acc0, vecIn, vecA);
            vecA = vld1q(pSrcA1Vec);
            pSrcA1Vec += 8;
            acc1 = vfmaq(acc1, vecIn, vecA);
            vecA = vld1q(pSrcA2Vec);
            pSrcA2Vec += 8;
            acc2 = vfmaq(acc2, vecIn, vecA);
            vecA = vld1q(pSrcA3Vec);
            pSrcA3Vec += 8;
            acc3 = vfmaq(acc3, vecIn, vecA);

            blkCnt--;
        }
        /*
         * tail
         * (will be merged thru tail predication)
         */
        blkCnt = numCols & 7;
        if (blkCnt > 0U) {
            mve_pred16_t p0 = vctp16q(blkCnt);
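            /*
             * p0 enables only the low blkCnt lanes; the zeroing loads
             * (vldrhq_z_f16) below clear the inactive lanes, so the FMAs
             * leave those accumulator lanes untouched.
             */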
            f16x8_t vecA;

            vecIn = vldrhq_z_f16(pInVec, p0);
            vecA = vldrhq_z_f16(pSrcA0Vec, p0);
            acc0 = vfmaq(acc0, vecIn, vecA);
            vecA = vldrhq_z_f16(pSrcA1Vec, p0);
            acc1 = vfmaq(acc1, vecIn, vecA);
            vecA = vldrhq_z_f16(pSrcA2Vec, p0);
            acc2 = vfmaq(acc2, vecIn, vecA);
            vecA = vldrhq_z_f16(pSrcA3Vec, p0);
            acc3 = vfmaq(acc3, vecIn, vecA);
        }
        /*
         * Sum the partial parts
         */
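        /*
         * Each accumulator holds one row's dot product with the input:
         * scale it by that row's dual coefficient, fold everything into
         * acc0, then reduce acc0 across lanes.
         */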
        acc0 = vmulq_n_f16(acc0, *pDualCoef++);
        acc0 = vfmaq_n_f16(acc0, acc1, *pDualCoef++);
        acc0 = vfmaq_n_f16(acc0, acc2, *pDualCoef++);
        acc0 = vfmaq_n_f16(acc0, acc3, *pDualCoef++);

        sum += (_Float16)vecAddAcrossF16Mve(acc0);

        pSrcA += numCols * 4;
        /*
         * Decrement the row loop counter
         */
        row -= 4;
    }
    /*
     * compute 2 rows in parallel
     */
    if (row >= 2) {
        float16_t const *pSrcA0Vec, *pSrcA1Vec, *pInVec;
        f16x8_t          vecIn, acc0, acc1;
        float16_t const *pSrcVecPtr = in;

        /*
         * Initialize the pointers to 2 consecutive MatrixA rows
         */
        pInA0 = pSrcA;
        pInA1 = pInA0 + numCols;
        /*
         * Initialize the vector pointer
         */
        pInVec = pSrcVecPtr;
        /*
         * reset accumulators
         */
        acc0 = vdupq_n_f16(0.0f);
        acc1 = vdupq_n_f16(0.0f);

        pSrcA0Vec = pInA0;
        pSrcA1Vec = pInA1;

        blkCnt = numCols >> 3;
        while (blkCnt > 0U) {
            f16x8_t vecA;

            vecIn = vld1q(pInVec);
            pInVec += 8;
            vecA = vld1q(pSrcA0Vec);
            pSrcA0Vec += 8;
            acc0 = vfmaq(acc0, vecIn, vecA);
            vecA = vld1q(pSrcA1Vec);
            pSrcA1Vec += 8;
            acc1 = vfmaq(acc1, vecIn, vecA);

            blkCnt--;
        }
        /*
         * tail
         * (will be merged thru tail predication)
         */
        blkCnt = numCols & 7;
        if (blkCnt > 0U) {
            mve_pred16_t p0 = vctp16q(blkCnt);
            f16x8_t vecA;

            vecIn = vldrhq_z_f16(pInVec, p0);
            vecA = vldrhq_z_f16(pSrcA0Vec, p0);
            acc0 = vfmaq(acc0, vecIn, vecA);
            vecA = vldrhq_z_f16(pSrcA1Vec, p0);
            acc1 = vfmaq(acc1, vecIn, vecA);
        }
        /*
         * Sum the partial parts
         */
        acc0 = vmulq_n_f16(acc0, *pDualCoef++);
        acc0 = vfmaq_n_f16(acc0, acc1, *pDualCoef++);

        sum += (_Float16)vecAddAcrossF16Mve(acc0);

        pSrcA += numCols * 2;
        row -= 2;
    }

    if (row >= 1) {
        f16x8_t          vecIn, acc0;
        float16_t const *pSrcA0Vec, *pInVec;
        float16_t const *pSrcVecPtr = in;

        /*
         * Initialize the pointers to last MatrixA row
         */
        pInA0 = pSrcA;
        /*
         * Initialize the vector pointer
         */
        pInVec = pSrcVecPtr;
        /*
         * reset accumulators
         */
        acc0 = vdupq_n_f16(0.0f);

        pSrcA0Vec = pInA0;

        blkCnt = numCols >> 3;
        while (blkCnt > 0U) {
            f16x8_t vecA;

            vecIn = vld1q(pInVec);
            pInVec += 8;
            vecA = vld1q(pSrcA0Vec);
            pSrcA0Vec += 8;
            acc0 = vfmaq(acc0, vecIn, vecA);

            blkCnt--;
        }
        /*
         * tail
         * (will be merged thru tail predication)
         */
        blkCnt = numCols & 7;
        if (blkCnt > 0U) {
            mve_pred16_t p0 = vctp16q(blkCnt);
            f16x8_t vecA;

            vecIn = vldrhq_z_f16(pInVec, p0);
            vecA = vldrhq_z_f16(pSrcA0Vec, p0);
            acc0 = vfmaq(acc0, vecIn, vecA);
        }
        /*
         * Sum the partial parts
         */
        sum += (_Float16)*pDualCoef++ * (_Float16)vecAddAcrossF16Mve(acc0);
    }
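    /*
     * STEP(sum) maps the sign of the decision value to a class index:
     * 0 when sum <= 0, 1 otherwise.
     */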
    *pResult = S->classes[STEP(sum)];
}
#else

void arm_svm_linear_predict_f16(
    const arm_svm_linear_instance_f16 *S,
    const float16_t * in,
    int32_t * pResult)
{
    _Float16 sum = S->intercept;
    _Float16 dot = 0;
    uint32_t i, j;
    const float16_t *pSupport = S->supportVectors;
    /* Accumulate the dual-coefficient-weighted dot products with all
       support vectors */
    for (i = 0; i < S->nbOfSupportVectors; i++)
    {
        /* Dot product between the input and support vector i */
        dot = 0;
        for (j = 0; j < S->vectorDimension; j++)
        {
            dot = (_Float16)dot + (_Float16)in[j] * (_Float16)*pSupport++;
        }
        sum += (_Float16)S->dualCoefficients[i] * (_Float16)dot;
    }

    *pResult = S->classes[STEP(sum)];
}
#endif /* defined(ARM_MATH_MVE_FLOAT16) && !defined(ARM_MATH_AUTOVECTORIZE) */
/**
 * @} end of linearsvm group
 */

#endif /* #if defined(ARM_FLOAT16_SUPPORTED) */