/* arm_svm_linear_predict_f32.c */
  1. /* ----------------------------------------------------------------------
  2. * Project: CMSIS DSP Library
  3. * Title: arm_svm_linear_predict_f32.c
  4. * Description: SVM Linear Classifier
  5. *
  6. * $Date: 23 April 2021
  7. * $Revision: V1.9.0
  8. *
  9. * Target Processor: Cortex-M and Cortex-A cores
  10. * -------------------------------------------------------------------- */
  11. /*
  12. * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved.
  13. *
  14. * SPDX-License-Identifier: Apache-2.0
  15. *
  16. * Licensed under the Apache License, Version 2.0 (the License); you may
  17. * not use this file except in compliance with the License.
  18. * You may obtain a copy of the License at
  19. *
  20. * www.apache.org/licenses/LICENSE-2.0
  21. *
  22. * Unless required by applicable law or agreed to in writing, software
  23. * distributed under the License is distributed on an AS IS BASIS, WITHOUT
  24. * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  25. * See the License for the specific language governing permissions and
  26. * limitations under the License.
  27. */
  28. #include "dsp/svm_functions.h"
  29. #include <limits.h>
  30. #include <math.h>
  31. /**
  32. * @addtogroup linearsvm
  33. * @{
  34. */
  35. /**
  36. * @brief SVM linear prediction
  37. * @param[in] S Pointer to an instance of the linear SVM structure.
  38. * @param[in] in Pointer to input vector
  39. * @param[out] pResult Decision value
  40. *
  41. */
  42. #if defined(ARM_MATH_MVEF) && !defined(ARM_MATH_AUTOVECTORIZE)
  43. #include "arm_helium_utils.h"
void arm_svm_linear_predict_f32(
    const arm_svm_linear_instance_f32 *S,
    const float32_t * in,
    int32_t * pResult)
{
    /* Helium/MVE implementation.
     *
     * The decision value is sum = intercept + dualCoef . (SV * in), i.e. a
     * matrix (support vectors) x vector (input) product whose partial dot
     * products are immediately weighted by the dual coefficients.  The
     * matrix-vector kernel is inlined here and interleaved with that
     * weighting so the per-row dot products never hit memory. */
    uint32_t numRows = S->nbOfSupportVectors;   /* one matrix row per support vector */
    uint32_t numCols = S->vectorDimension;      /* row length = feature dimension */
    const float32_t *pSupport = S->supportVectors;
    const float32_t *pSrcA = pSupport;          /* walks the support-vector matrix row block by row block */
    const float32_t *pInA0;
    const float32_t *pInA1;
    uint32_t row;
    uint32_t blkCnt; /* loop counters */
    const float32_t *pDualCoef = S->dualCoefficients;
    float32_t sum = S->intercept;               /* accumulate the decision value starting from the bias */

    row = numRows;

    /*
     * compute 4 rows (support vectors) in parallel
     */
    while (row >= 4)
    {
        const float32_t *pInA2, *pInA3;
        float32_t const *pSrcA0Vec, *pSrcA1Vec, *pSrcA2Vec, *pSrcA3Vec, *pInVec;
        f32x4_t vecIn, acc0, acc1, acc2, acc3;
        float32_t const *pSrcVecPtr = in;

        /*
         * Initialize the pointers to 4 consecutive MatrixA rows
         */
        pInA0 = pSrcA;
        pInA1 = pInA0 + numCols;
        pInA2 = pInA1 + numCols;
        pInA3 = pInA2 + numCols;
        /*
         * Initialize the vector pointer (input is re-read for every row group)
         */
        pInVec = pSrcVecPtr;
        /*
         * reset accumulators
         */
        acc0 = vdupq_n_f32(0.0f);
        acc1 = vdupq_n_f32(0.0f);
        acc2 = vdupq_n_f32(0.0f);
        acc3 = vdupq_n_f32(0.0f);

        pSrcA0Vec = pInA0;
        pSrcA1Vec = pInA1;
        pSrcA2Vec = pInA2;
        pSrcA3Vec = pInA3;

        /* main loop: 4 float32 lanes per iteration, one shared input load
         * feeding 4 row FMAs */
        blkCnt = numCols >> 2;
        while (blkCnt > 0U) {
            f32x4_t vecA;

            vecIn = vld1q(pInVec);
            pInVec += 4;
            vecA = vld1q(pSrcA0Vec);
            pSrcA0Vec += 4;
            acc0 = vfmaq(acc0, vecIn, vecA);
            vecA = vld1q(pSrcA1Vec);
            pSrcA1Vec += 4;
            acc1 = vfmaq(acc1, vecIn, vecA);
            vecA = vld1q(pSrcA2Vec);
            pSrcA2Vec += 4;
            acc2 = vfmaq(acc2, vecIn, vecA);
            vecA = vld1q(pSrcA3Vec);
            pSrcA3Vec += 4;
            acc3 = vfmaq(acc3, vecIn, vecA);

            blkCnt--;
        }
        /*
         * tail: remaining 1..3 columns handled with predicated (zeroing)
         * loads so out-of-range lanes contribute 0 to the accumulators
         * (will be merged thru tail predication)
         */
        blkCnt = numCols & 3;
        if (blkCnt > 0U) {
            mve_pred16_t p0 = vctp32q(blkCnt);
            f32x4_t vecA;

            vecIn = vldrwq_z_f32(pInVec, p0);
            vecA = vldrwq_z_f32(pSrcA0Vec, p0);
            acc0 = vfmaq(acc0, vecIn, vecA);
            vecA = vldrwq_z_f32(pSrcA1Vec, p0);
            acc1 = vfmaq(acc1, vecIn, vecA);
            vecA = vldrwq_z_f32(pSrcA2Vec, p0);
            acc2 = vfmaq(acc2, vecIn, vecA);
            vecA = vldrwq_z_f32(pSrcA3Vec, p0);
            acc3 = vfmaq(acc3, vecIn, vecA);
        }
        /*
         * Sum the partial parts: fold the 4 per-row accumulators into acc0,
         * scaling each by its dual coefficient, then reduce across lanes.
         */
        acc0 = vmulq_n_f32(acc0, *pDualCoef++);
        acc0 = vfmaq_n_f32(acc0, acc1, *pDualCoef++);
        acc0 = vfmaq_n_f32(acc0, acc2, *pDualCoef++);
        acc0 = vfmaq_n_f32(acc0, acc3, *pDualCoef++);
        sum += vecAddAcrossF32Mve(acc0);

        pSrcA += numCols * 4;   /* advance to the next group of 4 rows */
        /*
         * Decrement the row loop counter
         */
        row -= 4;
    }

    /*
     * compute 2 rows in parallel (row is now 0..3, so this runs at most once)
     */
    if (row >= 2) {
        float32_t const *pSrcA0Vec, *pSrcA1Vec, *pInVec;
        f32x4_t vecIn, acc0, acc1;
        float32_t const *pSrcVecPtr = in;

        /*
         * Initialize the pointers to 2 consecutive MatrixA rows
         */
        pInA0 = pSrcA;
        pInA1 = pInA0 + numCols;
        /*
         * Initialize the vector pointer
         */
        pInVec = pSrcVecPtr;
        /*
         * reset accumulators
         */
        acc0 = vdupq_n_f32(0.0f);
        acc1 = vdupq_n_f32(0.0f);
        pSrcA0Vec = pInA0;
        pSrcA1Vec = pInA1;

        blkCnt = numCols >> 2;
        while (blkCnt > 0U) {
            f32x4_t vecA;

            vecIn = vld1q(pInVec);
            pInVec += 4;
            vecA = vld1q(pSrcA0Vec);
            pSrcA0Vec += 4;
            acc0 = vfmaq(acc0, vecIn, vecA);
            vecA = vld1q(pSrcA1Vec);
            pSrcA1Vec += 4;
            acc1 = vfmaq(acc1, vecIn, vecA);

            blkCnt--;
        }
        /*
         * tail
         * (will be merged thru tail predication)
         */
        blkCnt = numCols & 3;
        if (blkCnt > 0U) {
            mve_pred16_t p0 = vctp32q(blkCnt);
            f32x4_t vecA;

            vecIn = vldrwq_z_f32(pInVec, p0);
            vecA = vldrwq_z_f32(pSrcA0Vec, p0);
            acc0 = vfmaq(acc0, vecIn, vecA);
            vecA = vldrwq_z_f32(pSrcA1Vec, p0);
            acc1 = vfmaq(acc1, vecIn, vecA);
        }
        /*
         * Sum the partial parts
         */
        acc0 = vmulq_n_f32(acc0, *pDualCoef++);
        acc0 = vfmaq_n_f32(acc0, acc1, *pDualCoef++);
        sum += vecAddAcrossF32Mve(acc0);

        pSrcA += numCols * 2;
        row -= 2;
    }

    /* last odd row, if any */
    if (row >= 1) {
        f32x4_t vecIn, acc0;
        float32_t const *pSrcA0Vec, *pInVec;
        float32_t const *pSrcVecPtr = in;

        /*
         * Initialize the pointers to last MatrixA row
         */
        pInA0 = pSrcA;
        /*
         * Initialize the vector pointer
         */
        pInVec = pSrcVecPtr;
        /*
         * reset accumulators
         */
        acc0 = vdupq_n_f32(0.0f);
        pSrcA0Vec = pInA0;

        blkCnt = numCols >> 2;
        while (blkCnt > 0U) {
            f32x4_t vecA;

            vecIn = vld1q(pInVec);
            pInVec += 4;
            vecA = vld1q(pSrcA0Vec);
            pSrcA0Vec += 4;
            acc0 = vfmaq(acc0, vecIn, vecA);

            blkCnt--;
        }
        /*
         * tail
         * (will be merged thru tail predication)
         */
        blkCnt = numCols & 3;
        if (blkCnt > 0U) {
            mve_pred16_t p0 = vctp32q(blkCnt);
            f32x4_t vecA;

            vecIn = vldrwq_z_f32(pInVec, p0);
            vecA = vldrwq_z_f32(pSrcA0Vec, p0);
            acc0 = vfmaq(acc0, vecIn, vecA);
        }
        /*
         * Sum the partial parts
         */
        sum += *pDualCoef++ * vecAddAcrossF32Mve(acc0);
    }

    /* STEP() maps the sign of the decision value to a class index
     * (defined in svm_functions.h). */
    *pResult = S->classes[STEP(sum)];
}
  248. #else
  249. #if defined(ARM_MATH_NEON)
  250. void arm_svm_linear_predict_f32(
  251. const arm_svm_linear_instance_f32 *S,
  252. const float32_t * in,
  253. int32_t * pResult)
  254. {
  255. float32_t sum = S->intercept;
  256. float32_t dot;
  257. float32x4_t dotV;
  258. float32x4_t accuma,accumb,accumc,accumd,accum;
  259. float32x2_t accum2;
  260. float32x4_t vec1;
  261. float32x4_t vec2,vec2a,vec2b,vec2c,vec2d;
  262. uint32_t blkCnt;
  263. uint32_t vectorBlkCnt;
  264. const float32_t *pIn = in;
  265. const float32_t *pSupport = S->supportVectors;
  266. const float32_t *pSupporta = S->supportVectors;
  267. const float32_t *pSupportb;
  268. const float32_t *pSupportc;
  269. const float32_t *pSupportd;
  270. pSupportb = pSupporta + S->vectorDimension;
  271. pSupportc = pSupportb + S->vectorDimension;
  272. pSupportd = pSupportc + S->vectorDimension;
  273. const float32_t *pDualCoefs = S->dualCoefficients;
  274. vectorBlkCnt = S->nbOfSupportVectors >> 2;
  275. while (vectorBlkCnt > 0U)
  276. {
  277. accuma = vdupq_n_f32(0);
  278. accumb = vdupq_n_f32(0);
  279. accumc = vdupq_n_f32(0);
  280. accumd = vdupq_n_f32(0);
  281. pIn = in;
  282. blkCnt = S->vectorDimension >> 2;
  283. while (blkCnt > 0U)
  284. {
  285. vec1 = vld1q_f32(pIn);
  286. vec2a = vld1q_f32(pSupporta);
  287. vec2b = vld1q_f32(pSupportb);
  288. vec2c = vld1q_f32(pSupportc);
  289. vec2d = vld1q_f32(pSupportd);
  290. pIn += 4;
  291. pSupporta += 4;
  292. pSupportb += 4;
  293. pSupportc += 4;
  294. pSupportd += 4;
  295. accuma = vmlaq_f32(accuma, vec1,vec2a);
  296. accumb = vmlaq_f32(accumb, vec1,vec2b);
  297. accumc = vmlaq_f32(accumc, vec1,vec2c);
  298. accumd = vmlaq_f32(accumd, vec1,vec2d);
  299. blkCnt -- ;
  300. }
  301. accum2 = vpadd_f32(vget_low_f32(accuma),vget_high_f32(accuma));
  302. dotV = vsetq_lane_f32(vget_lane_f32(accum2, 0) + vget_lane_f32(accum2, 1),dotV,0);
  303. accum2 = vpadd_f32(vget_low_f32(accumb),vget_high_f32(accumb));
  304. dotV = vsetq_lane_f32(vget_lane_f32(accum2, 0) + vget_lane_f32(accum2, 1),dotV,1);
  305. accum2 = vpadd_f32(vget_low_f32(accumc),vget_high_f32(accumc));
  306. dotV = vsetq_lane_f32(vget_lane_f32(accum2, 0) + vget_lane_f32(accum2, 1),dotV,2);
  307. accum2 = vpadd_f32(vget_low_f32(accumd),vget_high_f32(accumd));
  308. dotV = vsetq_lane_f32(vget_lane_f32(accum2, 0) + vget_lane_f32(accum2, 1),dotV,3);
  309. blkCnt = S->vectorDimension & 3;
  310. while (blkCnt > 0U)
  311. {
  312. dotV = vsetq_lane_f32(vgetq_lane_f32(dotV,0) + *pIn * *pSupporta++, dotV,0);
  313. dotV = vsetq_lane_f32(vgetq_lane_f32(dotV,1) + *pIn * *pSupportb++, dotV,1);
  314. dotV = vsetq_lane_f32(vgetq_lane_f32(dotV,2) + *pIn * *pSupportc++, dotV,2);
  315. dotV = vsetq_lane_f32(vgetq_lane_f32(dotV,3) + *pIn * *pSupportd++, dotV,3);
  316. pIn++;
  317. blkCnt -- ;
  318. }
  319. vec1 = vld1q_f32(pDualCoefs);
  320. pDualCoefs += 4;
  321. accum = vmulq_f32(vec1,dotV);
  322. accum2 = vpadd_f32(vget_low_f32(accum),vget_high_f32(accum));
  323. sum += vget_lane_f32(accum2, 0) + vget_lane_f32(accum2, 1);
  324. pSupporta += 3*S->vectorDimension;
  325. pSupportb += 3*S->vectorDimension;
  326. pSupportc += 3*S->vectorDimension;
  327. pSupportd += 3*S->vectorDimension;
  328. vectorBlkCnt -- ;
  329. }
  330. pSupport = pSupporta;
  331. vectorBlkCnt = S->nbOfSupportVectors & 3;
  332. while (vectorBlkCnt > 0U)
  333. {
  334. accum = vdupq_n_f32(0);
  335. dot = 0.0f;
  336. pIn = in;
  337. blkCnt = S->vectorDimension >> 2;
  338. while (blkCnt > 0U)
  339. {
  340. vec1 = vld1q_f32(pIn);
  341. vec2 = vld1q_f32(pSupport);
  342. pIn += 4;
  343. pSupport += 4;
  344. accum = vmlaq_f32(accum, vec1,vec2);
  345. blkCnt -- ;
  346. }
  347. accum2 = vpadd_f32(vget_low_f32(accum),vget_high_f32(accum));
  348. dot = vget_lane_f32(accum2, 0) + vget_lane_f32(accum2, 1);
  349. blkCnt = S->vectorDimension & 3;
  350. while (blkCnt > 0U)
  351. {
  352. dot = dot + *pIn++ * *pSupport++;
  353. blkCnt -- ;
  354. }
  355. sum += *pDualCoefs++ * dot;
  356. vectorBlkCnt -- ;
  357. }
  358. *pResult=S->classes[STEP(sum)];
  359. }
  360. #else
  361. void arm_svm_linear_predict_f32(
  362. const arm_svm_linear_instance_f32 *S,
  363. const float32_t * in,
  364. int32_t * pResult)
  365. {
  366. float32_t sum=S->intercept;
  367. float32_t dot=0;
  368. uint32_t i,j;
  369. const float32_t *pSupport = S->supportVectors;
  370. for(i=0; i < S->nbOfSupportVectors; i++)
  371. {
  372. dot=0;
  373. for(j=0; j < S->vectorDimension; j++)
  374. {
  375. dot = dot + in[j]* *pSupport++;
  376. }
  377. sum += S->dualCoefficients[i] * dot;
  378. }
  379. *pResult=S->classes[STEP(sum)];
  380. }
  381. #endif
  382. #endif /* defined(ARM_MATH_MVEF) && !defined(ARM_MATH_AUTOVECTORIZE) */
  383. /**
  384. * @} end of linearsvm group
  385. */