  1. /* ----------------------------------------------------------------------
  2. * Project: CMSIS DSP Library
  3. * Title: arm_svm_linear_predict_f32.c
  4. * Description: SVM Linear Classifier
  5. *
  6. * $Date: 23 April 2021
  7. * $Revision: V1.9.0
  8. *
  9. * Target Processor: Cortex-M and Cortex-A cores
  10. * -------------------------------------------------------------------- */
  11. /*
  12. * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved.
  13. *
  14. * SPDX-License-Identifier: Apache-2.0
  15. *
  16. * Licensed under the Apache License, Version 2.0 (the License); you may
  17. * not use this file except in compliance with the License.
  18. * You may obtain a copy of the License at
  19. *
  20. * www.apache.org/licenses/LICENSE-2.0
  21. *
  22. * Unless required by applicable law or agreed to in writing, software
  23. * distributed under the License is distributed on an AS IS BASIS, WITHOUT
  24. * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  25. * See the License for the specific language governing permissions and
  26. * limitations under the License.
  27. */
  28. #include "dsp/svm_functions.h"
  29. #include <limits.h>
  30. #include <math.h>
  31. /**
  32. * @addtogroup linearsvm
  33. * @{
  34. */
  35. /**
  36. * @brief SVM linear prediction
  37. * @param[in] S Pointer to an instance of the linear SVM structure.
  38. * @param[in] in Pointer to input vector
  39. * @param[out] pResult Decision value
  40. * @return none.
  41. *
  42. */
  43. #if defined(ARM_MATH_MVEF) && !defined(ARM_MATH_AUTOVECTORIZE)
  44. #include "arm_helium_utils.h"
void arm_svm_linear_predict_f32(
const arm_svm_linear_instance_f32 *S,
const float32_t * in,
int32_t * pResult)
{
    /*
     * Linear SVM decision function, Helium/MVE variant.
     *
     * Evaluates  sum = intercept + dualCoefficients . (supportVectors x in)
     * as a matrix-times-vector product interleaved with the dual-coefficient
     * weighting, then writes the class label selected by the sign of sum.
     * Support vectors (matrix rows) are consumed 4 at a time, then 2, then 1.
     */
    /* inlined Matrix x Vector function interleaved with dot prod */
    uint32_t numRows = S->nbOfSupportVectors;      /* one row per support vector */
    uint32_t numCols = S->vectorDimension;         /* features per vector */
    const float32_t *pSupport = S->supportVectors;
    const float32_t *pSrcA = pSupport;             /* first row of the current row group */
    const float32_t *pInA0;
    const float32_t *pInA1;
    uint32_t row;
    uint32_t blkCnt; /* loop counters */
    const float32_t *pDualCoef = S->dualCoefficients;
    float32_t sum = S->intercept;                  /* running decision value */
    row = numRows;
    /*
     * compute 4 rows in parallel
     */
    while (row >= 4)
    {
        const float32_t *pInA2, *pInA3;
        float32_t const *pSrcA0Vec, *pSrcA1Vec, *pSrcA2Vec, *pSrcA3Vec, *pInVec;
        f32x4_t vecIn, acc0, acc1, acc2, acc3;
        float32_t const *pSrcVecPtr = in;
        /*
         * Initialize the pointers to 4 consecutive MatrixA rows
         */
        pInA0 = pSrcA;
        pInA1 = pInA0 + numCols;
        pInA2 = pInA1 + numCols;
        pInA3 = pInA2 + numCols;
        /*
         * Initialize the vector pointer
         */
        pInVec = pSrcVecPtr;
        /*
         * reset accumulators
         */
        acc0 = vdupq_n_f32(0.0f);
        acc1 = vdupq_n_f32(0.0f);
        acc2 = vdupq_n_f32(0.0f);
        acc3 = vdupq_n_f32(0.0f);
        pSrcA0Vec = pInA0;
        pSrcA1Vec = pInA1;
        pSrcA2Vec = pInA2;
        pSrcA3Vec = pInA3;
        /* Main loop: 4 float32 lanes per iteration, one shared input load
         * reused against all 4 rows (fused multiply-accumulate per row). */
        blkCnt = numCols >> 2;
        while (blkCnt > 0U) {
            f32x4_t vecA;
            vecIn = vld1q(pInVec);
            pInVec += 4;
            vecA = vld1q(pSrcA0Vec);
            pSrcA0Vec += 4;
            acc0 = vfmaq(acc0, vecIn, vecA);
            vecA = vld1q(pSrcA1Vec);
            pSrcA1Vec += 4;
            acc1 = vfmaq(acc1, vecIn, vecA);
            vecA = vld1q(pSrcA2Vec);
            pSrcA2Vec += 4;
            acc2 = vfmaq(acc2, vecIn, vecA);
            vecA = vld1q(pSrcA3Vec);
            pSrcA3Vec += 4;
            acc3 = vfmaq(acc3, vecIn, vecA);
            blkCnt--;
        }
        /*
         * tail: the remaining 1..3 columns use predicated zero-padded loads,
         * so inactive lanes contribute nothing to the accumulators
         * (will be merged thru tail predication)
         */
        blkCnt = numCols & 3;
        if (blkCnt > 0U) {
            mve_pred16_t p0 = vctp32q(blkCnt);
            f32x4_t vecA;
            vecIn = vldrwq_z_f32(pInVec, p0);
            vecA = vldrwq_z_f32(pSrcA0Vec, p0);
            acc0 = vfmaq(acc0, vecIn, vecA);
            vecA = vldrwq_z_f32(pSrcA1Vec, p0);
            acc1 = vfmaq(acc1, vecIn, vecA);
            vecA = vldrwq_z_f32(pSrcA2Vec, p0);
            acc2 = vfmaq(acc2, vecIn, vecA);
            vecA = vldrwq_z_f32(pSrcA3Vec, p0);
            acc3 = vfmaq(acc3, vecIn, vecA);
        }
        /*
         * Sum the partial parts: weight each row accumulator by its dual
         * coefficient, fold into acc0, then reduce across lanes.
         */
        acc0 = vmulq_n_f32(acc0,*pDualCoef++);
        acc0 = vfmaq_n_f32(acc0,acc1,*pDualCoef++);
        acc0 = vfmaq_n_f32(acc0,acc2,*pDualCoef++);
        acc0 = vfmaq_n_f32(acc0,acc3,*pDualCoef++);
        sum += vecAddAcrossF32Mve(acc0);
        /* Advance to the next group of 4 support vectors. */
        pSrcA += numCols * 4;
        /*
         * Decrement the row loop counter
         */
        row -= 4;
    }
    /*
     * compute 2 rows in parallel (at most one pass: row < 4 here)
     */
    if (row >= 2) {
        float32_t const *pSrcA0Vec, *pSrcA1Vec, *pInVec;
        f32x4_t vecIn, acc0, acc1;
        float32_t const *pSrcVecPtr = in;
        /*
         * Initialize the pointers to 2 consecutive MatrixA rows
         */
        pInA0 = pSrcA;
        pInA1 = pInA0 + numCols;
        /*
         * Initialize the vector pointer
         */
        pInVec = pSrcVecPtr;
        /*
         * reset accumulators
         */
        acc0 = vdupq_n_f32(0.0f);
        acc1 = vdupq_n_f32(0.0f);
        pSrcA0Vec = pInA0;
        pSrcA1Vec = pInA1;
        blkCnt = numCols >> 2;
        while (blkCnt > 0U) {
            f32x4_t vecA;
            vecIn = vld1q(pInVec);
            pInVec += 4;
            vecA = vld1q(pSrcA0Vec);
            pSrcA0Vec += 4;
            acc0 = vfmaq(acc0, vecIn, vecA);
            vecA = vld1q(pSrcA1Vec);
            pSrcA1Vec += 4;
            acc1 = vfmaq(acc1, vecIn, vecA);
            blkCnt--;
        }
        /*
         * tail
         * (will be merged thru tail predication)
         */
        blkCnt = numCols & 3;
        if (blkCnt > 0U) {
            mve_pred16_t p0 = vctp32q(blkCnt);
            f32x4_t vecA;
            vecIn = vldrwq_z_f32(pInVec, p0);
            vecA = vldrwq_z_f32(pSrcA0Vec, p0);
            acc0 = vfmaq(acc0, vecIn, vecA);
            vecA = vldrwq_z_f32(pSrcA1Vec, p0);
            acc1 = vfmaq(acc1, vecIn, vecA);
        }
        /*
         * Sum the partial parts
         */
        acc0 = vmulq_n_f32(acc0,*pDualCoef++);
        acc0 = vfmaq_n_f32(acc0,acc1,*pDualCoef++);
        sum += vecAddAcrossF32Mve(acc0);
        pSrcA += numCols * 2;
        row -= 2;
    }
    /* last remaining support vector, if the row count was odd */
    if (row >= 1) {
        f32x4_t vecIn, acc0;
        float32_t const *pSrcA0Vec, *pInVec;
        float32_t const *pSrcVecPtr = in;
        /*
         * Initialize the pointers to last MatrixA row
         */
        pInA0 = pSrcA;
        /*
         * Initialize the vector pointer
         */
        pInVec = pSrcVecPtr;
        /*
         * reset accumulators
         */
        acc0 = vdupq_n_f32(0.0f);
        pSrcA0Vec = pInA0;
        blkCnt = numCols >> 2;
        while (blkCnt > 0U) {
            f32x4_t vecA;
            vecIn = vld1q(pInVec);
            pInVec += 4;
            vecA = vld1q(pSrcA0Vec);
            pSrcA0Vec += 4;
            acc0 = vfmaq(acc0, vecIn, vecA);
            blkCnt--;
        }
        /*
         * tail
         * (will be merged thru tail predication)
         */
        blkCnt = numCols & 3;
        if (blkCnt > 0U) {
            mve_pred16_t p0 = vctp32q(blkCnt);
            f32x4_t vecA;
            vecIn = vldrwq_z_f32(pInVec, p0);
            vecA = vldrwq_z_f32(pSrcA0Vec, p0);
            acc0 = vfmaq(acc0, vecIn, vecA);
        }
        /*
         * Sum the partial parts
         */
        sum += *pDualCoef++ * vecAddAcrossF32Mve(acc0);
    }
    /* STEP() maps the sign of sum to a 0/1 index into the class-label
     * table (macro defined in the SVM support headers). */
    *pResult = S->classes[STEP(sum)];
}
  249. #else
  250. #if defined(ARM_MATH_NEON)
void arm_svm_linear_predict_f32(
const arm_svm_linear_instance_f32 *S,
const float32_t * in,
int32_t * pResult)
{
    /*
     * Linear SVM decision function, NEON variant.
     *
     * Evaluates  sum = intercept + dualCoefficients . (supportVectors x in)
     * by processing 4 support vectors at a time (accumulators a..d), with a
     * scalar-per-vector fallback for the remaining 1..3 vectors, then writes
     * the class label selected by the sign of sum.
     */
    float32_t sum = S->intercept;      /* running decision value */
    float32_t dot;
    float32x4_t dotV;                  /* lane i = dot(in, supportVector i) of the group */
    float32x4_t accuma,accumb,accumc,accumd,accum;
    float32x2_t accum2;
    float32x4_t vec1;
    float32x4_t vec2,vec2a,vec2b,vec2c,vec2d;
    uint32_t blkCnt;
    uint32_t vectorBlkCnt;
    const float32_t *pIn = in;
    const float32_t *pSupport = S->supportVectors;
    /* Four row pointers, one per support vector in the current group. */
    const float32_t *pSupporta = S->supportVectors;
    const float32_t *pSupportb;
    const float32_t *pSupportc;
    const float32_t *pSupportd;
    pSupportb = pSupporta + S->vectorDimension;
    pSupportc = pSupportb + S->vectorDimension;
    pSupportd = pSupportc + S->vectorDimension;
    const float32_t *pDualCoefs = S->dualCoefficients;
    /* Outer loop: groups of 4 support vectors. */
    vectorBlkCnt = S->nbOfSupportVectors >> 2;
    while (vectorBlkCnt > 0U)
    {
        accuma = vdupq_n_f32(0);
        accumb = vdupq_n_f32(0);
        accumc = vdupq_n_f32(0);
        accumd = vdupq_n_f32(0);
        pIn = in;
        /* Inner loop: 4 features per iteration; one input load reused
         * against all 4 support vectors. */
        blkCnt = S->vectorDimension >> 2;
        while (blkCnt > 0U)
        {
            vec1 = vld1q_f32(pIn);
            vec2a = vld1q_f32(pSupporta);
            vec2b = vld1q_f32(pSupportb);
            vec2c = vld1q_f32(pSupportc);
            vec2d = vld1q_f32(pSupportd);
            pIn += 4;
            pSupporta += 4;
            pSupportb += 4;
            pSupportc += 4;
            pSupportd += 4;
            accuma = vmlaq_f32(accuma, vec1,vec2a);
            accumb = vmlaq_f32(accumb, vec1,vec2b);
            accumc = vmlaq_f32(accumc, vec1,vec2c);
            accumd = vmlaq_f32(accumd, vec1,vec2d);
            blkCnt -- ;
        }
        /* Horizontal-add each accumulator into one lane of dotV.
         * NOTE(review): the first vsetq_lane_f32 reads dotV before any lane
         * has been written; all 4 lanes are assigned before use below, but
         * initializing dotV (vdupq_n_f32(0)) would silence uninitialized-
         * value warnings — confirm against upstream. */
        accum2 = vpadd_f32(vget_low_f32(accuma),vget_high_f32(accuma));
        dotV = vsetq_lane_f32(vget_lane_f32(accum2, 0) + vget_lane_f32(accum2, 1),dotV,0);
        accum2 = vpadd_f32(vget_low_f32(accumb),vget_high_f32(accumb));
        dotV = vsetq_lane_f32(vget_lane_f32(accum2, 0) + vget_lane_f32(accum2, 1),dotV,1);
        accum2 = vpadd_f32(vget_low_f32(accumc),vget_high_f32(accumc));
        dotV = vsetq_lane_f32(vget_lane_f32(accum2, 0) + vget_lane_f32(accum2, 1),dotV,2);
        accum2 = vpadd_f32(vget_low_f32(accumd),vget_high_f32(accumd));
        dotV = vsetq_lane_f32(vget_lane_f32(accum2, 0) + vget_lane_f32(accum2, 1),dotV,3);
        /* Tail: remaining 1..3 features, accumulated lane-by-lane. */
        blkCnt = S->vectorDimension & 3;
        while (blkCnt > 0U)
        {
            dotV = vsetq_lane_f32(vgetq_lane_f32(dotV,0) + *pIn * *pSupporta++, dotV,0);
            dotV = vsetq_lane_f32(vgetq_lane_f32(dotV,1) + *pIn * *pSupportb++, dotV,1);
            dotV = vsetq_lane_f32(vgetq_lane_f32(dotV,2) + *pIn * *pSupportc++, dotV,2);
            dotV = vsetq_lane_f32(vgetq_lane_f32(dotV,3) + *pIn * *pSupportd++, dotV,3);
            pIn++;
            blkCnt -- ;
        }
        /* Weight the 4 dot products by their dual coefficients and fold
         * the horizontal sum into the scalar decision value. */
        vec1 = vld1q_f32(pDualCoefs);
        pDualCoefs += 4;
        accum = vmulq_f32(vec1,dotV);
        accum2 = vpadd_f32(vget_low_f32(accum),vget_high_f32(accum));
        sum += vget_lane_f32(accum2, 0) + vget_lane_f32(accum2, 1);
        /* Each row pointer has already advanced by one vectorDimension;
         * skip the other 3 rows of the group to reach the next group. */
        pSupporta += 3*S->vectorDimension;
        pSupportb += 3*S->vectorDimension;
        pSupportc += 3*S->vectorDimension;
        pSupportd += 3*S->vectorDimension;
        vectorBlkCnt -- ;
    }
    /* Leftover 1..3 support vectors, processed one at a time. */
    pSupport = pSupporta;
    vectorBlkCnt = S->nbOfSupportVectors & 3;
    while (vectorBlkCnt > 0U)
    {
        accum = vdupq_n_f32(0);
        dot = 0.0f;
        pIn = in;
        blkCnt = S->vectorDimension >> 2;
        while (blkCnt > 0U)
        {
            vec1 = vld1q_f32(pIn);
            vec2 = vld1q_f32(pSupport);
            pIn += 4;
            pSupport += 4;
            accum = vmlaq_f32(accum, vec1,vec2);
            blkCnt -- ;
        }
        accum2 = vpadd_f32(vget_low_f32(accum),vget_high_f32(accum));
        dot = vget_lane_f32(accum2, 0) + vget_lane_f32(accum2, 1);
        /* Scalar tail for the remaining 1..3 features. */
        blkCnt = S->vectorDimension & 3;
        while (blkCnt > 0U)
        {
            dot = dot + *pIn++ * *pSupport++;
            blkCnt -- ;
        }
        sum += *pDualCoefs++ * dot;
        vectorBlkCnt -- ;
    }
    /* STEP() maps the sign of sum to a 0/1 index into the class-label
     * table (macro defined in the SVM support headers). */
    *pResult=S->classes[STEP(sum)];
}
  361. #else
  362. void arm_svm_linear_predict_f32(
  363. const arm_svm_linear_instance_f32 *S,
  364. const float32_t * in,
  365. int32_t * pResult)
  366. {
  367. float32_t sum=S->intercept;
  368. float32_t dot=0;
  369. uint32_t i,j;
  370. const float32_t *pSupport = S->supportVectors;
  371. for(i=0; i < S->nbOfSupportVectors; i++)
  372. {
  373. dot=0;
  374. for(j=0; j < S->vectorDimension; j++)
  375. {
  376. dot = dot + in[j]* *pSupport++;
  377. }
  378. sum += S->dualCoefficients[i] * dot;
  379. }
  380. *pResult=S->classes[STEP(sum)];
  381. }
  382. #endif
  383. #endif /* defined(ARM_MATH_MVEF) && !defined(ARM_MATH_AUTOVECTORIZE) */
  384. /**
  385. * @} end of linearsvm group
  386. */