/* ----------------------------------------------------------------------
 * Project:      CMSIS DSP Library
 * Title:        arm_svm_linear_predict_f32.c
 * Description:  SVM Linear Classifier
 *
 *
 * Target Processor: Cortex-M and Cortex-A cores
 * -------------------------------------------------------------------- */
/*
 * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved.
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the License); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an AS IS BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
  26. #include "arm_math.h"
  27. #include <limits.h>
  28. #include <math.h>
/**
 * @addtogroup groupSVM
 * @{
 */

/**
 * @brief SVM linear prediction
 * @param[in]  S       Pointer to an instance of the linear SVM structure.
 * @param[in]  in      Pointer to input vector
 * @param[out] pResult Decision value
 * @return none.
 *
 */
  41. #if defined(ARM_MATH_MVEF) && !defined(ARM_MATH_AUTOVECTORIZE)
  42. #include "arm_helium_utils.h"
  43. void arm_svm_linear_predict_f32(
  44. const arm_svm_linear_instance_f32 *S,
  45. const float32_t * in,
  46. int32_t * pResult)
  47. {
  48. /* inlined Matrix x Vector function interleaved with dot prod */
  49. uint32_t numRows = S->nbOfSupportVectors;
  50. uint32_t numCols = S->vectorDimension;
  51. const float32_t *pSupport = S->supportVectors;
  52. const float32_t *pSrcA = pSupport;
  53. const float32_t *pInA0;
  54. const float32_t *pInA1;
  55. uint32_t row;
  56. uint32_t blkCnt; /* loop counters */
  57. const float32_t *pDualCoef = S->dualCoefficients;
  58. float32_t sum = S->intercept;
  59. row = numRows;
  60. /*
  61. * compute 4 rows in parrallel
  62. */
  63. while (row >= 4)
  64. {
  65. const float32_t *pInA2, *pInA3;
  66. float32_t const *pSrcA0Vec, *pSrcA1Vec, *pSrcA2Vec, *pSrcA3Vec, *pInVec;
  67. f32x4_t vecIn, acc0, acc1, acc2, acc3;
  68. float32_t const *pSrcVecPtr = in;
  69. /*
  70. * Initialize the pointers to 4 consecutive MatrixA rows
  71. */
  72. pInA0 = pSrcA;
  73. pInA1 = pInA0 + numCols;
  74. pInA2 = pInA1 + numCols;
  75. pInA3 = pInA2 + numCols;
  76. /*
  77. * Initialize the vector pointer
  78. */
  79. pInVec = pSrcVecPtr;
  80. /*
  81. * reset accumulators
  82. */
  83. acc0 = vdupq_n_f32(0.0f);
  84. acc1 = vdupq_n_f32(0.0f);
  85. acc2 = vdupq_n_f32(0.0f);
  86. acc3 = vdupq_n_f32(0.0f);
  87. pSrcA0Vec = pInA0;
  88. pSrcA1Vec = pInA1;
  89. pSrcA2Vec = pInA2;
  90. pSrcA3Vec = pInA3;
  91. blkCnt = numCols >> 2;
  92. while (blkCnt > 0U) {
  93. f32x4_t vecA;
  94. vecIn = vld1q(pInVec);
  95. pInVec += 4;
  96. vecA = vld1q(pSrcA0Vec);
  97. pSrcA0Vec += 4;
  98. acc0 = vfmaq(acc0, vecIn, vecA);
  99. vecA = vld1q(pSrcA1Vec);
  100. pSrcA1Vec += 4;
  101. acc1 = vfmaq(acc1, vecIn, vecA);
  102. vecA = vld1q(pSrcA2Vec);
  103. pSrcA2Vec += 4;
  104. acc2 = vfmaq(acc2, vecIn, vecA);
  105. vecA = vld1q(pSrcA3Vec);
  106. pSrcA3Vec += 4;
  107. acc3 = vfmaq(acc3, vecIn, vecA);
  108. blkCnt--;
  109. }
  110. /*
  111. * tail
  112. * (will be merged thru tail predication)
  113. */
  114. blkCnt = numCols & 3;
  115. if (blkCnt > 0U) {
  116. mve_pred16_t p0 = vctp32q(blkCnt);
  117. f32x4_t vecA;
  118. vecIn = vldrwq_z_f32(pInVec, p0);
  119. vecA = vldrwq_z_f32(pSrcA0Vec, p0);
  120. acc0 = vfmaq(acc0, vecIn, vecA);
  121. vecA = vldrwq_z_f32(pSrcA1Vec, p0);
  122. acc1 = vfmaq(acc1, vecIn, vecA);
  123. vecA = vldrwq_z_f32(pSrcA2Vec, p0);
  124. acc2 = vfmaq(acc2, vecIn, vecA);
  125. vecA = vldrwq_z_f32(pSrcA3Vec, p0);
  126. acc3 = vfmaq(acc3, vecIn, vecA);
  127. }
  128. /*
  129. * Sum the partial parts
  130. */
  131. sum += *pDualCoef++ * vecAddAcrossF32Mve(acc0);
  132. sum += *pDualCoef++ * vecAddAcrossF32Mve(acc1);
  133. sum += *pDualCoef++ * vecAddAcrossF32Mve(acc2);
  134. sum += *pDualCoef++ * vecAddAcrossF32Mve(acc3);
  135. pSrcA += numCols * 4;
  136. /*
  137. * Decrement the row loop counter
  138. */
  139. row -= 4;
  140. }
  141. /*
  142. * compute 2 rows in parallel
  143. */
  144. if (row >= 2) {
  145. float32_t const *pSrcA0Vec, *pSrcA1Vec, *pInVec;
  146. f32x4_t vecIn, acc0, acc1;
  147. float32_t const *pSrcVecPtr = in;
  148. /*
  149. * Initialize the pointers to 2 consecutive MatrixA rows
  150. */
  151. pInA0 = pSrcA;
  152. pInA1 = pInA0 + numCols;
  153. /*
  154. * Initialize the vector pointer
  155. */
  156. pInVec = pSrcVecPtr;
  157. /*
  158. * reset accumulators
  159. */
  160. acc0 = vdupq_n_f32(0.0f);
  161. acc1 = vdupq_n_f32(0.0f);
  162. pSrcA0Vec = pInA0;
  163. pSrcA1Vec = pInA1;
  164. blkCnt = numCols >> 2;
  165. while (blkCnt > 0U) {
  166. f32x4_t vecA;
  167. vecIn = vld1q(pInVec);
  168. pInVec += 4;
  169. vecA = vld1q(pSrcA0Vec);
  170. pSrcA0Vec += 4;
  171. acc0 = vfmaq(acc0, vecIn, vecA);
  172. vecA = vld1q(pSrcA1Vec);
  173. pSrcA1Vec += 4;
  174. acc1 = vfmaq(acc1, vecIn, vecA);
  175. blkCnt--;
  176. }
  177. /*
  178. * tail
  179. * (will be merged thru tail predication)
  180. */
  181. blkCnt = numCols & 3;
  182. if (blkCnt > 0U) {
  183. mve_pred16_t p0 = vctp32q(blkCnt);
  184. f32x4_t vecA;
  185. vecIn = vldrwq_z_f32(pInVec, p0);
  186. vecA = vldrwq_z_f32(pSrcA0Vec, p0);
  187. acc0 = vfmaq(acc0, vecIn, vecA);
  188. vecA = vldrwq_z_f32(pSrcA1Vec, p0);
  189. acc1 = vfmaq(acc1, vecIn, vecA);
  190. }
  191. /*
  192. * Sum the partial parts
  193. */
  194. sum += *pDualCoef++ * vecAddAcrossF32Mve(acc0);
  195. sum += *pDualCoef++ * vecAddAcrossF32Mve(acc1);
  196. pSrcA += numCols * 2;
  197. row -= 2;
  198. }
  199. if (row >= 1) {
  200. f32x4_t vecIn, acc0;
  201. float32_t const *pSrcA0Vec, *pInVec;
  202. float32_t const *pSrcVecPtr = in;
  203. /*
  204. * Initialize the pointers to last MatrixA row
  205. */
  206. pInA0 = pSrcA;
  207. /*
  208. * Initialize the vector pointer
  209. */
  210. pInVec = pSrcVecPtr;
  211. /*
  212. * reset accumulators
  213. */
  214. acc0 = vdupq_n_f32(0.0f);
  215. pSrcA0Vec = pInA0;
  216. blkCnt = numCols >> 2;
  217. while (blkCnt > 0U) {
  218. f32x4_t vecA;
  219. vecIn = vld1q(pInVec);
  220. pInVec += 4;
  221. vecA = vld1q(pSrcA0Vec);
  222. pSrcA0Vec += 4;
  223. acc0 = vfmaq(acc0, vecIn, vecA);
  224. blkCnt--;
  225. }
  226. /*
  227. * tail
  228. * (will be merged thru tail predication)
  229. */
  230. blkCnt = numCols & 3;
  231. if (blkCnt > 0U) {
  232. mve_pred16_t p0 = vctp32q(blkCnt);
  233. f32x4_t vecA;
  234. vecIn = vldrwq_z_f32(pInVec, p0);
  235. vecA = vldrwq_z_f32(pSrcA0Vec, p0);
  236. acc0 = vfmaq(acc0, vecIn, vecA);
  237. }
  238. /*
  239. * Sum the partial parts
  240. */
  241. sum += *pDualCoef++ * vecAddAcrossF32Mve(acc0);
  242. }
  243. *pResult = S->classes[STEP(sum)];
  244. }
  245. #else
  246. #if defined(ARM_MATH_NEON)
  247. void arm_svm_linear_predict_f32(
  248. const arm_svm_linear_instance_f32 *S,
  249. const float32_t * in,
  250. int32_t * pResult)
  251. {
  252. float32_t sum = S->intercept;
  253. float32_t dot;
  254. float32x4_t dotV;
  255. float32x4_t accuma,accumb,accumc,accumd,accum;
  256. float32x2_t accum2;
  257. float32x4_t vec1;
  258. float32x4_t vec2,vec2a,vec2b,vec2c,vec2d;
  259. uint32_t blkCnt;
  260. uint32_t vectorBlkCnt;
  261. const float32_t *pIn = in;
  262. const float32_t *pSupport = S->supportVectors;
  263. const float32_t *pSupporta = S->supportVectors;
  264. const float32_t *pSupportb;
  265. const float32_t *pSupportc;
  266. const float32_t *pSupportd;
  267. pSupportb = pSupporta + S->vectorDimension;
  268. pSupportc = pSupportb + S->vectorDimension;
  269. pSupportd = pSupportc + S->vectorDimension;
  270. const float32_t *pDualCoefs = S->dualCoefficients;
  271. vectorBlkCnt = S->nbOfSupportVectors >> 2;
  272. while (vectorBlkCnt > 0U)
  273. {
  274. accuma = vdupq_n_f32(0);
  275. accumb = vdupq_n_f32(0);
  276. accumc = vdupq_n_f32(0);
  277. accumd = vdupq_n_f32(0);
  278. pIn = in;
  279. blkCnt = S->vectorDimension >> 2;
  280. while (blkCnt > 0U)
  281. {
  282. vec1 = vld1q_f32(pIn);
  283. vec2a = vld1q_f32(pSupporta);
  284. vec2b = vld1q_f32(pSupportb);
  285. vec2c = vld1q_f32(pSupportc);
  286. vec2d = vld1q_f32(pSupportd);
  287. pIn += 4;
  288. pSupporta += 4;
  289. pSupportb += 4;
  290. pSupportc += 4;
  291. pSupportd += 4;
  292. accuma = vmlaq_f32(accuma, vec1,vec2a);
  293. accumb = vmlaq_f32(accumb, vec1,vec2b);
  294. accumc = vmlaq_f32(accumc, vec1,vec2c);
  295. accumd = vmlaq_f32(accumd, vec1,vec2d);
  296. blkCnt -- ;
  297. }
  298. accum2 = vpadd_f32(vget_low_f32(accuma),vget_high_f32(accuma));
  299. dotV = vsetq_lane_f32(vget_lane_f32(accum2, 0) + vget_lane_f32(accum2, 1),dotV,0);
  300. accum2 = vpadd_f32(vget_low_f32(accumb),vget_high_f32(accumb));
  301. dotV = vsetq_lane_f32(vget_lane_f32(accum2, 0) + vget_lane_f32(accum2, 1),dotV,1);
  302. accum2 = vpadd_f32(vget_low_f32(accumc),vget_high_f32(accumc));
  303. dotV = vsetq_lane_f32(vget_lane_f32(accum2, 0) + vget_lane_f32(accum2, 1),dotV,2);
  304. accum2 = vpadd_f32(vget_low_f32(accumd),vget_high_f32(accumd));
  305. dotV = vsetq_lane_f32(vget_lane_f32(accum2, 0) + vget_lane_f32(accum2, 1),dotV,3);
  306. blkCnt = S->vectorDimension & 3;
  307. while (blkCnt > 0U)
  308. {
  309. dotV = vsetq_lane_f32(vgetq_lane_f32(dotV,0) + *pIn * *pSupporta++, dotV,0);
  310. dotV = vsetq_lane_f32(vgetq_lane_f32(dotV,1) + *pIn * *pSupportb++, dotV,1);
  311. dotV = vsetq_lane_f32(vgetq_lane_f32(dotV,2) + *pIn * *pSupportc++, dotV,2);
  312. dotV = vsetq_lane_f32(vgetq_lane_f32(dotV,3) + *pIn * *pSupportd++, dotV,3);
  313. pIn++;
  314. blkCnt -- ;
  315. }
  316. vec1 = vld1q_f32(pDualCoefs);
  317. pDualCoefs += 4;
  318. accum = vmulq_f32(vec1,dotV);
  319. accum2 = vpadd_f32(vget_low_f32(accum),vget_high_f32(accum));
  320. sum += vget_lane_f32(accum2, 0) + vget_lane_f32(accum2, 1);
  321. pSupporta += 3*S->vectorDimension;
  322. pSupportb += 3*S->vectorDimension;
  323. pSupportc += 3*S->vectorDimension;
  324. pSupportd += 3*S->vectorDimension;
  325. vectorBlkCnt -- ;
  326. }
  327. pSupport = pSupporta;
  328. vectorBlkCnt = S->nbOfSupportVectors & 3;
  329. while (vectorBlkCnt > 0U)
  330. {
  331. accum = vdupq_n_f32(0);
  332. dot = 0.0f;
  333. pIn = in;
  334. blkCnt = S->vectorDimension >> 2;
  335. while (blkCnt > 0U)
  336. {
  337. vec1 = vld1q_f32(pIn);
  338. vec2 = vld1q_f32(pSupport);
  339. pIn += 4;
  340. pSupport += 4;
  341. accum = vmlaq_f32(accum, vec1,vec2);
  342. blkCnt -- ;
  343. }
  344. accum2 = vpadd_f32(vget_low_f32(accum),vget_high_f32(accum));
  345. dot = vget_lane_f32(accum2, 0) + vget_lane_f32(accum2, 1);
  346. blkCnt = S->vectorDimension & 3;
  347. while (blkCnt > 0U)
  348. {
  349. dot = dot + *pIn++ * *pSupport++;
  350. blkCnt -- ;
  351. }
  352. sum += *pDualCoefs++ * dot;
  353. vectorBlkCnt -- ;
  354. }
  355. *pResult=S->classes[STEP(sum)];
  356. }
  357. #else
  358. void arm_svm_linear_predict_f32(
  359. const arm_svm_linear_instance_f32 *S,
  360. const float32_t * in,
  361. int32_t * pResult)
  362. {
  363. float32_t sum=S->intercept;
  364. float32_t dot=0;
  365. uint32_t i,j;
  366. const float32_t *pSupport = S->supportVectors;
  367. for(i=0; i < S->nbOfSupportVectors; i++)
  368. {
  369. dot=0;
  370. for(j=0; j < S->vectorDimension; j++)
  371. {
  372. dot = dot + in[j]* *pSupport++;
  373. }
  374. sum += S->dualCoefficients[i] * dot;
  375. }
  376. *pResult=S->classes[STEP(sum)];
  377. }
  378. #endif
  379. #endif /* defined(ARM_MATH_MVEF) && !defined(ARM_MATH_AUTOVECTORIZE) */
  380. /**
  381. * @} end of groupSVM group
  382. */