
/* ----------------------------------------------------------------------
 * Project:      CMSIS DSP Library
 * Title:        arm_svm_rbf_predict_f32.c
 * Description:  SVM Radial Basis Function Classifier
 *
 * $Date:        23 April 2021
 * $Revision:    V1.9.0
 *
 * Target Processor: Cortex-M and Cortex-A cores
 * -------------------------------------------------------------------- */
/*
 * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved.
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the License); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an AS IS BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "dsp/svm_functions.h"
#include <limits.h>
#include <math.h>
/**
 * @addtogroup rbfsvm
 * @{
 */

/**
 * @brief SVM rbf prediction
 * @param[in]  S       Pointer to an instance of the rbf SVM structure.
 * @param[in]  in      Pointer to input vector
 * @param[out] pResult Decision value
 * @return none.
 *
 */
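/*
 * A minimal usage sketch (illustrative, not part of the library): it assumes
 * the instance is prepared with arm_svm_rbf_init_f32(), and all model values
 * below are hypothetical, for a 2-class problem with two support vectors of
 * dimension 2.
 *
 *     static const float32_t dualCoefs[2] = { 0.01f, -0.01f };
 *     static const float32_t supVecs[4]   = { 0.3f, 0.1f, 2.0f, 1.9f };
 *     static const int32_t   classes[2]   = { 0, 1 };
 *
 *     arm_svm_rbf_instance_f32 svm;
 *     float32_t sample[2] = { 0.4f, 0.1f };
 *     int32_t   predictedClass;
 *
 *     arm_svm_rbf_init_f32(&svm, 2, 2, -0.1f, dualCoefs, supVecs, classes, 2.5f);
 *     arm_svm_rbf_predict_f32(&svm, sample, &predictedClass);
 */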
#if defined(ARM_MATH_MVEF) && !defined(ARM_MATH_AUTOVECTORIZE)

#include "arm_helium_utils.h"
#include "arm_vec_math.h"
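/*
 * Helium (MVE) variant: the support vectors are treated as the rows of a
 * matrix and processed 4 at a time (then 2, then 1). For each row, the squared
 * Euclidean distance to the input is accumulated with vsubq/vfmaq, with the
 * column tail handled by predicated loads. The per-row distances are then
 * packed into one vector so that vexpq_f32 evaluates exp(-gamma * ||x - sv||^2)
 * for several kernels at once.
 */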
void arm_svm_rbf_predict_f32(
    const arm_svm_rbf_instance_f32 *S,
    const float32_t * in,
    int32_t * pResult)
{
    /* inlined Matrix x Vector function interleaved with dot prod */
    uint32_t         numRows = S->nbOfSupportVectors;
    uint32_t         numCols = S->vectorDimension;
    const float32_t *pSupport = S->supportVectors;
    const float32_t *pSrcA = pSupport;
    const float32_t *pInA0;
    const float32_t *pInA1;
    uint32_t         row;
    uint32_t         blkCnt;     /* loop counters */
    const float32_t *pDualCoef = S->dualCoefficients;
    float32_t        sum = S->intercept;
    f32x4_t          vSum = vdupq_n_f32(0.0f);

    row = numRows;
    /*
     * compute 4 rows in parallel
     */
    while (row >= 4) {
        const float32_t *pInA2, *pInA3;
        float32_t const *pSrcA0Vec, *pSrcA1Vec, *pSrcA2Vec, *pSrcA3Vec, *pInVec;
        f32x4_t          vecIn, acc0, acc1, acc2, acc3;
        float32_t const *pSrcVecPtr = in;

        /*
         * Initialize the pointers to 4 consecutive MatrixA rows
         */
        pInA0 = pSrcA;
        pInA1 = pInA0 + numCols;
        pInA2 = pInA1 + numCols;
        pInA3 = pInA2 + numCols;
        /*
         * Initialize the vector pointer
         */
        pInVec = pSrcVecPtr;
        /*
         * reset accumulators
         */
        acc0 = vdupq_n_f32(0.0f);
        acc1 = vdupq_n_f32(0.0f);
        acc2 = vdupq_n_f32(0.0f);
        acc3 = vdupq_n_f32(0.0f);

        pSrcA0Vec = pInA0;
        pSrcA1Vec = pInA1;
        pSrcA2Vec = pInA2;
        pSrcA3Vec = pInA3;

        blkCnt = numCols >> 2;
        while (blkCnt > 0U) {
            f32x4_t vecA;
            f32x4_t vecDif;

            vecIn = vld1q(pInVec);
            pInVec += 4;

            vecA = vld1q(pSrcA0Vec);
            pSrcA0Vec += 4;
            vecDif = vsubq(vecIn, vecA);
            acc0 = vfmaq(acc0, vecDif, vecDif);

            vecA = vld1q(pSrcA1Vec);
            pSrcA1Vec += 4;
            vecDif = vsubq(vecIn, vecA);
            acc1 = vfmaq(acc1, vecDif, vecDif);

            vecA = vld1q(pSrcA2Vec);
            pSrcA2Vec += 4;
            vecDif = vsubq(vecIn, vecA);
            acc2 = vfmaq(acc2, vecDif, vecDif);

            vecA = vld1q(pSrcA3Vec);
            pSrcA3Vec += 4;
            vecDif = vsubq(vecIn, vecA);
            acc3 = vfmaq(acc3, vecDif, vecDif);

            blkCnt--;
        }
        /*
         * tail
         * (will be merged through tail predication)
         */
        blkCnt = numCols & 3;
        if (blkCnt > 0U) {
            mve_pred16_t p0 = vctp32q(blkCnt);
            f32x4_t      vecA;
            f32x4_t      vecDif;

            vecIn = vldrwq_z_f32(pInVec, p0);

            vecA = vldrwq_z_f32(pSrcA0Vec, p0);
            vecDif = vsubq(vecIn, vecA);
            acc0 = vfmaq(acc0, vecDif, vecDif);

            vecA = vldrwq_z_f32(pSrcA1Vec, p0);
            vecDif = vsubq(vecIn, vecA);
            acc1 = vfmaq(acc1, vecDif, vecDif);

            vecA = vldrwq_z_f32(pSrcA2Vec, p0);
            vecDif = vsubq(vecIn, vecA);
            acc2 = vfmaq(acc2, vecDif, vecDif);

            vecA = vldrwq_z_f32(pSrcA3Vec, p0);
            vecDif = vsubq(vecIn, vecA);
            acc3 = vfmaq(acc3, vecDif, vecDif);
        }
        /*
         * Sum the partial parts
         */
        //sum += *pDualCoef++ * expf(-S->gamma * vecReduceF32Mve(acc0));
        f32x4_t vtmp = vuninitializedq_f32();
        vtmp = vsetq_lane(vecAddAcrossF32Mve(acc0), vtmp, 0);
        vtmp = vsetq_lane(vecAddAcrossF32Mve(acc1), vtmp, 1);
        vtmp = vsetq_lane(vecAddAcrossF32Mve(acc2), vtmp, 2);
        vtmp = vsetq_lane(vecAddAcrossF32Mve(acc3), vtmp, 3);

        vSum = vfmaq_f32(vSum, vld1q(pDualCoef),
                         vexpq_f32(vmulq_n_f32(vtmp, -S->gamma)));
        pDualCoef += 4;

        pSrcA += numCols * 4;
        /*
         * Decrement the row loop counter
         */
        row -= 4;
    }
    /*
     * compute 2 rows in parallel
     */
    if (row >= 2) {
        float32_t const *pSrcA0Vec, *pSrcA1Vec, *pInVec;
        f32x4_t          vecIn, acc0, acc1;
        float32_t const *pSrcVecPtr = in;

        /*
         * Initialize the pointers to 2 consecutive MatrixA rows
         */
        pInA0 = pSrcA;
        pInA1 = pInA0 + numCols;
        /*
         * Initialize the vector pointer
         */
        pInVec = pSrcVecPtr;
        /*
         * reset accumulators
         */
        acc0 = vdupq_n_f32(0.0f);
        acc1 = vdupq_n_f32(0.0f);

        pSrcA0Vec = pInA0;
        pSrcA1Vec = pInA1;

        blkCnt = numCols >> 2;
        while (blkCnt > 0U) {
            f32x4_t vecA;
            f32x4_t vecDif;

            vecIn = vld1q(pInVec);
            pInVec += 4;

            vecA = vld1q(pSrcA0Vec);
            pSrcA0Vec += 4;
            vecDif = vsubq(vecIn, vecA);
            acc0 = vfmaq(acc0, vecDif, vecDif);

            vecA = vld1q(pSrcA1Vec);
            pSrcA1Vec += 4;
            vecDif = vsubq(vecIn, vecA);
            acc1 = vfmaq(acc1, vecDif, vecDif);

            blkCnt--;
        }
        /*
         * tail
         * (will be merged through tail predication)
         */
        blkCnt = numCols & 3;
        if (blkCnt > 0U) {
            mve_pred16_t p0 = vctp32q(blkCnt);
            f32x4_t      vecA, vecDif;

            vecIn = vldrwq_z_f32(pInVec, p0);

            vecA = vldrwq_z_f32(pSrcA0Vec, p0);
            vecDif = vsubq(vecIn, vecA);
            acc0 = vfmaq(acc0, vecDif, vecDif);

            vecA = vldrwq_z_f32(pSrcA1Vec, p0);
            vecDif = vsubq(vecIn, vecA);
            acc1 = vfmaq(acc1, vecDif, vecDif);
        }
        /*
         * Sum the partial parts
         */
        f32x4_t vtmp = vuninitializedq_f32();
        vtmp = vsetq_lane(vecAddAcrossF32Mve(acc0), vtmp, 0);
        vtmp = vsetq_lane(vecAddAcrossF32Mve(acc1), vtmp, 1);

        vSum = vfmaq_m_f32(vSum, vld1q(pDualCoef),
                           vexpq_f32(vmulq_n_f32(vtmp, -S->gamma)), vctp32q(2));
        pDualCoef += 2;

        pSrcA += numCols * 2;
        row -= 2;
    }
    if (row >= 1) {
        f32x4_t          vecIn, acc0;
        float32_t const *pSrcA0Vec, *pInVec;
        float32_t const *pSrcVecPtr = in;

        /*
         * Initialize the pointers to last MatrixA row
         */
        pInA0 = pSrcA;
        /*
         * Initialize the vector pointer
         */
        pInVec = pSrcVecPtr;
        /*
         * reset accumulators
         */
        acc0 = vdupq_n_f32(0.0f);

        pSrcA0Vec = pInA0;

        blkCnt = numCols >> 2;
        while (blkCnt > 0U) {
            f32x4_t vecA, vecDif;

            vecIn = vld1q(pInVec);
            pInVec += 4;

            vecA = vld1q(pSrcA0Vec);
            pSrcA0Vec += 4;
            vecDif = vsubq(vecIn, vecA);
            acc0 = vfmaq(acc0, vecDif, vecDif);

            blkCnt--;
        }
        /*
         * tail
         * (will be merged through tail predication)
         */
        blkCnt = numCols & 3;
        if (blkCnt > 0U) {
            mve_pred16_t p0 = vctp32q(blkCnt);
            f32x4_t      vecA, vecDif;

            vecIn = vldrwq_z_f32(pInVec, p0);
            vecA = vldrwq_z_f32(pSrcA0Vec, p0);
            vecDif = vsubq(vecIn, vecA);
            acc0 = vfmaq(acc0, vecDif, vecDif);
        }
        /*
         * Sum the partial parts
         */
        f32x4_t vtmp = vuninitializedq_f32();
        vtmp = vsetq_lane(vecAddAcrossF32Mve(acc0), vtmp, 0);

        vSum = vfmaq_m_f32(vSum, vld1q(pDualCoef),
                           vexpq_f32(vmulq_n_f32(vtmp, -S->gamma)), vctp32q(1));
    }
    sum += vecAddAcrossF32Mve(vSum);
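    /* STEP() maps the sign of the decision value to a class index (0 or 1) */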
    *pResult = S->classes[STEP(sum)];
}
#else
#if defined(ARM_MATH_NEON)

#include "NEMath.h"
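/*
 * Neon variant: 4 support vectors are processed per outer iteration, each one
 * accumulating its squared distance to the input in its own register. The four
 * distances are packed into a single vector so that vexpq_f32 evaluates four
 * RBF kernels at once. Any leftover support vectors fall through to the scalar
 * expf() loop below.
 */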
void arm_svm_rbf_predict_f32(
    const arm_svm_rbf_instance_f32 *S,
    const float32_t * in,
    int32_t * pResult)
{
    float32_t sum = S->intercept;
    float32_t dot;
    /* zero-initialized so no lane is read before it is written */
    float32x4_t dotV = vdupq_n_f32(0.0f);

    float32x4_t accuma, accumb, accumc, accumd, accum;
    float32x2_t accum2;
    float32x4_t temp;
    float32x4_t vec1;
    float32x4_t vec2, vec2a, vec2b, vec2c, vec2d;

    uint32_t blkCnt;
    uint32_t vectorBlkCnt;

    const float32_t *pIn = in;

    const float32_t *pSupport  = S->supportVectors;
    const float32_t *pSupporta = S->supportVectors;
    const float32_t *pSupportb;
    const float32_t *pSupportc;
    const float32_t *pSupportd;

    pSupportb = pSupporta + S->vectorDimension;
    pSupportc = pSupportb + S->vectorDimension;
    pSupportd = pSupportc + S->vectorDimension;

    const float32_t *pDualCoefs = S->dualCoefficients;

    vectorBlkCnt = S->nbOfSupportVectors >> 2;
    while (vectorBlkCnt > 0U)
    {
        accuma = vdupq_n_f32(0);
        accumb = vdupq_n_f32(0);
        accumc = vdupq_n_f32(0);
        accumd = vdupq_n_f32(0);

        pIn = in;

        blkCnt = S->vectorDimension >> 2;
        while (blkCnt > 0U)
        {
            vec1  = vld1q_f32(pIn);
            vec2a = vld1q_f32(pSupporta);
            vec2b = vld1q_f32(pSupportb);
            vec2c = vld1q_f32(pSupportc);
            vec2d = vld1q_f32(pSupportd);

            pIn       += 4;
            pSupporta += 4;
            pSupportb += 4;
            pSupportc += 4;
            pSupportd += 4;

            temp   = vsubq_f32(vec1, vec2a);
            accuma = vmlaq_f32(accuma, temp, temp);

            temp   = vsubq_f32(vec1, vec2b);
            accumb = vmlaq_f32(accumb, temp, temp);

            temp   = vsubq_f32(vec1, vec2c);
            accumc = vmlaq_f32(accumc, temp, temp);

            temp   = vsubq_f32(vec1, vec2d);
            accumd = vmlaq_f32(accumd, temp, temp);

            blkCnt--;
        }
        accum2 = vpadd_f32(vget_low_f32(accuma), vget_high_f32(accuma));
        dotV   = vsetq_lane_f32(vget_lane_f32(accum2, 0) + vget_lane_f32(accum2, 1), dotV, 0);

        accum2 = vpadd_f32(vget_low_f32(accumb), vget_high_f32(accumb));
        dotV   = vsetq_lane_f32(vget_lane_f32(accum2, 0) + vget_lane_f32(accum2, 1), dotV, 1);

        accum2 = vpadd_f32(vget_low_f32(accumc), vget_high_f32(accumc));
        dotV   = vsetq_lane_f32(vget_lane_f32(accum2, 0) + vget_lane_f32(accum2, 1), dotV, 2);

        accum2 = vpadd_f32(vget_low_f32(accumd), vget_high_f32(accumd));
        dotV   = vsetq_lane_f32(vget_lane_f32(accum2, 0) + vget_lane_f32(accum2, 1), dotV, 3);

        blkCnt = S->vectorDimension & 3;
        while (blkCnt > 0U)
        {
            dotV = vsetq_lane_f32(vgetq_lane_f32(dotV, 0) + SQ(*pIn - *pSupporta), dotV, 0);
            dotV = vsetq_lane_f32(vgetq_lane_f32(dotV, 1) + SQ(*pIn - *pSupportb), dotV, 1);
            dotV = vsetq_lane_f32(vgetq_lane_f32(dotV, 2) + SQ(*pIn - *pSupportc), dotV, 2);
            dotV = vsetq_lane_f32(vgetq_lane_f32(dotV, 3) + SQ(*pIn - *pSupportd), dotV, 3);

            pSupporta++;
            pSupportb++;
            pSupportc++;
            pSupportd++;
            pIn++;

            blkCnt--;
        }

        vec1 = vld1q_f32(pDualCoefs);
        pDualCoefs += 4;

        // To vectorize later
        dotV = vmulq_n_f32(dotV, -S->gamma);
        dotV = vexpq_f32(dotV);

        accum  = vmulq_f32(vec1, dotV);
        accum2 = vpadd_f32(vget_low_f32(accum), vget_high_f32(accum));

        sum += vget_lane_f32(accum2, 0) + vget_lane_f32(accum2, 1);

        pSupporta += 3 * S->vectorDimension;
        pSupportb += 3 * S->vectorDimension;
        pSupportc += 3 * S->vectorDimension;
        pSupportd += 3 * S->vectorDimension;

        vectorBlkCnt--;
    }
    pSupport = pSupporta;
    vectorBlkCnt = S->nbOfSupportVectors & 3;
    while (vectorBlkCnt > 0U)
    {
        accum = vdupq_n_f32(0);
        dot = 0.0f;
        pIn = in;

        blkCnt = S->vectorDimension >> 2;
        while (blkCnt > 0U)
        {
            vec1 = vld1q_f32(pIn);
            vec2 = vld1q_f32(pSupport);

            pIn      += 4;
            pSupport += 4;

            temp  = vsubq_f32(vec1, vec2);
            accum = vmlaq_f32(accum, temp, temp);

            blkCnt--;
        }
        accum2 = vpadd_f32(vget_low_f32(accum), vget_high_f32(accum));
        dot    = vget_lane_f32(accum2, 0) + vget_lane_f32(accum2, 1);

        blkCnt = S->vectorDimension & 3;
        while (blkCnt > 0U)
        {
            dot = dot + SQ(*pIn - *pSupport);
            pIn++;
            pSupport++;

            blkCnt--;
        }

        sum += *pDualCoefs++ * expf(-S->gamma * dot);

        vectorBlkCnt--;
    }
    *pResult = S->classes[STEP(sum)];
}
#else
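/*
 * Portable scalar reference: computes the SVM decision function directly,
 *   sum = intercept + sum_i dualCoefficients[i] * exp(-gamma * ||in - sv_i||^2)
 * and maps its sign to a class label.
 */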
void arm_svm_rbf_predict_f32(
    const arm_svm_rbf_instance_f32 *S,
    const float32_t * in,
    int32_t * pResult)
{
    float32_t sum = S->intercept;
    float32_t dot = 0;
    uint32_t  i, j;
    const float32_t *pSupport = S->supportVectors;

    for (i = 0; i < S->nbOfSupportVectors; i++)
    {
        dot = 0;
        for (j = 0; j < S->vectorDimension; j++)
        {
            dot = dot + SQ(in[j] - *pSupport);
            pSupport++;
        }
        sum += S->dualCoefficients[i] * expf(-S->gamma * dot);
    }
    *pResult = S->classes[STEP(sum)];
}
#endif
#endif /* defined(ARM_MATH_MVEF) && !defined(ARM_MATH_AUTOVECTORIZE) */
/**
 * @} end of rbfsvm group
 */