/* ----------------------------------------------------------------------
 * Project:      CMSIS DSP Library
 * Title:        arm_svm_rbf_predict_f32.c
 * Description:  SVM Radial Basis Function Classifier
 *
 * $Date:        23 April 2021
 * $Revision:    V1.9.0
 *
 * Target Processor: Cortex-M and Cortex-A cores
 * -------------------------------------------------------------------- */
/*
 * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved.
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the License); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an AS IS BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "dsp/svm_functions.h"
#include <limits.h>
#include <math.h>

/**
 * @addtogroup rbfsvm
 * @{
 */

/**
 * @brief SVM rbf prediction
 * @param[in]  S        Pointer to an instance of the rbf SVM structure.
 * @param[in]  in       Pointer to input vector
 * @param[out] pResult  decision value
 *
 */

#if defined(ARM_MATH_MVEF) && !defined(ARM_MATH_AUTOVECTORIZE)

#include "arm_helium_utils.h"
#include "arm_vec_math.h"
  45. void arm_svm_rbf_predict_f32(
  46. const arm_svm_rbf_instance_f32 *S,
  47. const float32_t * in,
  48. int32_t * pResult)
  49. {
  50. /* inlined Matrix x Vector function interleaved with dot prod */
  51. uint32_t numRows = S->nbOfSupportVectors;
  52. uint32_t numCols = S->vectorDimension;
  53. const float32_t *pSupport = S->supportVectors;
  54. const float32_t *pSrcA = pSupport;
  55. const float32_t *pInA0;
  56. const float32_t *pInA1;
  57. uint32_t row;
  58. uint32_t blkCnt; /* loop counters */
  59. const float32_t *pDualCoef = S->dualCoefficients;
  60. float32_t sum = S->intercept;
  61. f32x4_t vSum = vdupq_n_f32(0);
  62. row = numRows;
  63. /*
  64. * compute 4 rows in parrallel
  65. */
  66. while (row >= 4) {
  67. const float32_t *pInA2, *pInA3;
  68. float32_t const *pSrcA0Vec, *pSrcA1Vec, *pSrcA2Vec, *pSrcA3Vec, *pInVec;
  69. f32x4_t vecIn, acc0, acc1, acc2, acc3;
  70. float32_t const *pSrcVecPtr = in;
  71. /*
  72. * Initialize the pointers to 4 consecutive MatrixA rows
  73. */
  74. pInA0 = pSrcA;
  75. pInA1 = pInA0 + numCols;
  76. pInA2 = pInA1 + numCols;
  77. pInA3 = pInA2 + numCols;
  78. /*
  79. * Initialize the vector pointer
  80. */
  81. pInVec = pSrcVecPtr;
  82. /*
  83. * reset accumulators
  84. */
  85. acc0 = vdupq_n_f32(0.0f);
  86. acc1 = vdupq_n_f32(0.0f);
  87. acc2 = vdupq_n_f32(0.0f);
  88. acc3 = vdupq_n_f32(0.0f);
  89. pSrcA0Vec = pInA0;
  90. pSrcA1Vec = pInA1;
  91. pSrcA2Vec = pInA2;
  92. pSrcA3Vec = pInA3;
  93. blkCnt = numCols >> 2;
  94. while (blkCnt > 0U) {
  95. f32x4_t vecA;
  96. f32x4_t vecDif;
  97. vecIn = vld1q(pInVec);
  98. pInVec += 4;
  99. vecA = vld1q(pSrcA0Vec);
  100. pSrcA0Vec += 4;
  101. vecDif = vsubq(vecIn, vecA);
  102. acc0 = vfmaq(acc0, vecDif, vecDif);
  103. vecA = vld1q(pSrcA1Vec);
  104. pSrcA1Vec += 4;
  105. vecDif = vsubq(vecIn, vecA);
  106. acc1 = vfmaq(acc1, vecDif, vecDif);
  107. vecA = vld1q(pSrcA2Vec);
  108. pSrcA2Vec += 4;
  109. vecDif = vsubq(vecIn, vecA);
  110. acc2 = vfmaq(acc2, vecDif, vecDif);
  111. vecA = vld1q(pSrcA3Vec);
  112. pSrcA3Vec += 4;
  113. vecDif = vsubq(vecIn, vecA);
  114. acc3 = vfmaq(acc3, vecDif, vecDif);
  115. blkCnt--;
  116. }
  117. /*
  118. * tail
  119. * (will be merged thru tail predication)
  120. */
  121. blkCnt = numCols & 3;
  122. if (blkCnt > 0U) {
  123. mve_pred16_t p0 = vctp32q(blkCnt);
  124. f32x4_t vecA;
  125. f32x4_t vecDif;
  126. vecIn = vldrwq_z_f32(pInVec, p0);
  127. vecA = vldrwq_z_f32(pSrcA0Vec, p0);
  128. vecDif = vsubq(vecIn, vecA);
  129. acc0 = vfmaq(acc0, vecDif, vecDif);
  130. vecA = vldrwq_z_f32(pSrcA1Vec, p0);
  131. vecDif = vsubq(vecIn, vecA);
  132. acc1 = vfmaq(acc1, vecDif, vecDif);
  133. vecA = vldrwq_z_f32(pSrcA2Vec, p0);;
  134. vecDif = vsubq(vecIn, vecA);
  135. acc2 = vfmaq(acc2, vecDif, vecDif);
  136. vecA = vldrwq_z_f32(pSrcA3Vec, p0);
  137. vecDif = vsubq(vecIn, vecA);
  138. acc3 = vfmaq(acc3, vecDif, vecDif);
  139. }
  140. /*
  141. * Sum the partial parts
  142. */
  143. //sum += *pDualCoef++ * expf(-S->gamma * vecReduceF32Mve(acc0));
  144. f32x4_t vtmp = vuninitializedq_f32();
  145. vtmp = vsetq_lane(vecAddAcrossF32Mve(acc0), vtmp, 0);
  146. vtmp = vsetq_lane(vecAddAcrossF32Mve(acc1), vtmp, 1);
  147. vtmp = vsetq_lane(vecAddAcrossF32Mve(acc2), vtmp, 2);
  148. vtmp = vsetq_lane(vecAddAcrossF32Mve(acc3), vtmp, 3);
  149. vSum =
  150. vfmaq_f32(vSum, vld1q(pDualCoef),
  151. vexpq_f32(vmulq_n_f32(vtmp, -S->gamma)));
  152. pDualCoef += 4;
  153. pSrcA += numCols * 4;
  154. /*
  155. * Decrement the row loop counter
  156. */
  157. row -= 4;
  158. }
  159. /*
  160. * compute 2 rows in parrallel
  161. */
  162. if (row >= 2) {
  163. float32_t const *pSrcA0Vec, *pSrcA1Vec, *pInVec;
  164. f32x4_t vecIn, acc0, acc1;
  165. float32_t const *pSrcVecPtr = in;
  166. /*
  167. * Initialize the pointers to 2 consecutive MatrixA rows
  168. */
  169. pInA0 = pSrcA;
  170. pInA1 = pInA0 + numCols;
  171. /*
  172. * Initialize the vector pointer
  173. */
  174. pInVec = pSrcVecPtr;
  175. /*
  176. * reset accumulators
  177. */
  178. acc0 = vdupq_n_f32(0.0f);
  179. acc1 = vdupq_n_f32(0.0f);
  180. pSrcA0Vec = pInA0;
  181. pSrcA1Vec = pInA1;
  182. blkCnt = numCols >> 2;
  183. while (blkCnt > 0U) {
  184. f32x4_t vecA;
  185. f32x4_t vecDif;
  186. vecIn = vld1q(pInVec);
  187. pInVec += 4;
  188. vecA = vld1q(pSrcA0Vec);
  189. pSrcA0Vec += 4;
  190. vecDif = vsubq(vecIn, vecA);
  191. acc0 = vfmaq(acc0, vecDif, vecDif);;
  192. vecA = vld1q(pSrcA1Vec);
  193. pSrcA1Vec += 4;
  194. vecDif = vsubq(vecIn, vecA);
  195. acc1 = vfmaq(acc1, vecDif, vecDif);
  196. blkCnt--;
  197. }
  198. /*
  199. * tail
  200. * (will be merged thru tail predication)
  201. */
  202. blkCnt = numCols & 3;
  203. if (blkCnt > 0U) {
  204. mve_pred16_t p0 = vctp32q(blkCnt);
  205. f32x4_t vecA, vecDif;
  206. vecIn = vldrwq_z_f32(pInVec, p0);
  207. vecA = vldrwq_z_f32(pSrcA0Vec, p0);
  208. vecDif = vsubq(vecIn, vecA);
  209. acc0 = vfmaq(acc0, vecDif, vecDif);
  210. vecA = vldrwq_z_f32(pSrcA1Vec, p0);
  211. vecDif = vsubq(vecIn, vecA);
  212. acc1 = vfmaq(acc1, vecDif, vecDif);
  213. }
  214. /*
  215. * Sum the partial parts
  216. */
  217. f32x4_t vtmp = vuninitializedq_f32();
  218. vtmp = vsetq_lane(vecAddAcrossF32Mve(acc0), vtmp, 0);
  219. vtmp = vsetq_lane(vecAddAcrossF32Mve(acc1), vtmp, 1);
  220. vSum =
  221. vfmaq_m_f32(vSum, vld1q(pDualCoef),
  222. vexpq_f32(vmulq_n_f32(vtmp, -S->gamma)), vctp32q(2));
  223. pDualCoef += 2;
  224. pSrcA += numCols * 2;
  225. row -= 2;
  226. }
  227. if (row >= 1) {
  228. f32x4_t vecIn, acc0;
  229. float32_t const *pSrcA0Vec, *pInVec;
  230. float32_t const *pSrcVecPtr = in;
  231. /*
  232. * Initialize the pointers to last MatrixA row
  233. */
  234. pInA0 = pSrcA;
  235. /*
  236. * Initialize the vector pointer
  237. */
  238. pInVec = pSrcVecPtr;
  239. /*
  240. * reset accumulators
  241. */
  242. acc0 = vdupq_n_f32(0.0f);
  243. pSrcA0Vec = pInA0;
  244. blkCnt = numCols >> 2;
  245. while (blkCnt > 0U) {
  246. f32x4_t vecA, vecDif;
  247. vecIn = vld1q(pInVec);
  248. pInVec += 4;
  249. vecA = vld1q(pSrcA0Vec);
  250. pSrcA0Vec += 4;
  251. vecDif = vsubq(vecIn, vecA);
  252. acc0 = vfmaq(acc0, vecDif, vecDif);
  253. blkCnt--;
  254. }
  255. /*
  256. * tail
  257. * (will be merged thru tail predication)
  258. */
  259. blkCnt = numCols & 3;
  260. if (blkCnt > 0U) {
  261. mve_pred16_t p0 = vctp32q(blkCnt);
  262. f32x4_t vecA, vecDif;
  263. vecIn = vldrwq_z_f32(pInVec, p0);
  264. vecA = vldrwq_z_f32(pSrcA0Vec, p0);
  265. vecDif = vsubq(vecIn, vecA);
  266. acc0 = vfmaq(acc0, vecDif, vecDif);
  267. }
  268. /*
  269. * Sum the partial parts
  270. */
  271. f32x4_t vtmp = vuninitializedq_f32();
  272. vtmp = vsetq_lane(vecAddAcrossF32Mve(acc0), vtmp, 0);
  273. vSum =
  274. vfmaq_m_f32(vSum, vld1q(pDualCoef),
  275. vexpq_f32(vmulq_n_f32(vtmp, -S->gamma)), vctp32q(1));
  276. }
  277. sum += vecAddAcrossF32Mve(vSum);
  278. *pResult = S->classes[STEP(sum)];
  279. }
#else

#if defined(ARM_MATH_NEON)

#include "NEMath.h"
  283. void arm_svm_rbf_predict_f32(
  284. const arm_svm_rbf_instance_f32 *S,
  285. const float32_t * in,
  286. int32_t * pResult)
  287. {
  288. float32_t sum = S->intercept;
  289. float32_t dot;
  290. float32x4_t dotV;
  291. float32x4_t accuma,accumb,accumc,accumd,accum;
  292. float32x2_t accum2;
  293. float32x4_t temp;
  294. float32x4_t vec1;
  295. float32x4_t vec2,vec2a,vec2b,vec2c,vec2d;
  296. uint32_t blkCnt;
  297. uint32_t vectorBlkCnt;
  298. const float32_t *pIn = in;
  299. const float32_t *pSupport = S->supportVectors;
  300. const float32_t *pSupporta = S->supportVectors;
  301. const float32_t *pSupportb;
  302. const float32_t *pSupportc;
  303. const float32_t *pSupportd;
  304. pSupportb = pSupporta + S->vectorDimension;
  305. pSupportc = pSupportb + S->vectorDimension;
  306. pSupportd = pSupportc + S->vectorDimension;
  307. const float32_t *pDualCoefs = S->dualCoefficients;
  308. vectorBlkCnt = S->nbOfSupportVectors >> 2;
  309. while (vectorBlkCnt > 0U)
  310. {
  311. accuma = vdupq_n_f32(0);
  312. accumb = vdupq_n_f32(0);
  313. accumc = vdupq_n_f32(0);
  314. accumd = vdupq_n_f32(0);
  315. pIn = in;
  316. blkCnt = S->vectorDimension >> 2;
  317. while (blkCnt > 0U)
  318. {
  319. vec1 = vld1q_f32(pIn);
  320. vec2a = vld1q_f32(pSupporta);
  321. vec2b = vld1q_f32(pSupportb);
  322. vec2c = vld1q_f32(pSupportc);
  323. vec2d = vld1q_f32(pSupportd);
  324. pIn += 4;
  325. pSupporta += 4;
  326. pSupportb += 4;
  327. pSupportc += 4;
  328. pSupportd += 4;
  329. temp = vsubq_f32(vec1, vec2a);
  330. accuma = vmlaq_f32(accuma, temp, temp);
  331. temp = vsubq_f32(vec1, vec2b);
  332. accumb = vmlaq_f32(accumb, temp, temp);
  333. temp = vsubq_f32(vec1, vec2c);
  334. accumc = vmlaq_f32(accumc, temp, temp);
  335. temp = vsubq_f32(vec1, vec2d);
  336. accumd = vmlaq_f32(accumd, temp, temp);
  337. blkCnt -- ;
  338. }
  339. accum2 = vpadd_f32(vget_low_f32(accuma),vget_high_f32(accuma));
  340. dotV = vsetq_lane_f32(vget_lane_f32(accum2, 0) + vget_lane_f32(accum2, 1),dotV,0);
  341. accum2 = vpadd_f32(vget_low_f32(accumb),vget_high_f32(accumb));
  342. dotV = vsetq_lane_f32(vget_lane_f32(accum2, 0) + vget_lane_f32(accum2, 1),dotV,1);
  343. accum2 = vpadd_f32(vget_low_f32(accumc),vget_high_f32(accumc));
  344. dotV = vsetq_lane_f32(vget_lane_f32(accum2, 0) + vget_lane_f32(accum2, 1),dotV,2);
  345. accum2 = vpadd_f32(vget_low_f32(accumd),vget_high_f32(accumd));
  346. dotV = vsetq_lane_f32(vget_lane_f32(accum2, 0) + vget_lane_f32(accum2, 1),dotV,3);
  347. blkCnt = S->vectorDimension & 3;
  348. while (blkCnt > 0U)
  349. {
  350. dotV = vsetq_lane_f32(vgetq_lane_f32(dotV,0) + ARM_SQ(*pIn - *pSupporta), dotV,0);
  351. dotV = vsetq_lane_f32(vgetq_lane_f32(dotV,1) + ARM_SQ(*pIn - *pSupportb), dotV,1);
  352. dotV = vsetq_lane_f32(vgetq_lane_f32(dotV,2) + ARM_SQ(*pIn - *pSupportc), dotV,2);
  353. dotV = vsetq_lane_f32(vgetq_lane_f32(dotV,3) + ARM_SQ(*pIn - *pSupportd), dotV,3);
  354. pSupporta++;
  355. pSupportb++;
  356. pSupportc++;
  357. pSupportd++;
  358. pIn++;
  359. blkCnt -- ;
  360. }
  361. vec1 = vld1q_f32(pDualCoefs);
  362. pDualCoefs += 4;
  363. // To vectorize later
  364. dotV = vmulq_n_f32(dotV, -S->gamma);
  365. dotV = vexpq_f32(dotV);
  366. accum = vmulq_f32(vec1,dotV);
  367. accum2 = vpadd_f32(vget_low_f32(accum),vget_high_f32(accum));
  368. sum += vget_lane_f32(accum2, 0) + vget_lane_f32(accum2, 1);
  369. pSupporta += 3*S->vectorDimension;
  370. pSupportb += 3*S->vectorDimension;
  371. pSupportc += 3*S->vectorDimension;
  372. pSupportd += 3*S->vectorDimension;
  373. vectorBlkCnt -- ;
  374. }
  375. pSupport = pSupporta;
  376. vectorBlkCnt = S->nbOfSupportVectors & 3;
  377. while (vectorBlkCnt > 0U)
  378. {
  379. accum = vdupq_n_f32(0);
  380. dot = 0.0f;
  381. pIn = in;
  382. blkCnt = S->vectorDimension >> 2;
  383. while (blkCnt > 0U)
  384. {
  385. vec1 = vld1q_f32(pIn);
  386. vec2 = vld1q_f32(pSupport);
  387. pIn += 4;
  388. pSupport += 4;
  389. temp = vsubq_f32(vec1,vec2);
  390. accum = vmlaq_f32(accum, temp,temp);
  391. blkCnt -- ;
  392. }
  393. accum2 = vpadd_f32(vget_low_f32(accum),vget_high_f32(accum));
  394. dot = vget_lane_f32(accum2, 0) + vget_lane_f32(accum2, 1);
  395. blkCnt = S->vectorDimension & 3;
  396. while (blkCnt > 0U)
  397. {
  398. dot = dot + ARM_SQ(*pIn - *pSupport);
  399. pIn++;
  400. pSupport++;
  401. blkCnt -- ;
  402. }
  403. sum += *pDualCoefs++ * expf(-S->gamma * dot);
  404. vectorBlkCnt -- ;
  405. }
  406. *pResult=S->classes[STEP(sum)];
  407. }
#else
  409. void arm_svm_rbf_predict_f32(
  410. const arm_svm_rbf_instance_f32 *S,
  411. const float32_t * in,
  412. int32_t * pResult)
  413. {
  414. float32_t sum=S->intercept;
  415. float32_t dot=0;
  416. uint32_t i,j;
  417. const float32_t *pSupport = S->supportVectors;
  418. for(i=0; i < S->nbOfSupportVectors; i++)
  419. {
  420. dot=0;
  421. for(j=0; j < S->vectorDimension; j++)
  422. {
  423. dot = dot + ARM_SQ(in[j] - *pSupport);
  424. pSupport++;
  425. }
  426. sum += S->dualCoefficients[i] * expf(-S->gamma * dot);
  427. }
  428. *pResult=S->classes[STEP(sum)];
  429. }
#endif
#endif /* defined(ARM_MATH_MVEF) && !defined(ARM_MATH_AUTOVECTORIZE) */

/**
 * @} end of rbfsvm group
 */