/* arm_svm_rbf_predict_f32.c */
/* ----------------------------------------------------------------------
 * Project:      CMSIS DSP Library
 * Title:        arm_svm_rbf_predict_f32.c
 * Description:  SVM Radial Basis Function Classifier
 *
 *
 * Target Processor: Cortex-M and Cortex-A cores
 * -------------------------------------------------------------------- */
/*
 * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved.
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the License); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an AS IS BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "arm_math.h"
#include <limits.h>
#include <math.h>

/**
 * @addtogroup groupSVM
 * @{
 */

/**
 * @brief SVM rbf prediction
 * @param[in]  S       Pointer to an instance of the rbf SVM structure.
 * @param[in]  in      Pointer to input vector
 * @param[out] pResult Decision value
 * @return none.
 *
 */
#if defined(ARM_MATH_MVEF) && !defined(ARM_MATH_AUTOVECTORIZE)

#include "arm_helium_utils.h"
#include "arm_vec_math.h"

/*
 * Helium (MVE) variant.
 *
 * For every support vector sv_i the decision function accumulates
 *     sum += dualCoef[i] * exp(-gamma * ||in - sv_i||^2)
 * starting from sum = intercept, then classifies on the sign of sum
 * (STEP(sum) selects classes[0] or classes[1]).
 *
 * The squared distances are computed like a matrix (support vectors)
 * times vector (input) product, processing 4, then 2, then 1 support
 * vector rows at a time so that 4 kernel values can be fed to the
 * vectorized exponential vexpq_f32 at once.
 */
void arm_svm_rbf_predict_f32(
    const arm_svm_rbf_instance_f32 *S,
    const float32_t * in,
    int32_t * pResult)
{
    /* inlined Matrix x Vector function interleaved with dot prod */
    uint32_t numRows = S->nbOfSupportVectors;
    uint32_t numCols = S->vectorDimension;
    const float32_t *pSupport = S->supportVectors;
    const float32_t *pSrcA = pSupport;          /* current group of support-vector rows */
    const float32_t *pInA0;
    const float32_t *pInA1;
    uint32_t row;
    uint32_t blkCnt;                            /* loop counters */
    const float32_t *pDualCoef = S->dualCoefficients;
    float32_t sum = S->intercept;               /* scalar part of the decision value */
    f32x4_t vSum = vdupq_n_f32(0);              /* vector accumulator of dualCoef*exp(...) terms */

    row = numRows;

    /*
     * compute 4 rows (support vectors) in parallel
     */
    while (row >= 4) {
        const float32_t *pInA2, *pInA3;
        float32_t const *pSrcA0Vec, *pSrcA1Vec, *pSrcA2Vec, *pSrcA3Vec, *pInVec;
        f32x4_t vecIn, acc0, acc1, acc2, acc3;
        float32_t const *pSrcVecPtr = in;

        /*
         * Initialize the pointers to 4 consecutive MatrixA rows
         */
        pInA0 = pSrcA;
        pInA1 = pInA0 + numCols;
        pInA2 = pInA1 + numCols;
        pInA3 = pInA2 + numCols;
        /*
         * Initialize the vector pointer
         */
        pInVec = pSrcVecPtr;
        /*
         * reset accumulators (one squared-distance accumulator per row)
         */
        acc0 = vdupq_n_f32(0.0f);
        acc1 = vdupq_n_f32(0.0f);
        acc2 = vdupq_n_f32(0.0f);
        acc3 = vdupq_n_f32(0.0f);

        pSrcA0Vec = pInA0;
        pSrcA1Vec = pInA1;
        pSrcA2Vec = pInA2;
        pSrcA3Vec = pInA3;

        /* 4 float32 lanes per iteration over the vector dimension */
        blkCnt = numCols >> 2;
        while (blkCnt > 0U) {
            f32x4_t vecA;
            f32x4_t vecDif;

            /* input chunk is loaded once and reused against all 4 rows */
            vecIn = vld1q(pInVec);
            pInVec += 4;

            /* acc += (in - sv)^2 for each row */
            vecA = vld1q(pSrcA0Vec);
            pSrcA0Vec += 4;
            vecDif = vsubq(vecIn, vecA);
            acc0 = vfmaq(acc0, vecDif, vecDif);
            vecA = vld1q(pSrcA1Vec);
            pSrcA1Vec += 4;
            vecDif = vsubq(vecIn, vecA);
            acc1 = vfmaq(acc1, vecDif, vecDif);
            vecA = vld1q(pSrcA2Vec);
            pSrcA2Vec += 4;
            vecDif = vsubq(vecIn, vecA);
            acc2 = vfmaq(acc2, vecDif, vecDif);
            vecA = vld1q(pSrcA3Vec);
            pSrcA3Vec += 4;
            vecDif = vsubq(vecIn, vecA);
            acc3 = vfmaq(acc3, vecDif, vecDif);

            blkCnt--;
        }
        /*
         * tail: remaining 1..3 columns handled with zeroing predication
         * (will be merged thru tail predication)
         */
        blkCnt = numCols & 3;
        if (blkCnt > 0U) {
            mve_pred16_t p0 = vctp32q(blkCnt);
            f32x4_t vecA;
            f32x4_t vecDif;

            /* inactive lanes load 0, so (0-0)^2 contributes nothing */
            vecIn = vldrwq_z_f32(pInVec, p0);
            vecA = vldrwq_z_f32(pSrcA0Vec, p0);
            vecDif = vsubq(vecIn, vecA);
            acc0 = vfmaq(acc0, vecDif, vecDif);
            vecA = vldrwq_z_f32(pSrcA1Vec, p0);
            vecDif = vsubq(vecIn, vecA);
            acc1 = vfmaq(acc1, vecDif, vecDif);
            vecA = vldrwq_z_f32(pSrcA2Vec, p0);;
            vecDif = vsubq(vecIn, vecA);
            acc2 = vfmaq(acc2, vecDif, vecDif);
            vecA = vldrwq_z_f32(pSrcA3Vec, p0);
            vecDif = vsubq(vecIn, vecA);
            acc3 = vfmaq(acc3, vecDif, vecDif);
        }
        /*
         * Sum the partial parts: gather the 4 squared distances into one
         * vector so exp() and the dual-coefficient multiply are vectorized.
         */
        f32x4_t vtmp = vuninitializedq_f32();   /* all 4 lanes are written below */
        vtmp = vsetq_lane(vecAddAcrossF32Mve(acc0), vtmp, 0);
        vtmp = vsetq_lane(vecAddAcrossF32Mve(acc1), vtmp, 1);
        vtmp = vsetq_lane(vecAddAcrossF32Mve(acc2), vtmp, 2);
        vtmp = vsetq_lane(vecAddAcrossF32Mve(acc3), vtmp, 3);

        /* vSum += dualCoef[0..3] * exp(-gamma * dist[0..3]) */
        vSum =
            vfmaq_f32(vSum, vld1q(pDualCoef),
                      vexpq_f32(vmulq_n_f32(vtmp, -S->gamma)));
        pDualCoef += 4;

        pSrcA += numCols * 4;                   /* advance to the next 4 support vectors */
        /*
         * Decrement the row loop counter
         */
        row -= 4;
    }

    /*
     * compute 2 rows in parallel (same scheme as above, 2-row variant)
     */
    if (row >= 2) {
        float32_t const *pSrcA0Vec, *pSrcA1Vec, *pInVec;
        f32x4_t vecIn, acc0, acc1;
        float32_t const *pSrcVecPtr = in;

        /*
         * Initialize the pointers to 2 consecutive MatrixA rows
         */
        pInA0 = pSrcA;
        pInA1 = pInA0 + numCols;
        /*
         * Initialize the vector pointer
         */
        pInVec = pSrcVecPtr;
        /*
         * reset accumulators
         */
        acc0 = vdupq_n_f32(0.0f);
        acc1 = vdupq_n_f32(0.0f);
        pSrcA0Vec = pInA0;
        pSrcA1Vec = pInA1;

        blkCnt = numCols >> 2;
        while (blkCnt > 0U) {
            f32x4_t vecA;
            f32x4_t vecDif;

            vecIn = vld1q(pInVec);
            pInVec += 4;
            vecA = vld1q(pSrcA0Vec);
            pSrcA0Vec += 4;
            vecDif = vsubq(vecIn, vecA);
            acc0 = vfmaq(acc0, vecDif, vecDif);;
            vecA = vld1q(pSrcA1Vec);
            pSrcA1Vec += 4;
            vecDif = vsubq(vecIn, vecA);
            acc1 = vfmaq(acc1, vecDif, vecDif);

            blkCnt--;
        }
        /*
         * tail
         * (will be merged thru tail predication)
         */
        blkCnt = numCols & 3;
        if (blkCnt > 0U) {
            mve_pred16_t p0 = vctp32q(blkCnt);
            f32x4_t vecA, vecDif;

            vecIn = vldrwq_z_f32(pInVec, p0);
            vecA = vldrwq_z_f32(pSrcA0Vec, p0);
            vecDif = vsubq(vecIn, vecA);
            acc0 = vfmaq(acc0, vecDif, vecDif);
            vecA = vldrwq_z_f32(pSrcA1Vec, p0);
            vecDif = vsubq(vecIn, vecA);
            acc1 = vfmaq(acc1, vecDif, vecDif);
        }
        /*
         * Sum the partial parts; only 2 lanes valid, hence the vctp32q(2)
         * predicated FMA so stale lanes 2..3 do not pollute vSum.
         */
        f32x4_t vtmp = vuninitializedq_f32();
        vtmp = vsetq_lane(vecAddAcrossF32Mve(acc0), vtmp, 0);
        vtmp = vsetq_lane(vecAddAcrossF32Mve(acc1), vtmp, 1);

        vSum =
            vfmaq_m_f32(vSum, vld1q(pDualCoef),
                        vexpq_f32(vmulq_n_f32(vtmp, -S->gamma)), vctp32q(2));
        pDualCoef += 2;

        pSrcA += numCols * 2;
        row -= 2;
    }

    /* last remaining support vector, if any */
    if (row >= 1) {
        f32x4_t vecIn, acc0;
        float32_t const *pSrcA0Vec, *pInVec;
        float32_t const *pSrcVecPtr = in;

        /*
         * Initialize the pointers to last MatrixA row
         */
        pInA0 = pSrcA;
        /*
         * Initialize the vector pointer
         */
        pInVec = pSrcVecPtr;
        /*
         * reset accumulators
         */
        acc0 = vdupq_n_f32(0.0f);
        pSrcA0Vec = pInA0;

        blkCnt = numCols >> 2;
        while (blkCnt > 0U) {
            f32x4_t vecA, vecDif;

            vecIn = vld1q(pInVec);
            pInVec += 4;
            vecA = vld1q(pSrcA0Vec);
            pSrcA0Vec += 4;
            vecDif = vsubq(vecIn, vecA);
            acc0 = vfmaq(acc0, vecDif, vecDif);

            blkCnt--;
        }
        /*
         * tail
         * (will be merged thru tail predication)
         */
        blkCnt = numCols & 3;
        if (blkCnt > 0U) {
            mve_pred16_t p0 = vctp32q(blkCnt);
            f32x4_t vecA, vecDif;

            vecIn = vldrwq_z_f32(pInVec, p0);
            vecA = vldrwq_z_f32(pSrcA0Vec, p0);
            vecDif = vsubq(vecIn, vecA);
            acc0 = vfmaq(acc0, vecDif, vecDif);
        }
        /*
         * Sum the partial parts; single valid lane -> vctp32q(1)
         */
        f32x4_t vtmp = vuninitializedq_f32();
        vtmp = vsetq_lane(vecAddAcrossF32Mve(acc0), vtmp, 0);

        vSum =
            vfmaq_m_f32(vSum, vld1q(pDualCoef),
                        vexpq_f32(vmulq_n_f32(vtmp, -S->gamma)), vctp32q(1));
    }

    /* reduce the vector accumulator and threshold the decision value */
    sum += vecAddAcrossF32Mve(vSum);

    *pResult = S->classes[STEP(sum)];
}
#else
#if defined(ARM_MATH_NEON)

#include "NEMath.h"

/*
 * NEON variant.
 *
 * Decision function: starting from sum = intercept, accumulate
 *     sum += dualCoef[i] * exp(-gamma * ||in - sv_i||^2)
 * over all support vectors, then classify on the sign of sum.
 *
 * Support vectors are processed 4 at a time (so that the 4 kernel
 * arguments can go through the vectorized vexpq_f32), with a scalar
 * pass over the remaining 1..3 vectors.
 */
void arm_svm_rbf_predict_f32(
    const arm_svm_rbf_instance_f32 *S,
    const float32_t * in,
    int32_t * pResult)
{
    float32_t sum = S->intercept;
    float32_t dot;
    float32x4_t dotV;                       /* 4 squared distances, one per support vector */

    float32x4_t accuma,accumb,accumc,accumd,accum;
    float32x2_t accum2;
    float32x4_t temp;
    float32x4_t vec1;

    float32x4_t vec2,vec2a,vec2b,vec2c,vec2d;

    uint32_t blkCnt;
    uint32_t vectorBlkCnt;

    const float32_t *pIn = in;

    const float32_t *pSupport = S->supportVectors;

    /* pointers to 4 consecutive support-vector rows */
    const float32_t *pSupporta = S->supportVectors;
    const float32_t *pSupportb;
    const float32_t *pSupportc;
    const float32_t *pSupportd;

    pSupportb = pSupporta + S->vectorDimension;
    pSupportc = pSupportb + S->vectorDimension;
    pSupportd = pSupportc + S->vectorDimension;

    const float32_t *pDualCoefs = S->dualCoefficients;

    /* groups of 4 support vectors */
    vectorBlkCnt = S->nbOfSupportVectors >> 2;
    while (vectorBlkCnt > 0U)
    {
        /* one squared-distance accumulator per row */
        accuma = vdupq_n_f32(0);
        accumb = vdupq_n_f32(0);
        accumc = vdupq_n_f32(0);
        accumd = vdupq_n_f32(0);

        pIn = in;

        /* 4 float32 lanes per iteration over the vector dimension */
        blkCnt = S->vectorDimension >> 2;
        while (blkCnt > 0U)
        {
            /* input chunk loaded once, reused against all 4 rows */
            vec1 = vld1q_f32(pIn);
            vec2a = vld1q_f32(pSupporta);
            vec2b = vld1q_f32(pSupportb);
            vec2c = vld1q_f32(pSupportc);
            vec2d = vld1q_f32(pSupportd);

            pIn += 4;
            pSupporta += 4;
            pSupportb += 4;
            pSupportc += 4;
            pSupportd += 4;

            /* accum += (in - sv)^2 */
            temp = vsubq_f32(vec1, vec2a);
            accuma = vmlaq_f32(accuma, temp, temp);

            temp = vsubq_f32(vec1, vec2b);
            accumb = vmlaq_f32(accumb, temp, temp);

            temp = vsubq_f32(vec1, vec2c);
            accumc = vmlaq_f32(accumc, temp, temp);

            temp = vsubq_f32(vec1, vec2d);
            accumd = vmlaq_f32(accumd, temp, temp);

            blkCnt -- ;
        }
        /* horizontal reduction of each accumulator into one lane of dotV;
           all 4 lanes of dotV are written before any lane is read */
        accum2 = vpadd_f32(vget_low_f32(accuma),vget_high_f32(accuma));
        dotV = vsetq_lane_f32(vget_lane_f32(accum2, 0) + vget_lane_f32(accum2, 1),dotV,0);

        accum2 = vpadd_f32(vget_low_f32(accumb),vget_high_f32(accumb));
        dotV = vsetq_lane_f32(vget_lane_f32(accum2, 0) + vget_lane_f32(accum2, 1),dotV,1);

        accum2 = vpadd_f32(vget_low_f32(accumc),vget_high_f32(accumc));
        dotV = vsetq_lane_f32(vget_lane_f32(accum2, 0) + vget_lane_f32(accum2, 1),dotV,2);

        accum2 = vpadd_f32(vget_low_f32(accumd),vget_high_f32(accumd));
        dotV = vsetq_lane_f32(vget_lane_f32(accum2, 0) + vget_lane_f32(accum2, 1),dotV,3);

        /* scalar tail over the remaining 1..3 dimensions */
        blkCnt = S->vectorDimension & 3;
        while (blkCnt > 0U)
        {
            dotV = vsetq_lane_f32(vgetq_lane_f32(dotV,0) + SQ(*pIn - *pSupporta), dotV,0);
            dotV = vsetq_lane_f32(vgetq_lane_f32(dotV,1) + SQ(*pIn - *pSupportb), dotV,1);
            dotV = vsetq_lane_f32(vgetq_lane_f32(dotV,2) + SQ(*pIn - *pSupportc), dotV,2);
            dotV = vsetq_lane_f32(vgetq_lane_f32(dotV,3) + SQ(*pIn - *pSupportd), dotV,3);

            pSupporta++;
            pSupportb++;
            pSupportc++;
            pSupportd++;
            pIn++;

            blkCnt -- ;
        }

        vec1 = vld1q_f32(pDualCoefs);
        pDualCoefs += 4;

        // To vectorize later
        /* sum += dualCoef[0..3] . exp(-gamma * dist[0..3]) */
        dotV = vmulq_n_f32(dotV, -S->gamma);
        dotV = vexpq_f32(dotV);

        accum = vmulq_f32(vec1,dotV);
        accum2 = vpadd_f32(vget_low_f32(accum),vget_high_f32(accum));
        sum += vget_lane_f32(accum2, 0) + vget_lane_f32(accum2, 1);

        /* each pointer is 1 row past its own row; skip the other 3 rows
           of this group to land on the next group of 4 */
        pSupporta += 3*S->vectorDimension;
        pSupportb += 3*S->vectorDimension;
        pSupportc += 3*S->vectorDimension;
        pSupportd += 3*S->vectorDimension;

        vectorBlkCnt -- ;
    }

    /* remaining 1..3 support vectors, one at a time */
    pSupport = pSupporta;
    vectorBlkCnt = S->nbOfSupportVectors & 3;
    while (vectorBlkCnt > 0U)
    {
        accum = vdupq_n_f32(0);
        dot = 0.0f;
        pIn = in;

        blkCnt = S->vectorDimension >> 2;
        while (blkCnt > 0U)
        {
            vec1 = vld1q_f32(pIn);
            vec2 = vld1q_f32(pSupport);

            pIn += 4;
            pSupport += 4;

            temp = vsubq_f32(vec1,vec2);
            accum = vmlaq_f32(accum, temp,temp);

            blkCnt -- ;
        }
        accum2 = vpadd_f32(vget_low_f32(accum),vget_high_f32(accum));
        dot = vget_lane_f32(accum2, 0) + vget_lane_f32(accum2, 1);

        /* scalar tail over the remaining dimensions */
        blkCnt = S->vectorDimension & 3;
        while (blkCnt > 0U)
        {
            dot = dot + SQ(*pIn - *pSupport);
            pIn++;
            pSupport++;

            blkCnt -- ;
        }

        sum += *pDualCoefs++ * expf(-S->gamma * dot);

        vectorBlkCnt -- ;
    }

    *pResult=S->classes[STEP(sum)];
}
  407. #else
  408. void arm_svm_rbf_predict_f32(
  409. const arm_svm_rbf_instance_f32 *S,
  410. const float32_t * in,
  411. int32_t * pResult)
  412. {
  413. float32_t sum=S->intercept;
  414. float32_t dot=0;
  415. uint32_t i,j;
  416. const float32_t *pSupport = S->supportVectors;
  417. for(i=0; i < S->nbOfSupportVectors; i++)
  418. {
  419. dot=0;
  420. for(j=0; j < S->vectorDimension; j++)
  421. {
  422. dot = dot + SQ(in[j] - *pSupport);
  423. pSupport++;
  424. }
  425. sum += S->dualCoefficients[i] * expf(-S->gamma * dot);
  426. }
  427. *pResult=S->classes[STEP(sum)];
  428. }
  429. #endif
  430. #endif /* defined(ARM_MATH_MVEF) && !defined(ARM_MATH_AUTOVECTORIZE) */
/**
 * @} end of groupSVM group
 */