arm_svm_rbf_predict_f16.c 10.0 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351
  1. /* ----------------------------------------------------------------------
  2. * Project: CMSIS DSP Library
  3. * Title: arm_svm_rbf_predict_f16.c
  4. * Description: SVM Radial Basis Function Classifier
  5. *
  6. * $Date: 23 April 2021
  7. * $Revision: V1.9.0
  8. *
  9. * Target Processor: Cortex-M and Cortex-A cores
  10. * -------------------------------------------------------------------- */
  11. /*
  12. * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved.
  13. *
  14. * SPDX-License-Identifier: Apache-2.0
  15. *
  16. * Licensed under the Apache License, Version 2.0 (the License); you may
  17. * not use this file except in compliance with the License.
  18. * You may obtain a copy of the License at
  19. *
  20. * www.apache.org/licenses/LICENSE-2.0
  21. *
  22. * Unless required by applicable law or agreed to in writing, software
  23. * distributed under the License is distributed on an AS IS BASIS, WITHOUT
  24. * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  25. * See the License for the specific language governing permissions and
  26. * limitations under the License.
  27. */
  28. #include "dsp/svm_functions_f16.h"
  29. #if defined(ARM_FLOAT16_SUPPORTED)
  30. #include <limits.h>
  31. #include <math.h>
  32. /**
  33. * @addtogroup rbfsvm
  34. * @{
  35. */
  36. /**
  37. * @brief SVM rbf prediction
  38. * @param[in] S Pointer to an instance of the rbf SVM structure.
  39. * @param[in] in Pointer to input vector
  40. * @param[out] pResult decision value
  41. *
  42. */
  43. #if defined(ARM_MATH_MVE_FLOAT16) && !defined(ARM_MATH_AUTOVECTORIZE)
  44. #include "arm_helium_utils.h"
  45. #include "arm_vec_math_f16.h"
  46. void arm_svm_rbf_predict_f16(
  47. const arm_svm_rbf_instance_f16 *S,
  48. const float16_t * in,
  49. int32_t * pResult)
  50. {
  51. /* inlined Matrix x Vector function interleaved with dot prod */
  52. uint32_t numRows = S->nbOfSupportVectors;
  53. uint32_t numCols = S->vectorDimension;
  54. const float16_t *pSupport = S->supportVectors;
  55. const float16_t *pSrcA = pSupport;
  56. const float16_t *pInA0;
  57. const float16_t *pInA1;
  58. uint32_t row;
  59. uint32_t blkCnt; /* loop counters */
  60. const float16_t *pDualCoef = S->dualCoefficients;
  61. _Float16 sum = S->intercept;
  62. f16x8_t vSum = vdupq_n_f16(0.0f16);
  63. row = numRows;
  64. /*
  65. * compute 4 rows in parrallel
  66. */
  67. while (row >= 4) {
  68. const float16_t *pInA2, *pInA3;
  69. float16_t const *pSrcA0Vec, *pSrcA1Vec, *pSrcA2Vec, *pSrcA3Vec, *pInVec;
  70. f16x8_t vecIn, acc0, acc1, acc2, acc3;
  71. float16_t const *pSrcVecPtr = in;
  72. /*
  73. * Initialize the pointers to 4 consecutive MatrixA rows
  74. */
  75. pInA0 = pSrcA;
  76. pInA1 = pInA0 + numCols;
  77. pInA2 = pInA1 + numCols;
  78. pInA3 = pInA2 + numCols;
  79. /*
  80. * Initialize the vector pointer
  81. */
  82. pInVec = pSrcVecPtr;
  83. /*
  84. * reset accumulators
  85. */
  86. acc0 = vdupq_n_f16(0.0f16);
  87. acc1 = vdupq_n_f16(0.0f16);
  88. acc2 = vdupq_n_f16(0.0f16);
  89. acc3 = vdupq_n_f16(0.0f16);
  90. pSrcA0Vec = pInA0;
  91. pSrcA1Vec = pInA1;
  92. pSrcA2Vec = pInA2;
  93. pSrcA3Vec = pInA3;
  94. blkCnt = numCols >> 3;
  95. while (blkCnt > 0U) {
  96. f16x8_t vecA;
  97. f16x8_t vecDif;
  98. vecIn = vld1q(pInVec);
  99. pInVec += 8;
  100. vecA = vld1q(pSrcA0Vec);
  101. pSrcA0Vec += 8;
  102. vecDif = vsubq(vecIn, vecA);
  103. acc0 = vfmaq(acc0, vecDif, vecDif);
  104. vecA = vld1q(pSrcA1Vec);
  105. pSrcA1Vec += 8;
  106. vecDif = vsubq(vecIn, vecA);
  107. acc1 = vfmaq(acc1, vecDif, vecDif);
  108. vecA = vld1q(pSrcA2Vec);
  109. pSrcA2Vec += 8;
  110. vecDif = vsubq(vecIn, vecA);
  111. acc2 = vfmaq(acc2, vecDif, vecDif);
  112. vecA = vld1q(pSrcA3Vec);
  113. pSrcA3Vec += 8;
  114. vecDif = vsubq(vecIn, vecA);
  115. acc3 = vfmaq(acc3, vecDif, vecDif);
  116. blkCnt--;
  117. }
  118. /*
  119. * tail
  120. * (will be merged thru tail predication)
  121. */
  122. blkCnt = numCols & 7;
  123. if (blkCnt > 0U) {
  124. mve_pred16_t p0 = vctp16q(blkCnt);
  125. f16x8_t vecA;
  126. f16x8_t vecDif;
  127. vecIn = vldrhq_z_f16(pInVec, p0);
  128. vecA = vldrhq_z_f16(pSrcA0Vec, p0);
  129. vecDif = vsubq(vecIn, vecA);
  130. acc0 = vfmaq(acc0, vecDif, vecDif);
  131. vecA = vldrhq_z_f16(pSrcA1Vec, p0);
  132. vecDif = vsubq(vecIn, vecA);
  133. acc1 = vfmaq(acc1, vecDif, vecDif);
  134. vecA = vldrhq_z_f16(pSrcA2Vec, p0);;
  135. vecDif = vsubq(vecIn, vecA);
  136. acc2 = vfmaq(acc2, vecDif, vecDif);
  137. vecA = vldrhq_z_f16(pSrcA3Vec, p0);
  138. vecDif = vsubq(vecIn, vecA);
  139. acc3 = vfmaq(acc3, vecDif, vecDif);
  140. }
  141. /*
  142. * Sum the partial parts
  143. */
  144. //sum += *pDualCoef++ * expf(-S->gamma * vecReduceF16Mve(acc0));
  145. f16x8_t vtmp = vuninitializedq_f16();
  146. vtmp = vsetq_lane(vecAddAcrossF16Mve(acc0), vtmp, 0);
  147. vtmp = vsetq_lane(vecAddAcrossF16Mve(acc1), vtmp, 1);
  148. vtmp = vsetq_lane(vecAddAcrossF16Mve(acc2), vtmp, 2);
  149. vtmp = vsetq_lane(vecAddAcrossF16Mve(acc3), vtmp, 3);
  150. vSum =
  151. vfmaq_m_f16(vSum, vld1q(pDualCoef),
  152. vexpq_f16(vmulq_n_f16(vtmp, -(_Float16)S->gamma)),vctp16q(4));
  153. pDualCoef += 4;
  154. pSrcA += numCols * 4;
  155. /*
  156. * Decrement the row loop counter
  157. */
  158. row -= 4;
  159. }
  160. /*
  161. * compute 2 rows in parrallel
  162. */
  163. if (row >= 2) {
  164. float16_t const *pSrcA0Vec, *pSrcA1Vec, *pInVec;
  165. f16x8_t vecIn, acc0, acc1;
  166. float16_t const *pSrcVecPtr = in;
  167. /*
  168. * Initialize the pointers to 2 consecutive MatrixA rows
  169. */
  170. pInA0 = pSrcA;
  171. pInA1 = pInA0 + numCols;
  172. /*
  173. * Initialize the vector pointer
  174. */
  175. pInVec = pSrcVecPtr;
  176. /*
  177. * reset accumulators
  178. */
  179. acc0 = vdupq_n_f16(0.0f16);
  180. acc1 = vdupq_n_f16(0.0f16);
  181. pSrcA0Vec = pInA0;
  182. pSrcA1Vec = pInA1;
  183. blkCnt = numCols >> 3;
  184. while (blkCnt > 0U) {
  185. f16x8_t vecA;
  186. f16x8_t vecDif;
  187. vecIn = vld1q(pInVec);
  188. pInVec += 8;
  189. vecA = vld1q(pSrcA0Vec);
  190. pSrcA0Vec += 8;
  191. vecDif = vsubq(vecIn, vecA);
  192. acc0 = vfmaq(acc0, vecDif, vecDif);;
  193. vecA = vld1q(pSrcA1Vec);
  194. pSrcA1Vec += 8;
  195. vecDif = vsubq(vecIn, vecA);
  196. acc1 = vfmaq(acc1, vecDif, vecDif);
  197. blkCnt--;
  198. }
  199. /*
  200. * tail
  201. * (will be merged thru tail predication)
  202. */
  203. blkCnt = numCols & 7;
  204. if (blkCnt > 0U) {
  205. mve_pred16_t p0 = vctp16q(blkCnt);
  206. f16x8_t vecA, vecDif;
  207. vecIn = vldrhq_z_f16(pInVec, p0);
  208. vecA = vldrhq_z_f16(pSrcA0Vec, p0);
  209. vecDif = vsubq(vecIn, vecA);
  210. acc0 = vfmaq(acc0, vecDif, vecDif);
  211. vecA = vldrhq_z_f16(pSrcA1Vec, p0);
  212. vecDif = vsubq(vecIn, vecA);
  213. acc1 = vfmaq(acc1, vecDif, vecDif);
  214. }
  215. /*
  216. * Sum the partial parts
  217. */
  218. f16x8_t vtmp = vuninitializedq_f16();
  219. vtmp = vsetq_lane(vecAddAcrossF16Mve(acc0), vtmp, 0);
  220. vtmp = vsetq_lane(vecAddAcrossF16Mve(acc1), vtmp, 1);
  221. vSum =
  222. vfmaq_m_f16(vSum, vld1q(pDualCoef),
  223. vexpq_f16(vmulq_n_f16(vtmp, -(_Float16)S->gamma)), vctp16q(2));
  224. pDualCoef += 2;
  225. pSrcA += numCols * 2;
  226. row -= 2;
  227. }
  228. if (row >= 1) {
  229. f16x8_t vecIn, acc0;
  230. float16_t const *pSrcA0Vec, *pInVec;
  231. float16_t const *pSrcVecPtr = in;
  232. /*
  233. * Initialize the pointers to last MatrixA row
  234. */
  235. pInA0 = pSrcA;
  236. /*
  237. * Initialize the vector pointer
  238. */
  239. pInVec = pSrcVecPtr;
  240. /*
  241. * reset accumulators
  242. */
  243. acc0 = vdupq_n_f16(0.0f);
  244. pSrcA0Vec = pInA0;
  245. blkCnt = numCols >> 3;
  246. while (blkCnt > 0U) {
  247. f16x8_t vecA, vecDif;
  248. vecIn = vld1q(pInVec);
  249. pInVec += 8;
  250. vecA = vld1q(pSrcA0Vec);
  251. pSrcA0Vec += 8;
  252. vecDif = vsubq(vecIn, vecA);
  253. acc0 = vfmaq(acc0, vecDif, vecDif);
  254. blkCnt--;
  255. }
  256. /*
  257. * tail
  258. * (will be merged thru tail predication)
  259. */
  260. blkCnt = numCols & 7;
  261. if (blkCnt > 0U) {
  262. mve_pred16_t p0 = vctp16q(blkCnt);
  263. f16x8_t vecA, vecDif;
  264. vecIn = vldrhq_z_f16(pInVec, p0);
  265. vecA = vldrhq_z_f16(pSrcA0Vec, p0);
  266. vecDif = vsubq(vecIn, vecA);
  267. acc0 = vfmaq(acc0, vecDif, vecDif);
  268. }
  269. /*
  270. * Sum the partial parts
  271. */
  272. f16x8_t vtmp = vuninitializedq_f16();
  273. vtmp = vsetq_lane(vecAddAcrossF16Mve(acc0), vtmp, 0);
  274. vSum =
  275. vfmaq_m_f16(vSum, vld1q(pDualCoef),
  276. vexpq_f16(vmulq_n_f16(vtmp, -(_Float16)S->gamma)), vctp16q(1));
  277. }
  278. sum += (_Float16)vecAddAcrossF16Mve(vSum);
  279. *pResult = S->classes[STEP(sum)];
  280. }
  281. #else
  282. void arm_svm_rbf_predict_f16(
  283. const arm_svm_rbf_instance_f16 *S,
  284. const float16_t * in,
  285. int32_t * pResult)
  286. {
  287. _Float16 sum=S->intercept;
  288. _Float16 dot=00.f16;
  289. uint32_t i,j;
  290. const float16_t *pSupport = S->supportVectors;
  291. for(i=0; i < S->nbOfSupportVectors; i++)
  292. {
  293. dot=0.0f16;
  294. for(j=0; j < S->vectorDimension; j++)
  295. {
  296. dot = dot + ARM_SQ((_Float16)in[j] - (_Float16) *pSupport);
  297. pSupport++;
  298. }
  299. sum += (_Float16)S->dualCoefficients[i] * (_Float16)expf((float32_t)(-(_Float16)S->gamma * (_Float16)dot));
  300. }
  301. *pResult=S->classes[STEP(sum)];
  302. }
#endif /* defined(ARM_MATH_MVE_FLOAT16) && !defined(ARM_MATH_AUTOVECTORIZE) */
  304. /**
  305. * @} end of rbfsvm group
  306. */
  307. #endif /* #if defined(ARM_FLOAT16_SUPPORTED) */