arm_svm_sigmoid_predict_f16.c 9.2 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333
  1. /* ----------------------------------------------------------------------
  2. * Project: CMSIS DSP Library
  3. * Title: arm_svm_sigmoid_predict_f16.c
  4. * Description: SVM Sigmoid Classifier
  5. *
  6. * $Date: 23 April 2021
  7. * $Revision: V1.9.0
  8. *
  9. * Target Processor: Cortex-M and Cortex-A cores
  10. * -------------------------------------------------------------------- */
  11. /*
  12. * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved.
  13. *
  14. * SPDX-License-Identifier: Apache-2.0
  15. *
  16. * Licensed under the Apache License, Version 2.0 (the License); you may
  17. * not use this file except in compliance with the License.
  18. * You may obtain a copy of the License at
  19. *
  20. * www.apache.org/licenses/LICENSE-2.0
  21. *
  22. * Unless required by applicable law or agreed to in writing, software
  23. * distributed under the License is distributed on an AS IS BASIS, WITHOUT
  24. * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  25. * See the License for the specific language governing permissions and
  26. * limitations under the License.
  27. */
  28. #include "dsp/svm_functions_f16.h"
  29. #if defined(ARM_FLOAT16_SUPPORTED)
  30. #include <limits.h>
  31. #include <math.h>
  32. /**
  33. * @addtogroup sigmoidsvm
  34. * @{
  35. */
  36. /**
  37. * @brief SVM sigmoid prediction
* @param[in] S Pointer to an instance of the sigmoid SVM structure.
  39. * @param[in] in Pointer to input vector
  40. * @param[out] pResult Decision value
  41. * @return none.
  42. *
  43. */
  44. #if defined(ARM_MATH_MVE_FLOAT16) && !defined(ARM_MATH_AUTOVECTORIZE)
  45. #include "arm_helium_utils.h"
  46. #include "arm_vec_math_f16.h"
void arm_svm_sigmoid_predict_f16(
    const arm_svm_sigmoid_instance_f16 *S,
    const float16_t * in,
    int32_t * pResult)
{
    /* inlined Matrix x Vector function interleaved with dot prod:
       each support-vector row is dotted with the input, then the sigmoid
       kernel tanh(gamma * dot + coef0) is weighted by the matching dual
       coefficient and accumulated into the decision value. Rows are
       processed 4, then 2, then 1 at a time. */
    uint32_t numRows = S->nbOfSupportVectors;
    uint32_t numCols = S->vectorDimension;
    const float16_t *pSupport = S->supportVectors;
    const float16_t *pSrcA = pSupport;        /* walks the support-vector matrix row blocks */
    const float16_t *pInA0;
    const float16_t *pInA1;
    uint32_t row;
    uint32_t blkCnt; /* loop counters */
    const float16_t *pDualCoef = S->dualCoefficients;
    _Float16 sum = S->intercept;              /* scalar part of the decision value */
    f16x8_t vSum = vdupq_n_f16(0.0f);         /* per-lane accumulator of kernel terms */
    row = numRows;
    /*
     * compute 4 rows in parallel
     */
    while (row >= 4) {
        const float16_t *pInA2, *pInA3;
        float16_t const *pSrcA0Vec, *pSrcA1Vec, *pSrcA2Vec, *pSrcA3Vec, *pInVec;
        f16x8_t vecIn, acc0, acc1, acc2, acc3;
        float16_t const *pSrcVecPtr = in;
        /*
         * Initialize the pointers to 4 consecutive MatrixA rows
         */
        pInA0 = pSrcA;
        pInA1 = pInA0 + numCols;
        pInA2 = pInA1 + numCols;
        pInA3 = pInA2 + numCols;
        /*
         * Initialize the vector pointer
         */
        pInVec = pSrcVecPtr;
        /*
         * reset accumulators
         */
        acc0 = vdupq_n_f16(0.0f);
        acc1 = vdupq_n_f16(0.0f);
        acc2 = vdupq_n_f16(0.0f);
        acc3 = vdupq_n_f16(0.0f);
        pSrcA0Vec = pInA0;
        pSrcA1Vec = pInA1;
        pSrcA2Vec = pInA2;
        pSrcA3Vec = pInA3;
        /* main loop: 8 f16 lanes per iteration */
        blkCnt = numCols >> 3;
        while (blkCnt > 0U) {
            f16x8_t vecA;
            vecIn = vld1q(pInVec);
            pInVec += 8;
            vecA = vld1q(pSrcA0Vec);
            pSrcA0Vec += 8;
            acc0 = vfmaq(acc0, vecIn, vecA);
            vecA = vld1q(pSrcA1Vec);
            pSrcA1Vec += 8;
            acc1 = vfmaq(acc1, vecIn, vecA);
            vecA = vld1q(pSrcA2Vec);
            pSrcA2Vec += 8;
            acc2 = vfmaq(acc2, vecIn, vecA);
            vecA = vld1q(pSrcA3Vec);
            pSrcA3Vec += 8;
            acc3 = vfmaq(acc3, vecIn, vecA);
            blkCnt--;
        }
        /*
         * tail
         * (will be merged thru tail predication)
         */
        blkCnt = numCols & 7;
        if (blkCnt > 0U) {
            /* predicated loads zero-fill the lanes beyond the remaining count */
            mve_pred16_t p0 = vctp16q(blkCnt);
            f16x8_t vecA;
            vecIn = vldrhq_z_f16(pInVec, p0);
            vecA = vldrhq_z_f16(pSrcA0Vec, p0);
            acc0 = vfmaq(acc0, vecIn, vecA);
            vecA = vldrhq_z_f16(pSrcA1Vec, p0);
            acc1 = vfmaq(acc1, vecIn, vecA);
            vecA = vldrhq_z_f16(pSrcA2Vec, p0);
            acc2 = vfmaq(acc2, vecIn, vecA);
            vecA = vldrhq_z_f16(pSrcA3Vec, p0);
            acc3 = vfmaq(acc3, vecIn, vecA);
        }
        /*
         * Sum the partial parts
         */
        /* pack the 4 row dot products into lanes 0..3, apply the sigmoid
           kernel, then accumulate dualCoef * kernel — predicated with
           vctp16q(4) so only the 4 valid lanes contribute to vSum */
        f16x8_t vtmp = vuninitializedq_f16();
        vtmp = vsetq_lane(vecAddAcrossF16Mve(acc0), vtmp, 0);
        vtmp = vsetq_lane(vecAddAcrossF16Mve(acc1), vtmp, 1);
        vtmp = vsetq_lane(vecAddAcrossF16Mve(acc2), vtmp, 2);
        vtmp = vsetq_lane(vecAddAcrossF16Mve(acc3), vtmp, 3);
        vSum =
            vfmaq_m_f16(vSum, vld1q(pDualCoef),
                        vtanhq_f16(vaddq_n_f16(vmulq_n_f16(vtmp, S->gamma), S->coef0)),vctp16q(4));
        pDualCoef += 4;
        pSrcA += numCols * 4;
        /*
         * Decrement the row loop counter
         */
        row -= 4;
    }
    /*
     * compute 2 rows in parallel
     */
    if (row >= 2) {
        float16_t const *pSrcA0Vec, *pSrcA1Vec, *pInVec;
        f16x8_t vecIn, acc0, acc1;
        float16_t const *pSrcVecPtr = in;
        /*
         * Initialize the pointers to 2 consecutive MatrixA rows
         */
        pInA0 = pSrcA;
        pInA1 = pInA0 + numCols;
        /*
         * Initialize the vector pointer
         */
        pInVec = pSrcVecPtr;
        /*
         * reset accumulators
         */
        acc0 = vdupq_n_f16(0.0f);
        acc1 = vdupq_n_f16(0.0f);
        pSrcA0Vec = pInA0;
        pSrcA1Vec = pInA1;
        blkCnt = numCols >> 3;
        while (blkCnt > 0U) {
            f16x8_t vecA;
            vecIn = vld1q(pInVec);
            pInVec += 8;
            vecA = vld1q(pSrcA0Vec);
            pSrcA0Vec += 8;
            acc0 = vfmaq(acc0, vecIn, vecA);
            vecA = vld1q(pSrcA1Vec);
            pSrcA1Vec += 8;
            acc1 = vfmaq(acc1, vecIn, vecA);
            blkCnt--;
        }
        /*
         * tail
         * (will be merged thru tail predication)
         */
        blkCnt = numCols & 7;
        if (blkCnt > 0U) {
            mve_pred16_t p0 = vctp16q(blkCnt);
            f16x8_t vecA;
            vecIn = vldrhq_z_f16(pInVec, p0);
            vecA = vldrhq_z_f16(pSrcA0Vec, p0);
            acc0 = vfmaq(acc0, vecIn, vecA);
            vecA = vldrhq_z_f16(pSrcA1Vec, p0);
            acc1 = vfmaq(acc1, vecIn, vecA);
        }
        /*
         * Sum the partial parts
         */
        /* only lanes 0..1 are valid here, hence the vctp16q(2) predicate */
        f16x8_t vtmp = vuninitializedq_f16();
        vtmp = vsetq_lane(vecAddAcrossF16Mve(acc0), vtmp, 0);
        vtmp = vsetq_lane(vecAddAcrossF16Mve(acc1), vtmp, 1);
        vSum =
            vfmaq_m_f16(vSum, vld1q(pDualCoef),
                        vtanhq_f16(vaddq_n_f16(vmulq_n_f16(vtmp, S->gamma), S->coef0)),
                        vctp16q(2));
        pDualCoef += 2;
        pSrcA += numCols * 2;
        row -= 2;
    }
    /* at most one support vector left */
    if (row >= 1) {
        f16x8_t vecIn, acc0;
        float16_t const *pSrcA0Vec, *pInVec;
        float16_t const *pSrcVecPtr = in;
        /*
         * Initialize the pointers to last MatrixA row
         */
        pInA0 = pSrcA;
        /*
         * Initialize the vector pointer
         */
        pInVec = pSrcVecPtr;
        /*
         * reset accumulators
         */
        acc0 = vdupq_n_f16(0.0f);
        pSrcA0Vec = pInA0;
        blkCnt = numCols >> 3;
        while (blkCnt > 0U) {
            f16x8_t vecA;
            vecIn = vld1q(pInVec);
            pInVec += 8;
            vecA = vld1q(pSrcA0Vec);
            pSrcA0Vec += 8;
            acc0 = vfmaq(acc0, vecIn, vecA);
            blkCnt--;
        }
        /*
         * tail
         * (will be merged thru tail predication)
         */
        blkCnt = numCols & 7;
        if (blkCnt > 0U) {
            mve_pred16_t p0 = vctp16q(blkCnt);
            f16x8_t vecA;
            vecIn = vldrhq_z_f16(pInVec, p0);
            vecA = vldrhq_z_f16(pSrcA0Vec, p0);
            acc0 = vfmaq(acc0, vecIn, vecA);
        }
        /*
         * Sum the partial parts
         */
        /* single valid lane — vctp16q(1) keeps lanes 1..7 out of vSum */
        f16x8_t vtmp = vuninitializedq_f16();
        vtmp = vsetq_lane(vecAddAcrossF16Mve(acc0), vtmp, 0);
        vSum =
            vfmaq_m_f16(vSum, vld1q(pDualCoef),
                        vtanhq_f16(vaddq_n_f16(vmulq_n_f16(vtmp, S->gamma), S->coef0)),
                        vctp16q(1));
    }
    /* reduce the vector accumulator into the scalar decision value */
    sum += vecAddAcrossF16Mve(vSum);
    /* map the decision value to a class label via STEP() */
    *pResult = S->classes[STEP(sum)];
}
  265. #else
  266. void arm_svm_sigmoid_predict_f16(
  267. const arm_svm_sigmoid_instance_f16 *S,
  268. const float16_t * in,
  269. int32_t * pResult)
  270. {
  271. _Float16 sum=S->intercept;
  272. _Float16 dot=0.0f16;
  273. uint32_t i,j;
  274. const float16_t *pSupport = S->supportVectors;
  275. for(i=0; i < S->nbOfSupportVectors; i++)
  276. {
  277. dot=0.0f16;
  278. for(j=0; j < S->vectorDimension; j++)
  279. {
  280. dot = dot + (_Float16)in[j] * (_Float16)*pSupport++;
  281. }
  282. sum += (_Float16)S->dualCoefficients[i] * (_Float16)tanhf((_Float16)S->gamma * dot + (_Float16)S->coef0);
  283. }
  284. *pResult=S->classes[STEP(sum)];
  285. }
#endif /* defined(ARM_MATH_MVE_FLOAT16) && !defined(ARM_MATH_AUTOVECTORIZE) */
  287. /**
  288. * @} end of sigmoidsvm group
  289. */
  290. #endif /* #if defined(ARM_FLOAT16_SUPPORTED) */