
/* ----------------------------------------------------------------------
 * Project:      CMSIS DSP Library
 * Title:        arm_mat_cmplx_mult_f16.c
 * Description:  Floating-point complex matrix multiplication
 *
 * $Date:        23 April 2021
 * $Revision:    V1.9.0
 *
 * Target Processor: Cortex-M and Cortex-A cores
 * -------------------------------------------------------------------- */
/*
 * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved.
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "dsp/matrix_functions_f16.h"

#if defined(ARM_FLOAT16_SUPPORTED)

/**
  @ingroup groupMatrix
 */

/**
  @addtogroup CmplxMatrixMult
  @{
 */

/**
  @brief         Floating-point complex matrix multiplication.
  @param[in]     pSrcA      points to first input complex matrix structure
  @param[in]     pSrcB      points to second input complex matrix structure
  @param[out]    pDst       points to output complex matrix structure
  @return        execution status
                   - \ref ARM_MATH_SUCCESS       : Operation successful
                   - \ref ARM_MATH_SIZE_MISMATCH : Matrix size check failed
 */
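
/*
 * Usage sketch (illustrative only): the 2x2 size, buffer contents and
 * instance names below are assumptions for the example, not taken from
 * this file. Complex elements are stored as interleaved
 * {real, imaginary} pairs, row-major.
 *
 *     float16_t srcA[2 * 2 * CMPLX_DIM];   // 2x2 complex input A
 *     float16_t srcB[2 * 2 * CMPLX_DIM];   // 2x2 complex input B
 *     float16_t dst [2 * 2 * CMPLX_DIM];   // 2x2 complex result
 *     arm_matrix_instance_f16 matA = { 2, 2, srcA };   // numRows, numCols, pData
 *     arm_matrix_instance_f16 matB = { 2, 2, srcB };
 *     arm_matrix_instance_f16 matD = { 2, 2, dst };
 *
 *     if (arm_mat_cmplx_mult_f16(&matA, &matB, &matD) != ARM_MATH_SUCCESS)
 *     {
 *         // size check failed: ARM_MATH_SIZE_MISMATCH
 *     }
 */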
#if defined(ARM_MATH_MVE_FLOAT16) && !defined(ARM_MATH_AUTOVECTORIZE) && defined(__CMSIS_GCC_H)
#pragma GCC warning "Scalar version of arm_mat_cmplx_mult_f16 built. Helium version has build issues with gcc."
#endif

#if defined(ARM_MATH_MVE_FLOAT16) && !defined(ARM_MATH_AUTOVECTORIZE) && !defined(__CMSIS_GCC_H)

#include "arm_helium_utils.h"

#define DONTCARE 0 /* inactive lane content */

__STATIC_FORCEINLINE arm_status arm_mat_cmplx_mult_f16_2x2_mve(
    const arm_matrix_instance_f16 * pSrcA,
    const arm_matrix_instance_f16 * pSrcB,
    arm_matrix_instance_f16 * pDst)
{
#define MATRIX_DIM 2
    float16_t const *pInB = pSrcB->pData;  /* input data matrix pointer B */
    float16_t       *pInA = pSrcA->pData;  /* input data matrix pointer A */
    float16_t       *pOut = pDst->pData;   /* output data matrix pointer  */
    uint16x8_t       vecColBOffs0, vecColAOffs0, vecColAOffs1;
    float16_t       *pInA0 = pInA;
    f16x8_t          acc0, acc1;
    f16x8_t          vecB, vecA0, vecA1;
    f16x8_t          vecTmp;
    uint16_t         tmp;
    static const uint16_t offsetB0[8] = {
        0, 1,
        MATRIX_DIM * CMPLX_DIM, MATRIX_DIM * CMPLX_DIM + 1,
        2, 3,
        MATRIX_DIM * CMPLX_DIM + 2, MATRIX_DIM * CMPLX_DIM + 3,
    };
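    /*
     * With MATRIX_DIM = 2 and CMPLX_DIM = 2 these offsets read
     * {0, 1, 4, 5, 2, 3, 6, 7}: the lower half of a gathered vector holds
     * the two complex elements of B column 0, the upper half those of
     * column 1, so a single gather load feeds both output columns.
     */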
    vecColBOffs0 = vldrhq_u16((uint16_t const *) offsetB0);

    /* A offsets: {0,1,2,3} in both vector halves, i.e. row 0 of A duplicated */
    tmp = 0;
    vecColAOffs0 = viwdupq_u16(tmp, 4, 1);
    /* same pattern one full row down, i.e. row 1 of A duplicated */
    vecColAOffs1 = vecColAOffs0 + (uint16_t)(CMPLX_DIM * MATRIX_DIM);

    pInB = (float16_t const *) pSrcB->pData;

    vecA0 = vldrhq_gather_shifted_offset_f16(pInA0, vecColAOffs0);
    vecA1 = vldrhq_gather_shifted_offset_f16(pInA0, vecColAOffs1);
    vecB  = vldrhq_gather_shifted_offset(pInB, vecColBOffs0);

    acc0 = vcmulq(vecA0, vecB);
    acc0 = vcmlaq_rot90(acc0, vecA0, vecB);
    acc1 = vcmulq(vecA1, vecB);
    acc1 = vcmlaq_rot90(acc1, vecA1, vecB);

    /*
     * Sum the two partial complex products inside each vector half:
     * vrev64q swaps adjacent 32-bit complex pairs, so adding the reversed
     * vector to the original yields
     *   re0+re1 | im0+im1 | re0+re1 | im0+im1
     *   re2+re3 | im2+im3 | re2+re3 | im2+im3
     * and 32-bit lanes 0 and 2 hold the two column results of each row.
     */
    vecTmp = (f16x8_t) vrev64q_s32((int32x4_t) acc0);
    vecTmp = vaddq(vecTmp, acc0);
    *(float32_t *)(&pOut[0 * CMPLX_DIM * MATRIX_DIM]) = ((f32x4_t) vecTmp)[0];
    *(float32_t *)(&pOut[0 * CMPLX_DIM * MATRIX_DIM + CMPLX_DIM]) = ((f32x4_t) vecTmp)[2];

    vecTmp = (f16x8_t) vrev64q_s32((int32x4_t) acc1);
    vecTmp = vaddq(vecTmp, acc1);
    *(float32_t *)(&pOut[1 * CMPLX_DIM * MATRIX_DIM]) = ((f32x4_t) vecTmp)[0];
    *(float32_t *)(&pOut[1 * CMPLX_DIM * MATRIX_DIM + CMPLX_DIM]) = ((f32x4_t) vecTmp)[2];

    /*
     * Return to application
     */
    return (ARM_MATH_SUCCESS);
#undef MATRIX_DIM
}

__STATIC_FORCEINLINE arm_status arm_mat_cmplx_mult_f16_3x3_mve(
    const arm_matrix_instance_f16 * pSrcA,
    const arm_matrix_instance_f16 * pSrcB,
    arm_matrix_instance_f16 * pDst)
{
#define MATRIX_DIM 3
    float16_t const *pInB = pSrcB->pData;  /* input data matrix pointer B */
    float16_t       *pInA = pSrcA->pData;  /* input data matrix pointer A */
    float16_t       *pOut = pDst->pData;   /* output data matrix pointer  */
    uint16x8_t       vecColBOffs0;
    float16_t       *pInA0 = pInA;
    float16_t       *pInA1 = pInA0 + CMPLX_DIM * MATRIX_DIM;
    float16_t       *pInA2 = pInA1 + CMPLX_DIM * MATRIX_DIM;
    f16x8_t          acc0, acc1, acc2;
    f16x8_t          vecB, vecA0, vecA1, vecA2;
    static const uint16_t offsetB0[8] = {
        0, 1,
        MATRIX_DIM * CMPLX_DIM, MATRIX_DIM * CMPLX_DIM + 1,
        2 * MATRIX_DIM * CMPLX_DIM, 2 * MATRIX_DIM * CMPLX_DIM + 1,
        DONTCARE, DONTCARE
    };
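    /*
     * The offsets gather the three complex elements of one B column
     * (row stride MATRIX_DIM * CMPLX_DIM); the last two lanes are
     * DONTCARE and are masked off by the predicate below.
     */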
    /* use predication to mask the two unused upper vector lanes */
    mve_pred16_t p0 = vctp16q(MATRIX_DIM * CMPLX_DIM);

    vecColBOffs0 = vldrhq_u16((uint16_t const *) offsetB0);

    pInB = (float16_t const *) pSrcB->pData;

    vecA0 = vldrhq_f16(pInA0);
    vecA1 = vldrhq_f16(pInA1);
    vecA2 = vldrhq_f16(pInA2);

    vecB = vldrhq_gather_shifted_offset_z(pInB, vecColBOffs0, p0);

    acc0 = vcmulq(vecA0, vecB);
    acc0 = vcmlaq_rot90(acc0, vecA0, vecB);
    acc1 = vcmulq(vecA1, vecB);
    acc1 = vcmlaq_rot90(acc1, vecA1, vecB);
    acc2 = vcmulq(vecA2, vecB);
    acc2 = vcmlaq_rot90(acc2, vecA2, vecB);

    mve_cmplx_sum_intra_vec_f16(acc0, &pOut[0 * CMPLX_DIM * MATRIX_DIM]);
    mve_cmplx_sum_intra_vec_f16(acc1, &pOut[1 * CMPLX_DIM * MATRIX_DIM]);
    mve_cmplx_sum_intra_vec_f16(acc2, &pOut[2 * CMPLX_DIM * MATRIX_DIM]);
    pOut += CMPLX_DIM;

    /*
     * move to next B column
     */
    pInB = pInB + CMPLX_DIM;

    vecB = vldrhq_gather_shifted_offset_z(pInB, vecColBOffs0, p0);

    acc0 = vcmulq(vecA0, vecB);
    acc0 = vcmlaq_rot90(acc0, vecA0, vecB);
    acc1 = vcmulq(vecA1, vecB);
    acc1 = vcmlaq_rot90(acc1, vecA1, vecB);
    acc2 = vcmulq(vecA2, vecB);
    acc2 = vcmlaq_rot90(acc2, vecA2, vecB);

    mve_cmplx_sum_intra_vec_f16(acc0, &pOut[0 * CMPLX_DIM * MATRIX_DIM]);
    mve_cmplx_sum_intra_vec_f16(acc1, &pOut[1 * CMPLX_DIM * MATRIX_DIM]);
    mve_cmplx_sum_intra_vec_f16(acc2, &pOut[2 * CMPLX_DIM * MATRIX_DIM]);
    pOut += CMPLX_DIM;

    /*
     * move to next B column
     */
    pInB = pInB + CMPLX_DIM;

    vecB = vldrhq_gather_shifted_offset_z(pInB, vecColBOffs0, p0);

    acc0 = vcmulq(vecA0, vecB);
    acc0 = vcmlaq_rot90(acc0, vecA0, vecB);
    acc1 = vcmulq(vecA1, vecB);
    acc1 = vcmlaq_rot90(acc1, vecA1, vecB);
    acc2 = vcmulq(vecA2, vecB);
    acc2 = vcmlaq_rot90(acc2, vecA2, vecB);

    mve_cmplx_sum_intra_vec_f16(acc0, &pOut[0 * CMPLX_DIM * MATRIX_DIM]);
    mve_cmplx_sum_intra_vec_f16(acc1, &pOut[1 * CMPLX_DIM * MATRIX_DIM]);
    mve_cmplx_sum_intra_vec_f16(acc2, &pOut[2 * CMPLX_DIM * MATRIX_DIM]);

    /*
     * Return to application
     */
    return (ARM_MATH_SUCCESS);
#undef MATRIX_DIM
}

__STATIC_FORCEINLINE arm_status arm_mat_cmplx_mult_f16_4x4_mve(
    const arm_matrix_instance_f16 * pSrcA,
    const arm_matrix_instance_f16 * pSrcB,
    arm_matrix_instance_f16 * pDst)
{
#define MATRIX_DIM 4
    float16_t const *pInB = pSrcB->pData;  /* input data matrix pointer B */
    float16_t       *pInA = pSrcA->pData;  /* input data matrix pointer A */
    float16_t       *pOut = pDst->pData;   /* output data matrix pointer  */
    uint16x8_t       vecColBOffs0;
    float16_t       *pInA0 = pInA;
    float16_t       *pInA1 = pInA0 + CMPLX_DIM * MATRIX_DIM;
    float16_t       *pInA2 = pInA1 + CMPLX_DIM * MATRIX_DIM;
    float16_t       *pInA3 = pInA2 + CMPLX_DIM * MATRIX_DIM;
    f16x8_t          acc0, acc1, acc2, acc3;
    f16x8_t          vecB, vecA;
    static const uint16_t offsetB0[8] = {
        0, 1,
        MATRIX_DIM * CMPLX_DIM, MATRIX_DIM * CMPLX_DIM + 1,
        2 * MATRIX_DIM * CMPLX_DIM, 2 * MATRIX_DIM * CMPLX_DIM + 1,
        3 * MATRIX_DIM * CMPLX_DIM, 3 * MATRIX_DIM * CMPLX_DIM + 1
    };
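    /*
     * The offsets gather all four complex elements of one B column in a
     * single vector load (row stride MATRIX_DIM * CMPLX_DIM), so no
     * predication is needed in the 4x4 case.
     */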
    vecColBOffs0 = vldrhq_u16((uint16_t const *) offsetB0);

    pInB = (float16_t const *) pSrcB->pData;

    vecB = vldrhq_gather_shifted_offset(pInB, vecColBOffs0);

    vecA = vldrhq_f16(pInA0);
    acc0 = vcmulq(vecA, vecB);
    acc0 = vcmlaq_rot90(acc0, vecA, vecB);
    vecA = vldrhq_f16(pInA1);
    acc1 = vcmulq(vecA, vecB);
    acc1 = vcmlaq_rot90(acc1, vecA, vecB);
    vecA = vldrhq_f16(pInA2);
    acc2 = vcmulq(vecA, vecB);
    acc2 = vcmlaq_rot90(acc2, vecA, vecB);
    vecA = vldrhq_f16(pInA3);
    acc3 = vcmulq(vecA, vecB);
    acc3 = vcmlaq_rot90(acc3, vecA, vecB);

    mve_cmplx_sum_intra_vec_f16(acc0, &pOut[0 * CMPLX_DIM * MATRIX_DIM]);
    mve_cmplx_sum_intra_vec_f16(acc1, &pOut[1 * CMPLX_DIM * MATRIX_DIM]);
    mve_cmplx_sum_intra_vec_f16(acc2, &pOut[2 * CMPLX_DIM * MATRIX_DIM]);
    mve_cmplx_sum_intra_vec_f16(acc3, &pOut[3 * CMPLX_DIM * MATRIX_DIM]);
    pOut += CMPLX_DIM;

    /*
     * move to next B column
     */
    pInB = pInB + CMPLX_DIM;

    vecB = vldrhq_gather_shifted_offset(pInB, vecColBOffs0);

    vecA = vldrhq_f16(pInA0);
    acc0 = vcmulq(vecA, vecB);
    acc0 = vcmlaq_rot90(acc0, vecA, vecB);
    vecA = vldrhq_f16(pInA1);
    acc1 = vcmulq(vecA, vecB);
    acc1 = vcmlaq_rot90(acc1, vecA, vecB);
    vecA = vldrhq_f16(pInA2);
    acc2 = vcmulq(vecA, vecB);
    acc2 = vcmlaq_rot90(acc2, vecA, vecB);
    vecA = vldrhq_f16(pInA3);
    acc3 = vcmulq(vecA, vecB);
    acc3 = vcmlaq_rot90(acc3, vecA, vecB);

    mve_cmplx_sum_intra_vec_f16(acc0, &pOut[0 * CMPLX_DIM * MATRIX_DIM]);
    mve_cmplx_sum_intra_vec_f16(acc1, &pOut[1 * CMPLX_DIM * MATRIX_DIM]);
    mve_cmplx_sum_intra_vec_f16(acc2, &pOut[2 * CMPLX_DIM * MATRIX_DIM]);
    mve_cmplx_sum_intra_vec_f16(acc3, &pOut[3 * CMPLX_DIM * MATRIX_DIM]);
    pOut += CMPLX_DIM;

    /*
     * move to next B column
     */
    pInB = pInB + CMPLX_DIM;

    vecB = vldrhq_gather_shifted_offset(pInB, vecColBOffs0);

    vecA = vldrhq_f16(pInA0);
    acc0 = vcmulq(vecA, vecB);
    acc0 = vcmlaq_rot90(acc0, vecA, vecB);
    vecA = vldrhq_f16(pInA1);
    acc1 = vcmulq(vecA, vecB);
    acc1 = vcmlaq_rot90(acc1, vecA, vecB);
    vecA = vldrhq_f16(pInA2);
    acc2 = vcmulq(vecA, vecB);
    acc2 = vcmlaq_rot90(acc2, vecA, vecB);
    vecA = vldrhq_f16(pInA3);
    acc3 = vcmulq(vecA, vecB);
    acc3 = vcmlaq_rot90(acc3, vecA, vecB);

    mve_cmplx_sum_intra_vec_f16(acc0, &pOut[0 * CMPLX_DIM * MATRIX_DIM]);
    mve_cmplx_sum_intra_vec_f16(acc1, &pOut[1 * CMPLX_DIM * MATRIX_DIM]);
    mve_cmplx_sum_intra_vec_f16(acc2, &pOut[2 * CMPLX_DIM * MATRIX_DIM]);
    mve_cmplx_sum_intra_vec_f16(acc3, &pOut[3 * CMPLX_DIM * MATRIX_DIM]);
    pOut += CMPLX_DIM;

    /*
     * move to next B column
     */
    pInB = pInB + CMPLX_DIM;

    vecB = vldrhq_gather_shifted_offset(pInB, vecColBOffs0);

    vecA = vldrhq_f16(pInA0);
    acc0 = vcmulq(vecA, vecB);
    acc0 = vcmlaq_rot90(acc0, vecA, vecB);
    vecA = vldrhq_f16(pInA1);
    acc1 = vcmulq(vecA, vecB);
    acc1 = vcmlaq_rot90(acc1, vecA, vecB);
    vecA = vldrhq_f16(pInA2);
    acc2 = vcmulq(vecA, vecB);
    acc2 = vcmlaq_rot90(acc2, vecA, vecB);
    vecA = vldrhq_f16(pInA3);
    acc3 = vcmulq(vecA, vecB);
    acc3 = vcmlaq_rot90(acc3, vecA, vecB);

    mve_cmplx_sum_intra_vec_f16(acc0, &pOut[0 * CMPLX_DIM * MATRIX_DIM]);
    mve_cmplx_sum_intra_vec_f16(acc1, &pOut[1 * CMPLX_DIM * MATRIX_DIM]);
    mve_cmplx_sum_intra_vec_f16(acc2, &pOut[2 * CMPLX_DIM * MATRIX_DIM]);
    mve_cmplx_sum_intra_vec_f16(acc3, &pOut[3 * CMPLX_DIM * MATRIX_DIM]);

    /*
     * Return to application
     */
    return (ARM_MATH_SUCCESS);
#undef MATRIX_DIM
}

arm_status arm_mat_cmplx_mult_f16(
    const arm_matrix_instance_f16 * pSrcA,
    const arm_matrix_instance_f16 * pSrcB,
    arm_matrix_instance_f16 * pDst)
{
    float16_t const *pInB = (float16_t const *) pSrcB->pData;  /* input data matrix pointer B */
    float16_t const *pInA = (float16_t const *) pSrcA->pData;  /* input data matrix pointer A */
    float16_t *pOut = pDst->pData;        /* output data matrix pointer */
    float16_t *px;                        /* Temporary output data matrix pointer */
    uint16_t numRowsA = pSrcA->numRows;   /* number of rows of input matrix A */
    uint16_t numColsB = pSrcB->numCols;   /* number of columns of input matrix B */
    uint16_t numColsA = pSrcA->numCols;   /* number of columns of input matrix A */
    uint16_t col, i = 0U, row = numRowsA; /* loop counters */
    arm_status status;                    /* status of matrix multiplication */
    uint16x8_t vecOffs, vecColBOffs;
    uint32_t blkCnt, rowCnt;              /* loop counters */

#ifdef ARM_MATH_MATRIX_CHECK
    /* Check for matrix mismatch condition */
    if ((pSrcA->numCols != pSrcB->numRows) ||
        (pSrcA->numRows != pDst->numRows)  ||
        (pSrcB->numCols != pDst->numCols))
    {
        /* Set status as ARM_MATH_SIZE_MISMATCH */
        status = ARM_MATH_SIZE_MISMATCH;
    }
    else
#endif /* #ifdef ARM_MATH_MATRIX_CHECK */
    {
        /*
         * small square matrix specialized routines
         */
        if (numRowsA == numColsB && numColsB == numColsA)
        {
            if (numRowsA == 1)
            {
                /* 1x1: single complex multiply */
                pOut[0] = (_Float16)pInA[0] * (_Float16)pInB[0] - (_Float16)pInA[1] * (_Float16)pInB[1];
                pOut[1] = (_Float16)pInA[0] * (_Float16)pInB[1] + (_Float16)pInA[1] * (_Float16)pInB[0];
                return (ARM_MATH_SUCCESS);
            }
            else if (numRowsA == 2)
                return arm_mat_cmplx_mult_f16_2x2_mve(pSrcA, pSrcB, pDst);
            else if (numRowsA == 3)
                return arm_mat_cmplx_mult_f16_3x3_mve(pSrcA, pSrcB, pDst);
            else if (numRowsA == 4)
                return arm_mat_cmplx_mult_f16_4x4_mve(pSrcA, pSrcB, pDst);
        }

        vecColBOffs[0] = 0;
        vecColBOffs[1] = 1;
        vecColBOffs[2] = numColsB * CMPLX_DIM;
        vecColBOffs[3] = (numColsB * CMPLX_DIM) + 1;
        vecColBOffs[4] = 2 * numColsB * CMPLX_DIM;
        vecColBOffs[5] = 2 * (numColsB * CMPLX_DIM) + 1;
        vecColBOffs[6] = 3 * numColsB * CMPLX_DIM;
        vecColBOffs[7] = 3 * (numColsB * CMPLX_DIM) + 1;
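        /*
         * vecColBOffs gathers the {real, imag} pairs of one B column from
         * four consecutive B rows (row stride numColsB * CMPLX_DIM), so
         * each gather load pulls 4 complex B elements of the running
         * dot product.
         */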

        /*
         * The following loop performs the dot-product of each row in pSrcA
         * with each column in pSrcB
         */

        /*
         * row loop: process 4 rows of A per iteration
         */
        rowCnt = row >> 2;
        while (rowCnt > 0u)
        {
            /*
             * Output pointer is set to starting address of the row being processed
             */
            px = pOut + i * CMPLX_DIM;
            i = i + 4 * numColsB;
            /*
             * For every row-wise process, the column loop counter is initialized
             */
            col = numColsB;
            /*
             * For every row-wise process, the pInB pointer is set
             * to the starting address of the pSrcB data
             */
            pInB = (float16_t const *) pSrcB->pData;
            /*
             * column loop
             */
            while (col > 0u)
            {
                /*
                 * compute 4 output elements of the current column, one per
                 * row of the 4-row block; numColsA complex MACs are
                 * accumulated for each of them
                 */
                float16_t const *pSrcA0Vec, *pSrcA1Vec, *pSrcA2Vec, *pSrcA3Vec;
                float16_t const *pInA0 = pInA;
                float16_t const *pInA1 = pInA0 + numColsA * CMPLX_DIM;
                float16_t const *pInA2 = pInA1 + numColsA * CMPLX_DIM;
                float16_t const *pInA3 = pInA2 + numColsA * CMPLX_DIM;
                f16x8_t acc0, acc1, acc2, acc3;

                acc0 = vdupq_n_f16(0.0f16);
                acc1 = vdupq_n_f16(0.0f16);
                acc2 = vdupq_n_f16(0.0f16);
                acc3 = vdupq_n_f16(0.0f16);

                pSrcA0Vec = (float16_t const *) pInA0;
                pSrcA1Vec = (float16_t const *) pInA1;
                pSrcA2Vec = (float16_t const *) pInA2;
                pSrcA3Vec = (float16_t const *) pInA3;

                vecOffs = vecColBOffs;

                /*
                 * process 1 x 4 block output
                 */
                blkCnt = (numColsA * CMPLX_DIM) >> 3;
                while (blkCnt > 0U)
                {
                    f16x8_t vecB, vecA;

                    vecB = vldrhq_gather_shifted_offset_f16(pInB, vecOffs);
                    /*
                     * move Matrix B read offsets, 4 rows down
                     */
                    vecOffs = vaddq_n_u16(vecOffs, (uint16_t) (numColsB * 4 * CMPLX_DIM));

                    vecA = vld1q(pSrcA0Vec);
                    pSrcA0Vec += 8;
                    acc0 = vcmlaq(acc0, vecA, vecB);
                    acc0 = vcmlaq_rot90(acc0, vecA, vecB);

                    vecA = vld1q(pSrcA1Vec);
                    pSrcA1Vec += 8;
                    acc1 = vcmlaq(acc1, vecA, vecB);
                    acc1 = vcmlaq_rot90(acc1, vecA, vecB);

                    vecA = vld1q(pSrcA2Vec);
                    pSrcA2Vec += 8;
                    acc2 = vcmlaq(acc2, vecA, vecB);
                    acc2 = vcmlaq_rot90(acc2, vecA, vecB);

                    vecA = vld1q(pSrcA3Vec);
                    pSrcA3Vec += 8;
                    acc3 = vcmlaq(acc3, vecA, vecB);
                    acc3 = vcmlaq_rot90(acc3, vecA, vecB);

                    blkCnt--;
                }
                /*
                 * Unsupported addressing mode compiler crash
                 */
                /*
                 * tail
                 * (will be merged through tail predication)
                 */
                blkCnt = (numColsA * CMPLX_DIM) & 7;
                if (blkCnt > 0U)
                {
                    mve_pred16_t p0 = vctp16q(blkCnt);
                    f16x8_t vecB, vecA;

                    vecB = vldrhq_gather_shifted_offset_z_f16(pInB, vecOffs, p0);
                    /*
                     * move Matrix B read offsets, 4 rows down
                     */
                    vecOffs = vaddq_n_u16(vecOffs, (uint16_t) (numColsB * 4 * CMPLX_DIM));

                    vecA = vld1q(pSrcA0Vec);
                    acc0 = vcmlaq(acc0, vecA, vecB);
                    acc0 = vcmlaq_rot90(acc0, vecA, vecB);

                    vecA = vld1q(pSrcA1Vec);
                    acc1 = vcmlaq(acc1, vecA, vecB);
                    acc1 = vcmlaq_rot90(acc1, vecA, vecB);

                    vecA = vld1q(pSrcA2Vec);
                    acc2 = vcmlaq(acc2, vecA, vecB);
                    acc2 = vcmlaq_rot90(acc2, vecA, vecB);

                    vecA = vld1q(pSrcA3Vec);
                    acc3 = vcmlaq(acc3, vecA, vecB);
                    acc3 = vcmlaq_rot90(acc3, vecA, vecB);
                }

                mve_cmplx_sum_intra_vec_f16(acc0, &px[0 * CMPLX_DIM * numColsB + 0]);
                mve_cmplx_sum_intra_vec_f16(acc1, &px[1 * CMPLX_DIM * numColsB + 0]);
                mve_cmplx_sum_intra_vec_f16(acc2, &px[2 * CMPLX_DIM * numColsB + 0]);
                mve_cmplx_sum_intra_vec_f16(acc3, &px[3 * CMPLX_DIM * numColsB + 0]);
                px += CMPLX_DIM;

                /*
                 * Decrement the column loop counter
                 */
                col--;
                /*
                 * Update the pointer pInB to point to the starting address of the next column
                 */
                pInB = (float16_t const *) pSrcB->pData + (numColsB - col) * CMPLX_DIM;
            }
            /*
             * Update the pointer pInA to point to the starting address of the next row
             */
            pInA += (numColsA * 4) * CMPLX_DIM;
            /*
             * Decrement the row loop counter
             */
            rowCnt--;
        }

        /*
         * process any remaining rows
         */
        rowCnt = row & 3;
        while (rowCnt > 0u)
        {
            /*
             * Output pointer is set to starting address of the row being processed
             */
            px = pOut + i * CMPLX_DIM;
            i = i + numColsB;
            /*
             * For every row-wise process, the column loop counter is initialized
             */
            col = numColsB;
            /*
             * For every row-wise process, the pInB pointer is set
             * to the starting address of the pSrcB data
             */
            pInB = (float16_t const *) pSrcB->pData;
            /*
             * column loop
             */
            while (col > 0u)
            {
                /*
                 * compute one output element; numColsA complex MACs are
                 * accumulated for it
                 */
                float16_t const *pSrcA0Vec;
                float16_t const *pInA0 = pInA;
                f16x8_t acc0;

                acc0 = vdupq_n_f16(0.0f16);
                pSrcA0Vec = (float16_t const *) pInA0;

                vecOffs = vecColBOffs;

                /*
                 * accumulate 4 complex products per vector iteration
                 */
                blkCnt = (numColsA * CMPLX_DIM) >> 3;
                while (blkCnt > 0U)
                {
                    f16x8_t vecB, vecA;

                    vecB = vldrhq_gather_shifted_offset(pInB, vecOffs);
                    /*
                     * move Matrix B read offsets, 4 rows down
                     */
                    vecOffs = vaddq_n_u16(vecOffs, (uint16_t) (4 * numColsB * CMPLX_DIM));

                    vecA = vld1q(pSrcA0Vec);
                    pSrcA0Vec += 8;
                    acc0 = vcmlaq(acc0, vecA, vecB);
                    acc0 = vcmlaq_rot90(acc0, vecA, vecB);

                    blkCnt--;
                }
                /*
                 * tail
                 */
                blkCnt = (numColsA * CMPLX_DIM) & 7;
                if (blkCnt > 0U)
                {
                    mve_pred16_t p0 = vctp16q(blkCnt);
                    f16x8_t vecB, vecA;

                    vecB = vldrhq_gather_shifted_offset_z(pInB, vecOffs, p0);
                    vecA = vld1q(pSrcA0Vec);
                    acc0 = vcmlaq(acc0, vecA, vecB);
                    acc0 = vcmlaq_rot90(acc0, vecA, vecB);
                }

                mve_cmplx_sum_intra_vec_f16(acc0, &px[0]);
                px += CMPLX_DIM;

                /*
                 * Decrement the column loop counter
                 */
                col--;
                /*
                 * Update the pointer pInB to point to the starting address of the next column
                 */
                pInB = (float16_t const *) pSrcB->pData + (numColsB - col) * CMPLX_DIM;
            }
            /*
             * Update the pointer pInA to point to the starting address of the next row
             */
            pInA += numColsA * CMPLX_DIM;
            rowCnt--;
        }

        /*
         * set status as ARM_MATH_SUCCESS
         */
        status = ARM_MATH_SUCCESS;
    }

    /*
     * Return to application
     */
    return (status);
}

#else
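
/*
 * Generic scalar implementation, used when the Helium (MVE) float16
 * specializations above are not built.
 */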
arm_status arm_mat_cmplx_mult_f16(
    const arm_matrix_instance_f16 * pSrcA,
    const arm_matrix_instance_f16 * pSrcB,
    arm_matrix_instance_f16 * pDst)
{
    float16_t *pIn1 = pSrcA->pData;       /* Input data matrix pointer A */
    float16_t *pIn2 = pSrcB->pData;       /* Input data matrix pointer B */
    float16_t *pInA = pSrcA->pData;       /* Input data matrix pointer A */
    float16_t *pOut = pDst->pData;        /* Output data matrix pointer */
    float16_t *px;                        /* Temporary output data matrix pointer */
    uint16_t numRowsA = pSrcA->numRows;   /* Number of rows of input matrix A */
    uint16_t numColsB = pSrcB->numCols;   /* Number of columns of input matrix B */
    uint16_t numColsA = pSrcA->numCols;   /* Number of columns of input matrix A */
    _Float16 sumReal, sumImag;            /* Accumulators */
    _Float16 a1, b1, c1, d1;
    uint32_t col, i = 0U, j, row = numRowsA, colCnt;  /* loop counters */
    arm_status status;                    /* status of matrix multiplication */

#if defined (ARM_MATH_LOOPUNROLL)
    _Float16 a0, b0, c0, d0;
#endif

#ifdef ARM_MATH_MATRIX_CHECK
    /* Check for matrix mismatch condition */
    if ((pSrcA->numCols != pSrcB->numRows) ||
        (pSrcA->numRows != pDst->numRows)  ||
        (pSrcB->numCols != pDst->numCols))
    {
        /* Set status as ARM_MATH_SIZE_MISMATCH */
        status = ARM_MATH_SIZE_MISMATCH;
    }
    else
#endif /* #ifdef ARM_MATH_MATRIX_CHECK */
    {
        /* The following loop performs the dot-product of each row in pSrcA with each column in pSrcB */

        /* row loop */
        do
        {
            /* Output pointer is set to starting address of the row being processed */
            px = pOut + 2 * i;

            /* For every row-wise process, the column loop counter is initialized */
            col = numColsB;

            /* For every row-wise process, the pIn2 pointer is set
            ** to the starting address of the pSrcB data */
            pIn2 = pSrcB->pData;

            j = 0U;

            /* column loop */
            do
            {
                /* Set the accumulators to zero */
                sumReal = 0.0f16;
                sumImag = 0.0f16;

                /* Initiate pointer pIn1 to point to the starting address of the row of A being processed */
                pIn1 = pInA;
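                /*
                 * Each step below accumulates one complex multiply:
                 * (a + jb) * (c + jd) = (a*c - b*d) + j(b*c + a*d)
                 */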
#if defined (ARM_MATH_LOOPUNROLL)

                /* Apply loop unrolling and compute 4 MACs simultaneously. */
                colCnt = numColsA >> 2U;

                /* matrix multiplication */
                while (colCnt > 0U)
                {
                    /* Reading real part of complex matrix A */
                    a0 = *pIn1;

                    /* Reading real part of complex matrix B */
                    c0 = *pIn2;

                    /* Reading imaginary part of complex matrix A */
                    b0 = *(pIn1 + 1U);

                    /* Reading imaginary part of complex matrix B */
                    d0 = *(pIn2 + 1U);

                    /* Multiply and Accumulate */
                    sumReal += a0 * c0;
                    sumImag += b0 * c0;

                    /* update pointers */
                    pIn1 += 2U;
                    pIn2 += 2 * numColsB;

                    /* Multiply and Accumulate */
                    sumReal -= b0 * d0;
                    sumImag += a0 * d0;

                    /* c(m,n) = a(1,1) * b(1,1) + a(1,2) * b(2,1) + .... + a(m,p) * b(p,n) */

                    /* read real and imag values from pSrcA and pSrcB buffer */
                    a1 = *(pIn1     );
                    c1 = *(pIn2     );
                    b1 = *(pIn1 + 1U);
                    d1 = *(pIn2 + 1U);

                    /* Multiply and Accumulate */
                    sumReal += a1 * c1;
                    sumImag += b1 * c1;

                    /* update pointers */
                    pIn1 += 2U;
                    pIn2 += 2 * numColsB;

                    /* Multiply and Accumulate */
                    sumReal -= b1 * d1;
                    sumImag += a1 * d1;

                    a0 = *(pIn1     );
                    c0 = *(pIn2     );
                    b0 = *(pIn1 + 1U);
                    d0 = *(pIn2 + 1U);

                    /* Multiply and Accumulate */
                    sumReal += a0 * c0;
                    sumImag += b0 * c0;

                    /* update pointers */
                    pIn1 += 2U;
                    pIn2 += 2 * numColsB;

                    /* Multiply and Accumulate */
                    sumReal -= b0 * d0;
                    sumImag += a0 * d0;

                    /* c(m,n) = a(1,1) * b(1,1) + a(1,2) * b(2,1) + .... + a(m,p) * b(p,n) */

                    a1 = *(pIn1     );
                    c1 = *(pIn2     );
                    b1 = *(pIn1 + 1U);
                    d1 = *(pIn2 + 1U);

                    /* Multiply and Accumulate */
                    sumReal += a1 * c1;
                    sumImag += b1 * c1;

                    /* update pointers */
                    pIn1 += 2U;
                    pIn2 += 2 * numColsB;

                    /* Multiply and Accumulate */
                    sumReal -= b1 * d1;
                    sumImag += a1 * d1;

                    /* Decrement loop count */
                    colCnt--;
                }

                /* If the number of columns of pSrcA is not a multiple of 4,
                ** compute any remaining MACs here. No loop unrolling is used. */
                colCnt = numColsA % 0x4U;

#else

                /* Initialize colCnt with the number of columns of A */
                colCnt = numColsA;

#endif /* #if defined (ARM_MATH_LOOPUNROLL) */
                while (colCnt > 0U)
                {
                    /* c(m,n) = a(1,1) * b(1,1) + a(1,2) * b(2,1) + .... + a(m,p) * b(p,n) */
                    a1 = *(pIn1     );
                    c1 = *(pIn2     );
                    b1 = *(pIn1 + 1U);
                    d1 = *(pIn2 + 1U);

                    /* Multiply and Accumulate */
                    sumReal += a1 * c1;
                    sumImag += b1 * c1;

                    /* update pointers */
                    pIn1 += 2U;
                    pIn2 += 2 * numColsB;

                    /* Multiply and Accumulate */
                    sumReal -= b1 * d1;
                    sumImag += a1 * d1;

                    /* Decrement loop counter */
                    colCnt--;
                }

                /* Store result in destination buffer */
                *px++ = sumReal;
                *px++ = sumImag;

                /* Update pointer pIn2 to point to starting address of next column */
                j++;
                pIn2 = pSrcB->pData + 2U * j;

                /* Decrement column loop counter */
                col--;

            } while (col > 0U);

            /* Update pointer pInA to point to starting address of next row */
            i = i + numColsB;
            pInA = pInA + 2 * numColsA;

            /* Decrement row loop counter */
            row--;

        } while (row > 0U);

        /* Set status as ARM_MATH_SUCCESS */
        status = ARM_MATH_SUCCESS;
    }

    /* Return to application */
    return (status);
}
#endif /* defined(ARM_MATH_MVE_FLOAT16) && !defined(ARM_MATH_AUTOVECTORIZE) && !defined(__CMSIS_GCC_H) */

/**
  @} end of CmplxMatrixMult group
 */

#endif /* #if defined(ARM_FLOAT16_SUPPORTED) */