/* ----------------------------------------------------------------------
 * Project:      CMSIS DSP Library
 * Title:        arm_mat_mult_f32.c
 * Description:  Floating-point matrix multiplication
 *
 * $Date:        23 April 2021
 * $Revision:    V1.9.0
 *
 * Target Processor: Cortex-M and Cortex-A cores
 * -------------------------------------------------------------------- */
/*
 * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved.
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the License); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an AS IS BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "dsp/matrix_functions.h"

#if defined(ARM_MATH_NEON)
#define GROUPOFROWS 8
#endif

/**
 * @ingroup groupMatrix
 */

/**
 * @defgroup MatrixMult Matrix Multiplication
 *
 * Multiplies two matrices.
 *
 * \image html MatrixMultiplication.gif "Multiplication of two 3 x 3 matrices"
 *
 * Matrix multiplication is only defined if the number of columns of the
 * first matrix equals the number of rows of the second matrix.
 * Multiplying an <code>M x N</code> matrix with an <code>N x P</code> matrix results
 * in an <code>M x P</code> matrix.
 * When matrix size checking is enabled, the functions check: (1) that the inner dimensions of
 * <code>pSrcA</code> and <code>pSrcB</code> are equal; and (2) that the size of the output
 * matrix equals the outer dimensions of <code>pSrcA</code> and <code>pSrcB</code>.
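 *
 * @par Example
 * A minimal usage sketch (the array names and values below are illustrative
 * only; they are not part of the library):
 * @code
 * float32_t aData[2 * 3] = { 1.0f, 2.0f, 3.0f,
 *                            4.0f, 5.0f, 6.0f };   // 2 x 3 operand A
 * float32_t bData[3 * 2] = { 7.0f,  8.0f,
 *                            9.0f, 10.0f,
 *                           11.0f, 12.0f };        // 3 x 2 operand B
 * float32_t cData[2 * 2];                          // 2 x 2 result C = A * B
 *
 * arm_matrix_instance_f32 A, B, C;
 * arm_mat_init_f32(&A, 2, 3, aData);
 * arm_mat_init_f32(&B, 3, 2, bData);
 * arm_mat_init_f32(&C, 2, 2, cData);
 *
 * if (arm_mat_mult_f32(&A, &B, &C) != ARM_MATH_SUCCESS)
 * {
 *   // Size mismatch; only reported when ARM_MATH_MATRIX_CHECK is defined.
 * }
 * @endcode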
 */

/**
 * @addtogroup MatrixMult
 * @{
 */

#if defined(ARM_MATH_MVEF) && !defined(ARM_MATH_AUTOVECTORIZE)

#define MATRIX_DIM3 3
#define MATRIX_DIM4 4
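
/*
 * Operand and result layout for the 2x2 kernel (row-major storage), written
 * out in the same style as the 3x3 note further below:
 *
 * A = {{a00, a01},
 *      {a10, a11}}
 * B = {{b00, b01},
 *      {b10, b11}}
 *
 * Dst = {{a00 b00 + a01 b10, a00 b01 + a01 b11},
 *        {a10 b00 + a11 b10, a10 b01 + a11 b11}}
 */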
__STATIC_INLINE arm_status arm_mat_mult_f32_2x2_mve(
  const arm_matrix_instance_f32 *pSrcA,
  const arm_matrix_instance_f32 *pSrcB,
  arm_matrix_instance_f32 *pDst)
{
  /* {a00, a00, a10, a10} */
  static const uint32_t offsetA0[4] = { 0, 0, 2, 2 };
  /* {b00, b01, b00, b01} */
  static const uint32_t offsetB0[4] = { 0, 1, 0, 1 };
  /* {a01, a01, a11, a11} */
  static const uint32_t offsetA1[4] = { 1, 1, 3, 3 };
  /* {b10, b11, b10, b11} */
  static const uint32_t offsetB1[4] = { 2, 3, 2, 3 };

  uint32x4_t vecOffsA, vecOffsB;
  f32x4_t    vecInA, vecInB, vecDst;

  vecOffsA = vldrwq_u32((uint32_t const *) offsetA0);
  vecOffsB = vldrwq_u32((uint32_t const *) offsetB0);

  vecInA = vldrwq_gather_shifted_offset((float32_t const *) pSrcA->pData, vecOffsA);
  vecInB = vldrwq_gather_shifted_offset((float32_t const *) pSrcB->pData, vecOffsB);

  vecDst = vmulq(vecInA, vecInB);

  vecOffsA = vldrwq_u32((uint32_t const *) offsetA1);
  vecOffsB = vldrwq_u32((uint32_t const *) offsetB1);

  vecInA = vldrwq_gather_shifted_offset((float32_t const *) pSrcA->pData, vecOffsA);
  vecInB = vldrwq_gather_shifted_offset((float32_t const *) pSrcB->pData, vecOffsB);

  vecDst = vfmaq(vecDst, vecInA, vecInB);

  vstrwq_f32(pDst->pData, vecDst);

  return (ARM_MATH_SUCCESS);
}

/*
 * A = {{a00, a01, a02},
 *      {a10, a11, a12},
 *      {a20, a21, a22}}
 * B = {{b00, b01, b02},
 *      {b10, b11, b12},
 *      {b20, b21, b22}}
 *
 * Dst = {{a00 b00 + a01 b10 + a02 b20, a00 b01 + a01 b11 + a02 b21, a00 b02 + a01 b12 + a02 b22},
 *        {a10 b00 + a11 b10 + a12 b20, a10 b01 + a11 b11 + a12 b21, a10 b02 + a11 b12 + a12 b22},
 *        {a20 b00 + a21 b10 + a22 b20, a20 b01 + a21 b11 + a22 b21, a20 b02 + a21 b12 + a22 b22}}
 */
__STATIC_INLINE arm_status arm_mat_mult_f32_3x3_mve(
  const arm_matrix_instance_f32 *pSrcA,
  const arm_matrix_instance_f32 *pSrcB,
  arm_matrix_instance_f32 *pDst)
{
  float32_t *pInB = pSrcB->pData;  /* input data matrix pointer B */
  float32_t *pInA = pSrcA->pData;  /* input data matrix pointer A */
  float32_t *pOut = pDst->pData;   /* output data matrix pointer */
  float32_t *pInA0, *pInA1, *pInA2;
  f32x4_t    vecMac0, vecMac1, vecMac2;
  f32x4_t    vecInB;
  float32_t const *pSrBVec;

  pSrBVec = (float32_t const *) pInB;

  pInA0 = pInA;
  pInA1 = pInA0 + MATRIX_DIM3;
  pInA2 = pInA1 + MATRIX_DIM3;

  /* enable predication to disable last (4th) vector element */
  mve_pred16_t p0 = vctp32q(MATRIX_DIM3);

  /* load {b0,0, b0,1, b0,2, 0} */
  vecInB = vldrwq_z_f32(pSrBVec, p0);
  pSrBVec += MATRIX_DIM3;

  vecMac0 = vmulq(vecInB, *pInA0++);
  vecMac1 = vmulq(vecInB, *pInA1++);
  vecMac2 = vmulq(vecInB, *pInA2++);

  /* load {b1,0, b1,1, b1,2, 0} */
  vecInB = vldrwq_z_f32(pSrBVec, p0);
  pSrBVec += MATRIX_DIM3;

  vecMac0 = vfmaq(vecMac0, vecInB, *pInA0++);
  vecMac1 = vfmaq(vecMac1, vecInB, *pInA1++);
  vecMac2 = vfmaq(vecMac2, vecInB, *pInA2++);

  /* load {b2,0, b2,1, b2,2, 0} */
  vecInB = vldrwq_z_f32(pSrBVec, p0);
  pSrBVec += MATRIX_DIM3;

  vecMac0 = vfmaq(vecMac0, vecInB, *pInA0++);
  vecMac1 = vfmaq(vecMac1, vecInB, *pInA1++);
  vecMac2 = vfmaq(vecMac2, vecInB, *pInA2++);

  /* partial vector stores */
  vstrwq_p_f32(pOut, vecMac0, p0);
  pOut += MATRIX_DIM3;
  vstrwq_p_f32(pOut, vecMac1, p0);
  pOut += MATRIX_DIM3;
  vstrwq_p_f32(pOut, vecMac2, p0);

  /* Return to application */
  return (ARM_MATH_SUCCESS);
}

__STATIC_INLINE arm_status arm_mat_mult_f32_4x4_mve(
  const arm_matrix_instance_f32 *pSrcA,
  const arm_matrix_instance_f32 *pSrcB,
  arm_matrix_instance_f32 *pDst)
{
  float32_t const *pSrBVec;
  float32_t *pInB = pSrcB->pData;  /* input data matrix pointer B */
  float32_t *pInA = pSrcA->pData;  /* input data matrix pointer A */
  float32_t *pOut = pDst->pData;   /* output data matrix pointer */
  float32_t *pInA0, *pInA1, *pInA2, *pInA3;
  f32x4_t    vecMac0, vecMac1, vecMac2, vecMac3;
  f32x4_t    vecInB;

  pSrBVec = (float32_t const *) pInB;

  pInA0 = pInA;
  pInA1 = pInA0 + MATRIX_DIM4;
  pInA2 = pInA1 + MATRIX_DIM4;
  pInA3 = pInA2 + MATRIX_DIM4;

  /* load {b0,0, b0,1, b0,2, b0,3} */
  vecInB = vld1q(pSrBVec);
  pSrBVec += MATRIX_DIM4;

  vecMac0 = vmulq(vecInB, *pInA0++);
  vecMac1 = vmulq(vecInB, *pInA1++);
  vecMac2 = vmulq(vecInB, *pInA2++);
  vecMac3 = vmulq(vecInB, *pInA3++);

  /* load {b1,0, b1,1, b1,2, b1,3} */
  vecInB = vld1q(pSrBVec);
  pSrBVec += MATRIX_DIM4;

  vecMac0 = vfmaq(vecMac0, vecInB, *pInA0++);
  vecMac1 = vfmaq(vecMac1, vecInB, *pInA1++);
  vecMac2 = vfmaq(vecMac2, vecInB, *pInA2++);
  vecMac3 = vfmaq(vecMac3, vecInB, *pInA3++);

  /* load {b2,0, b2,1, b2,2, b2,3} */
  vecInB = vld1q(pSrBVec);
  pSrBVec += MATRIX_DIM4;

  vecMac0 = vfmaq(vecMac0, vecInB, *pInA0++);
  vecMac1 = vfmaq(vecMac1, vecInB, *pInA1++);
  vecMac2 = vfmaq(vecMac2, vecInB, *pInA2++);
  vecMac3 = vfmaq(vecMac3, vecInB, *pInA3++);

  /* load {b3,0, b3,1, b3,2, b3,3} */
  vecInB = vld1q(pSrBVec);
  pSrBVec += MATRIX_DIM4;

  vecMac0 = vfmaq(vecMac0, vecInB, *pInA0++);
  vecMac1 = vfmaq(vecMac1, vecInB, *pInA1++);
  vecMac2 = vfmaq(vecMac2, vecInB, *pInA2++);
  vecMac3 = vfmaq(vecMac3, vecInB, *pInA3++);

  vst1q(pOut, vecMac0);
  pOut += MATRIX_DIM4;
  vst1q(pOut, vecMac1);
  pOut += MATRIX_DIM4;
  vst1q(pOut, vecMac2);
  pOut += MATRIX_DIM4;
  vst1q(pOut, vecMac3);

  /* Return to application */
  return (ARM_MATH_SUCCESS);
}

/**
 * @brief Floating-point matrix multiplication.
 * @param[in]  *pSrcA points to the first input matrix structure
 * @param[in]  *pSrcB points to the second input matrix structure
 * @param[out] *pDst  points to output matrix structure
 * @return The function returns either
 * <code>ARM_MATH_SIZE_MISMATCH</code> or <code>ARM_MATH_SUCCESS</code> based on the outcome of size checking.
 */
arm_status arm_mat_mult_f32(
  const arm_matrix_instance_f32 * pSrcA,
  const arm_matrix_instance_f32 * pSrcB,
  arm_matrix_instance_f32 * pDst)
{
  float32_t *pInB = pSrcB->pData;        /* input data matrix pointer B */
  float32_t *pInA = pSrcA->pData;        /* input data matrix pointer A */
  float32_t *pOut = pDst->pData;         /* output data matrix pointer */
  int        numRowsA = pSrcA->numRows;  /* number of rows of input matrix A */
  int        numColsB = pSrcB->numCols;  /* number of columns of input matrix B */
  int        numColsA = pSrcA->numCols;  /* number of columns of input matrix A */
  uint32_t   blkCnt;                     /* loop counters */
  uint32_t   i;
  arm_status status;

#ifdef ARM_MATH_MATRIX_CHECK

  /* Check for matrix mismatch condition */
  if ((pSrcA->numCols != pSrcB->numRows) ||
      (pSrcA->numRows != pDst->numRows) || (pSrcB->numCols != pDst->numCols))
  {
    /* Set status as ARM_MATH_SIZE_MISMATCH */
    status = ARM_MATH_SIZE_MISMATCH;
  }
  else
#endif /* #ifdef ARM_MATH_MATRIX_CHECK */
  {
    /* small square matrix specialized routines */
    if (numRowsA == numColsB && numColsB == numColsA)
    {
      if (numRowsA == 1)
      {
        pOut[0] = pInA[0] * pInB[0];
        return (ARM_MATH_SUCCESS);
      }
      else if (numRowsA == 2)
        return arm_mat_mult_f32_2x2_mve(pSrcA, pSrcB, pDst);
      else if (numRowsA == 3)
        return arm_mat_mult_f32_3x3_mve(pSrcA, pSrcB, pDst);
      else if (numRowsA == 4)
        return arm_mat_mult_f32_4x4_mve(pSrcA, pSrcB, pDst);
    }

    /* main loop: process 4 rows at a time */
    i = numRowsA >> 2;
    while (i > 0U)
    {
      float32_t *pInA0, *pInA1, *pInA2, *pInA3;
      float32_t *pInB0;
      float32_t *pOut0, *pOut1, *pOut2, *pOut3;
      f32x4_t    vecMac0, vecMac1, vecMac2, vecMac3;
      f32x4_t    vecInB;

      /* pointers to 4 consecutive output rows */
      pOut0 = pOut;
      pOut1 = pOut0 + numColsB;
      pOut2 = pOut1 + numColsB;
      pOut3 = pOut2 + numColsB;
      pInB0 = pInB;

      uint32_t k = numColsB >> 2;
      while (k > 0U)
      {
        /* pointers to 4 consecutive Matrix A rows */
        pInA0 = pInA;
        pInA1 = pInA0 + numColsA;
        pInA2 = pInA1 + numColsA;
        pInA3 = pInA2 + numColsA;

        vecMac0 = vdupq_n_f32(0.0f);
        vecMac1 = vdupq_n_f32(0.0f);
        vecMac2 = vdupq_n_f32(0.0f);
        vecMac3 = vdupq_n_f32(0.0f);

        blkCnt = numColsA;
        while (blkCnt > 0U)
        {
          /* load {bi,4n+0, bi,4n+1, bi,4n+2, bi,4n+3} */
          vecInB = *(f32x4_t *) pInB0;  /* vldrwq_f32(pInB0, 0); */

          vecMac0 = vfmaq(vecMac0, vecInB, *pInA0++);
          vecMac1 = vfmaq(vecMac1, vecInB, *pInA1++);
          vecMac2 = vfmaq(vecMac2, vecInB, *pInA2++);
          vecMac3 = vfmaq(vecMac3, vecInB, *pInA3++);

          pInB0 = pInB0 + numColsB;

          /* Decrement the blockSize loop counter */
          blkCnt--;
        }

        /* Store the results (4 x 4 block) in the destination buffer */
        vst1q(pOut0, vecMac0);
        pOut0 += 4;
        vst1q(pOut1, vecMac1);
        pOut1 += 4;
        vst1q(pOut2, vecMac2);
        pOut2 += 4;
        vst1q(pOut3, vecMac3);
        pOut3 += 4;

        /* rewind */
        pInB0 -= (numColsB * numColsA) - 4;

        k--;
      }

      int colBLeft = numColsB & 3;
      if (colBLeft)
      {
        pInA0 = pInA;
        pInA1 = pInA0 + numColsA;
        pInA2 = pInA1 + numColsA;
        pInA3 = pInA2 + numColsA;

        mve_pred16_t p0 = vctp32q(colBLeft);

        vecMac0 = vdupq_n_f32(0.0f);
        vecMac1 = vdupq_n_f32(0.0f);
        vecMac2 = vdupq_n_f32(0.0f);
        vecMac3 = vdupq_n_f32(0.0f);

        blkCnt = numColsA;
        while (blkCnt > 0U)
        {
          /* load {bi,4n+0, bi,4n+1, bi,4n+2, bi,4n+3} */
          vecInB = vldrwq_z_f32(pInB0, p0);

          vecMac0 = vfmaq(vecMac0, vecInB, *pInA0++);
          vecMac1 = vfmaq(vecMac1, vecInB, *pInA1++);
          vecMac2 = vfmaq(vecMac2, vecInB, *pInA2++);
          vecMac3 = vfmaq(vecMac3, vecInB, *pInA3++);

          pInB0 = pInB0 + numColsB;

          /* Decrement the blockSize loop counter */
          blkCnt--;
        }

        /* Store the results (4 x colBLeft block) in the destination buffer */
        vstrwq_p_f32(pOut0, vecMac0, p0);
        vstrwq_p_f32(pOut1, vecMac1, p0);
        vstrwq_p_f32(pOut2, vecMac2, p0);
        vstrwq_p_f32(pOut3, vecMac3, p0);
      }

      /* move to next rows */
      pInA += 4 * numColsA;
      pOut += 4 * numColsB;
      i--;
    }

    /*
     * number of rows of Matrix A is not a multiple of 4:
     * process the remaining rows one at a time
     */
    if (numRowsA & 3)
    {
      i = numRowsA & 3;
      while (i > 0U)
      {
        float32_t *pInA0;
        float32_t *pInB0;
        float32_t *pOut0;
        f32x4_t    vecInB;
        f32x4_t    vecMac0;

        pOut0 = pOut;
        pInB0 = pInB;

        uint32_t k = numColsB >> 2;
        while (k > 0U)
        {
          pInA0 = pInA;

          vecMac0 = vdupq_n_f32(0.0f);
          blkCnt = numColsA;
          while (blkCnt > 0U)
          {
            /* load {bi,4n+0, bi,4n+1, bi,4n+2, bi,4n+3} */
            vecInB = *(f32x4_t *) pInB0;  /* vldrwq_f32(pInB0, 0); */

            vecMac0 = vfmaq(vecMac0, vecInB, *pInA0++);

            pInB0 = pInB0 + numColsB;

            /* Decrement the blockSize loop counter */
            blkCnt--;
          }

          /* Store the results (1 x 4 block) in the destination buffer */
          vst1q(pOut0, vecMac0);
          pOut0 += 4;

          /* rewind */
          pInB0 -= (numColsB * numColsA) - 4;

          k--;
        }

        int colBLeft = numColsB & 3;
        if (colBLeft)
        {
          pInA0 = pInA;
          mve_pred16_t p0 = vctp32q(colBLeft);

          vecMac0 = vdupq_n_f32(0.0f);
          blkCnt = numColsA;
          while (blkCnt > 0U)
          {
            /* load {bi,4n+0, bi,4n+1, bi,4n+2, bi,4n+3} */
            vecInB = vldrwq_z_f32(pInB0, p0);

            vecMac0 = vfmaq(vecMac0, vecInB, *pInA0++);

            pInB0 = pInB0 + numColsB;

            /* Decrement the blockSize loop counter */
            blkCnt--;
          }

          /* Store the results (1 x colBLeft block) in the destination buffer */
          vstrwq_p_f32(pOut0, vecMac0, p0);
        }

        /* move to next row */
        pInA += 1 * numColsA;
        pOut += 1 * numColsB;
        i--;
      }
    }
    status = ARM_MATH_SUCCESS;
  }

  /* Return to application */
  return (status);
}
#else

#if defined(ARM_MATH_NEON)

/**
 * @brief Floating-point matrix multiplication.
 * @param[in]  *pSrcA points to the first input matrix structure
 * @param[in]  *pSrcB points to the second input matrix structure
 * @param[out] *pDst  points to output matrix structure
 * @return The function returns either
 * <code>ARM_MATH_SIZE_MISMATCH</code> or <code>ARM_MATH_SUCCESS</code> based on the outcome of size checking.
 */
arm_status arm_mat_mult_f32(
  const arm_matrix_instance_f32 * pSrcA,
  const arm_matrix_instance_f32 * pSrcB,
  arm_matrix_instance_f32 * pDst)
{
  float32_t *pIn1 = pSrcA->pData;      /* input data matrix pointer A */
  float32_t *pIn2 = pSrcB->pData;      /* input data matrix pointer B */
  float32_t *pInA = pSrcA->pData;      /* input data matrix pointer A */
  float32_t *pOut = pDst->pData;       /* output data matrix pointer */
  float32_t *px;                       /* Temporary output data matrix pointer */
  float32_t sum;                       /* Accumulator */
  uint16_t numRowsA = pSrcA->numRows;  /* number of rows of input matrix A */
  uint16_t numColsB = pSrcB->numCols;  /* number of columns of input matrix B */
  uint16_t numColsA = pSrcA->numCols;  /* number of columns of input matrix A */
  uint16_t col, i = 0U, j, row = numRowsA, rowCnt, colCnt;  /* loop counters */
  arm_status status;                   /* status of matrix multiplication */

  float32x4_t a0V, a1V, a2V, a3V, a4V, a5V, a6V, a7V;
  float32x4_t acc0, acc1, acc2, acc3, acc4, acc5, acc6, acc7, temp;
  float32x2_t accum = vdup_n_f32(0);
  float32_t *pIn1B = pSrcA->pData;
  float32_t *pIn1C = pSrcA->pData;
  float32_t *pIn1D = pSrcA->pData;
  float32_t *pIn1E = pSrcA->pData;
  float32_t *pIn1F = pSrcA->pData;
  float32_t *pIn1G = pSrcA->pData;
  float32_t *pIn1H = pSrcA->pData;
  float32_t *pxB, *pxC, *pxD, *pxE, *pxF, *pxG, *pxH;  /* Temporary output data matrix pointers */
  float32_t sum0, sum1, sum2, sum3, sum4, sum5, sum6, sum7;

#ifdef ARM_MATH_MATRIX_CHECK

  /* Check for matrix mismatch condition */
  if ((pSrcA->numCols != pSrcB->numRows) ||
      (pSrcA->numRows != pDst->numRows) || (pSrcB->numCols != pDst->numCols))
  {
    /* Set status as ARM_MATH_SIZE_MISMATCH */
    status = ARM_MATH_SIZE_MISMATCH;
  }
  else
#endif /* #ifdef ARM_MATH_MATRIX_CHECK */
  {
    /* The following loop performs the dot-product of each row in pSrcA with each column in pSrcB */

    /* Row loop: process GROUPOFROWS (8) rows at a time */
    rowCnt = row >> 3;

    while (rowCnt > 0)
    {
      /* Output pointer is set to starting address of the row being processed */
      px  = pOut + GROUPOFROWS * i;
      pxB = px + numColsB;
      pxC = px + 2 * numColsB;
      pxD = px + 3 * numColsB;
      pxE = px + 4 * numColsB;
      pxF = px + 5 * numColsB;
      pxG = px + 6 * numColsB;
      pxH = px + 7 * numColsB;

      /* For every row-wise pass, the column loop counter is initialized */
      col = numColsB;

      /* For every row-wise pass, pIn2 is reset to the starting address of the pSrcB data */
      pIn2 = pSrcB->pData;

      j = 0U;

      /* Column loop */
      do
      {
        /* Set the accumulators to zero */
        sum0 = 0.0f;
        sum1 = 0.0f;
        sum2 = 0.0f;
        sum3 = 0.0f;
        sum4 = 0.0f;
        sum5 = 0.0f;
        sum6 = 0.0f;
        sum7 = 0.0f;

        /* Initialize pIn1 to point to the starting address of the current group of pSrcA rows */
        pIn1  = pInA;
        pIn1B = pIn1 + numColsA;
        pIn1C = pIn1 + 2 * numColsA;
        pIn1D = pIn1 + 3 * numColsA;
        pIn1E = pIn1 + 4 * numColsA;
        pIn1F = pIn1 + 5 * numColsA;
        pIn1G = pIn1 + 6 * numColsA;
        pIn1H = pIn1 + 7 * numColsA;

        acc0 = vdupq_n_f32(0.0);
        acc1 = vdupq_n_f32(0.0);
        acc2 = vdupq_n_f32(0.0);
        acc3 = vdupq_n_f32(0.0);
        acc4 = vdupq_n_f32(0.0);
        acc5 = vdupq_n_f32(0.0);
        acc6 = vdupq_n_f32(0.0);
        acc7 = vdupq_n_f32(0.0);

        /* Compute 4 MACs simultaneously. */
        colCnt = numColsA >> 2U;

        /* Matrix multiplication */
        while (colCnt > 0U)
        {
          /* c(m,n) = a(1,1) * b(1,1) + a(1,2) * b(2,1) + ... + a(m,p) * b(p,n) */
          a0V = vld1q_f32(pIn1);
          a1V = vld1q_f32(pIn1B);
          a2V = vld1q_f32(pIn1C);
          a3V = vld1q_f32(pIn1D);
          a4V = vld1q_f32(pIn1E);
          a5V = vld1q_f32(pIn1F);
          a6V = vld1q_f32(pIn1G);
          a7V = vld1q_f32(pIn1H);
          pIn1  += 4;
          pIn1B += 4;
          pIn1C += 4;
          pIn1D += 4;
          pIn1E += 4;
          pIn1F += 4;
          pIn1G += 4;
          pIn1H += 4;

          /* Gather 4 consecutive elements of the pSrcB column into one vector */
          temp = vsetq_lane_f32(*pIn2, temp, 0);
          pIn2 += numColsB;
          temp = vsetq_lane_f32(*pIn2, temp, 1);
          pIn2 += numColsB;
          temp = vsetq_lane_f32(*pIn2, temp, 2);
          pIn2 += numColsB;
          temp = vsetq_lane_f32(*pIn2, temp, 3);
          pIn2 += numColsB;

          acc0 = vmlaq_f32(acc0, a0V, temp);
          acc1 = vmlaq_f32(acc1, a1V, temp);
          acc2 = vmlaq_f32(acc2, a2V, temp);
          acc3 = vmlaq_f32(acc3, a3V, temp);
          acc4 = vmlaq_f32(acc4, a4V, temp);
          acc5 = vmlaq_f32(acc5, a5V, temp);
          acc6 = vmlaq_f32(acc6, a6V, temp);
          acc7 = vmlaq_f32(acc7, a7V, temp);

          /* Decrement the loop count */
          colCnt--;
        }

        /* Reduce each vector accumulator to a scalar */
        accum = vpadd_f32(vget_low_f32(acc0), vget_high_f32(acc0));
        sum0 += vget_lane_f32(accum, 0) + vget_lane_f32(accum, 1);

        accum = vpadd_f32(vget_low_f32(acc1), vget_high_f32(acc1));
        sum1 += vget_lane_f32(accum, 0) + vget_lane_f32(accum, 1);

        accum = vpadd_f32(vget_low_f32(acc2), vget_high_f32(acc2));
        sum2 += vget_lane_f32(accum, 0) + vget_lane_f32(accum, 1);

        accum = vpadd_f32(vget_low_f32(acc3), vget_high_f32(acc3));
        sum3 += vget_lane_f32(accum, 0) + vget_lane_f32(accum, 1);

        accum = vpadd_f32(vget_low_f32(acc4), vget_high_f32(acc4));
        sum4 += vget_lane_f32(accum, 0) + vget_lane_f32(accum, 1);

        accum = vpadd_f32(vget_low_f32(acc5), vget_high_f32(acc5));
        sum5 += vget_lane_f32(accum, 0) + vget_lane_f32(accum, 1);

        accum = vpadd_f32(vget_low_f32(acc6), vget_high_f32(acc6));
        sum6 += vget_lane_f32(accum, 0) + vget_lane_f32(accum, 1);

        accum = vpadd_f32(vget_low_f32(acc7), vget_high_f32(acc7));
        sum7 += vget_lane_f32(accum, 0) + vget_lane_f32(accum, 1);

        /* If the number of columns of pSrcA is not a multiple of 4, compute the remaining MACs here.
        ** No loop unrolling is used. */
        colCnt = numColsA & 3;

        while (colCnt > 0U)
        {
          /* c(m,n) = a(1,1) * b(1,1) + a(1,2) * b(2,1) + ... + a(m,p) * b(p,n) */
          sum0 += *pIn1++  * (*pIn2);
          sum1 += *pIn1B++ * (*pIn2);
          sum2 += *pIn1C++ * (*pIn2);
          sum3 += *pIn1D++ * (*pIn2);
          sum4 += *pIn1E++ * (*pIn2);
          sum5 += *pIn1F++ * (*pIn2);
          sum6 += *pIn1G++ * (*pIn2);
          sum7 += *pIn1H++ * (*pIn2);
          pIn2 += numColsB;

          /* Decrement the loop counter */
          colCnt--;
        }

        /* Store the result in the destination buffer */
        *px++  = sum0;
        *pxB++ = sum1;
        *pxC++ = sum2;
        *pxD++ = sum3;
        *pxE++ = sum4;
        *pxF++ = sum5;
        *pxG++ = sum6;
        *pxH++ = sum7;

        /* Update the pointer pIn2 to point to the starting address of the next column */
        j++;
        pIn2 = pSrcB->pData + j;

        /* Decrement the column loop counter */
        col--;

      } while (col > 0U);

      /* Update the pointer pInA to point to the starting address of the next group of rows */
      i = i + numColsB;
      pInA = pInA + GROUPOFROWS * numColsA;

      /* Decrement the row loop counter */
      rowCnt--;
    }

    /*
     * Up to this point, i indexed groups of rows processed by the loop above.
     * From here on, i indexes individual rows, since the remaining rows are
     * processed one at a time.
     */
    i = GROUPOFROWS * i;
    rowCnt = row & 7;

    while (rowCnt > 0)
    {
      /* Output pointer is set to starting address of the row being processed */
      px = pOut + i;

      /* For every row-wise pass, the column loop counter is initialized */
      col = numColsB;

      /* For every row-wise pass, pIn2 is reset to the starting address of the pSrcB data */
      pIn2 = pSrcB->pData;

      j = 0U;

      /* Column loop */
      do
      {
        /* Set the variable sum, that acts as accumulator, to zero */
        sum = 0.0f;

        /* Initialize pIn1 to point to the starting address of the current pSrcA row */
        pIn1 = pInA;

        acc0 = vdupq_n_f32(0.0);

        /* Compute 4 MACs simultaneously. */
        colCnt = numColsA >> 2U;

        /* Matrix multiplication */
        while (colCnt > 0U)
        {
          /* c(m,n) = a(1,1) * b(1,1) + a(1,2) * b(2,1) + ... + a(m,p) * b(p,n) */
          a0V = vld1q_f32(pIn1);  /* load 4 consecutive elements of the pSrcA row */
          pIn1 += 4;

          temp = vsetq_lane_f32(*pIn2, temp, 0);
          pIn2 += numColsB;
          temp = vsetq_lane_f32(*pIn2, temp, 1);
          pIn2 += numColsB;
          temp = vsetq_lane_f32(*pIn2, temp, 2);
          pIn2 += numColsB;
          temp = vsetq_lane_f32(*pIn2, temp, 3);
          pIn2 += numColsB;

          acc0 = vmlaq_f32(acc0, a0V, temp);

          /* Decrement the loop count */
          colCnt--;
        }

        accum = vpadd_f32(vget_low_f32(acc0), vget_high_f32(acc0));
        sum += vget_lane_f32(accum, 0) + vget_lane_f32(accum, 1);

        /* If the number of columns of pSrcA is not a multiple of 4, compute the remaining MACs here.
        ** No loop unrolling is used. */
        colCnt = numColsA % 0x4U;

        while (colCnt > 0U)
        {
          /* c(m,n) = a(1,1) * b(1,1) + a(1,2) * b(2,1) + ... + a(m,p) * b(p,n) */
          sum += *pIn1++ * (*pIn2);
          pIn2 += numColsB;

          /* Decrement the loop counter */
          colCnt--;
        }

        /* Store the result in the destination buffer */
        *px++ = sum;

        /* Update the pointer pIn2 to point to the starting address of the next column */
        j++;
        pIn2 = pSrcB->pData + j;

        /* Decrement the column loop counter */
        col--;

      } while (col > 0U);

      /* Update the pointer pInA to point to the starting address of the next row */
      i = i + numColsB;
      pInA = pInA + numColsA;

      /* Decrement the row loop counter */
      rowCnt--;
    }

    /* Set status as ARM_MATH_SUCCESS */
    status = ARM_MATH_SUCCESS;
  }

  /* Return to application */
  return (status);
}
#else

/**
 * @brief Floating-point matrix multiplication.
 * @param[in]  *pSrcA points to the first input matrix structure
 * @param[in]  *pSrcB points to the second input matrix structure
 * @param[out] *pDst  points to output matrix structure
 * @return The function returns either
 * <code>ARM_MATH_SIZE_MISMATCH</code> or <code>ARM_MATH_SUCCESS</code> based on the outcome of size checking.
 */
arm_status arm_mat_mult_f32(
  const arm_matrix_instance_f32 * pSrcA,
  const arm_matrix_instance_f32 * pSrcB,
  arm_matrix_instance_f32 * pDst)
{
  float32_t *pIn1 = pSrcA->pData;      /* Input data matrix pointer A */
  float32_t *pIn2 = pSrcB->pData;      /* Input data matrix pointer B */
  float32_t *pInA = pSrcA->pData;      /* Input data matrix pointer A */
  float32_t *pInB = pSrcB->pData;      /* Input data matrix pointer B */
  float32_t *pOut = pDst->pData;       /* Output data matrix pointer */
  float32_t *px;                       /* Temporary output data matrix pointer */
  float32_t sum;                       /* Accumulator */
  uint16_t numRowsA = pSrcA->numRows;  /* Number of rows of input matrix A */
  uint16_t numColsB = pSrcB->numCols;  /* Number of columns of input matrix B */
  uint16_t numColsA = pSrcA->numCols;  /* Number of columns of input matrix A */
  uint32_t col, i = 0U, row = numRowsA, colCnt;  /* Loop counters */
  arm_status status;                   /* Status of matrix multiplication */

#ifdef ARM_MATH_MATRIX_CHECK

  /* Check for matrix mismatch condition */
  if ((pSrcA->numCols != pSrcB->numRows) ||
      (pSrcA->numRows != pDst->numRows)  ||
      (pSrcB->numCols != pDst->numCols)    )
  {
    /* Set status as ARM_MATH_SIZE_MISMATCH */
    status = ARM_MATH_SIZE_MISMATCH;
  }
  else
#endif /* #ifdef ARM_MATH_MATRIX_CHECK */
  {
    /* The following loop performs the dot-product of each row in pSrcA with each column in pSrcB */

    /* row loop */
    do
    {
      /* Output pointer is set to starting address of row being processed */
      px = pOut + i;

      /* For every row-wise pass, the column loop counter is initialized */
      col = numColsB;

      /* For every row-wise pass, pIn2 is reset to the starting address of the pSrcB data */
      pIn2 = pSrcB->pData;

      /* column loop */
      do
      {
        /* Set the variable sum, that acts as accumulator, to zero */
        sum = 0.0f;

        /* Initialize pIn1 to point to the starting address of the current row of pSrcA */
        pIn1 = pInA;

#if defined (ARM_MATH_LOOPUNROLL)

        /* Loop unrolling: Compute 4 MACs at a time. */
        colCnt = numColsA >> 2U;

        /* matrix multiplication */
        while (colCnt > 0U)
        {
          /* c(m,p) = a(m,1) * b(1,p) + a(m,2) * b(2,p) + .... + a(m,n) * b(n,p) */

          /* Perform the multiply-accumulates */
          sum += *pIn1++ * *pIn2;
          pIn2 += numColsB;

          sum += *pIn1++ * *pIn2;
          pIn2 += numColsB;

          sum += *pIn1++ * *pIn2;
          pIn2 += numColsB;

          sum += *pIn1++ * *pIn2;
          pIn2 += numColsB;

          /* Decrement loop counter */
          colCnt--;
        }

        /* Loop unrolling: Compute remaining MACs */
        colCnt = numColsA % 0x4U;

#else

        /* Initialize colCnt with number of columns */
        colCnt = numColsA;

#endif /* #if defined (ARM_MATH_LOOPUNROLL) */

        while (colCnt > 0U)
        {
          /* c(m,p) = a(m,1) * b(1,p) + a(m,2) * b(2,p) + .... + a(m,n) * b(n,p) */

          /* Perform the multiply-accumulate */
          sum += *pIn1++ * *pIn2;
          pIn2 += numColsB;

          /* Decrement loop counter */
          colCnt--;
        }

        /* Store result in destination buffer */
        *px++ = sum;

        /* Decrement column loop counter */
        col--;

        /* Update pointer pIn2 to point to starting address of next column */
        pIn2 = pInB + (numColsB - col);

      } while (col > 0U);

      /* Update pointer pInA to point to starting address of next row */
      i = i + numColsB;
      pInA = pInA + numColsA;

      /* Decrement row loop counter */
      row--;

    } while (row > 0U);

    /* Set status as ARM_MATH_SUCCESS */
    status = ARM_MATH_SUCCESS;
  }

  /* Return to application */
  return (status);
}

#endif /* #if defined(ARM_MATH_NEON) */
#endif /* defined(ARM_MATH_MVEF) && !defined(ARM_MATH_AUTOVECTORIZE) */

/**
 * @} end of MatrixMult group
 */