/* ----------------------------------------------------------------------
 * Project:      CMSIS DSP Library
 * Title:        arm_mat_cmplx_mult_q31.c
 * Description:  Q31 complex matrix multiplication
 *
 * $Date:        23 April 2021
 * $Revision:    V1.9.0
 *
 * Target Processor: Cortex-M and Cortex-A cores
 * -------------------------------------------------------------------- */
/*
 * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved.
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "dsp/matrix_functions.h"
/**
  @ingroup groupMatrix
 */

/**
  @addtogroup CmplxMatrixMult
  @{
 */

/**
  @brief         Q31 complex matrix multiplication.
  @param[in]     pSrcA      points to first input complex matrix structure
  @param[in]     pSrcB      points to second input complex matrix structure
  @param[out]    pDst       points to output complex matrix structure
  @return        execution status
                   - \ref ARM_MATH_SUCCESS       : Operation successful
                   - \ref ARM_MATH_SIZE_MISMATCH : Matrix size check failed

  @par           Scaling and Overflow Behavior
                   The function is implemented using an internal 64-bit accumulator.
                   The accumulator has a 2.62 format and maintains full precision of the
                   intermediate multiplication results but provides only a single guard bit.
                   There is no saturation on intermediate additions, so if the accumulator
                   overflows it wraps around and distorts the result. The input signals
                   should therefore be scaled down by log2(numColsA) bits, as a total of
                   numColsA additions are performed internally. The 2.62 accumulator is
                   right shifted by 31 bits and saturated to 1.31 format to yield the
                   final result.
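
  @par           Example
                   A minimal usage sketch (the 2x2 dimensions, buffer names and the
                   example() wrapper are illustrative, not part of this file). Inputs
                   are assumed pre-scaled down by log2(numColsA) = 1 bit:
  @code
  #include "dsp/matrix_functions.h"

  #define DIM 2

  static q31_t dataA[DIM * DIM * 2];   // interleaved real/imag, Q1.31
  static q31_t dataB[DIM * DIM * 2];
  static q31_t dataC[DIM * DIM * 2];

  void example(void)
  {
      arm_matrix_instance_q31 A, B, C;

      arm_mat_init_q31(&A, DIM, DIM, dataA);
      arm_mat_init_q31(&B, DIM, DIM, dataB);
      arm_mat_init_q31(&C, DIM, DIM, dataC);

      if (arm_mat_cmplx_mult_q31(&A, &B, &C) != ARM_MATH_SUCCESS)
      {
          // handle ARM_MATH_SIZE_MISMATCH
      }
  }
  @endcode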
 */

#if defined(ARM_MATH_MVEI) && !defined(ARM_MATH_AUTOVECTORIZE)

#include "arm_helium_utils.h"

#define MATRIX_DIM2 2
#define MATRIX_DIM3 3
#define MATRIX_DIM4 4

__STATIC_INLINE arm_status arm_mat_cmplx_mult_q31_2x2_mve(
    const arm_matrix_instance_q31 * pSrcA,
    const arm_matrix_instance_q31 * pSrcB,
    arm_matrix_instance_q31 * pDst)
{
    q31_t const *pInB = pSrcB->pData; /* input data matrix pointer B */
    q31_t const *pInA = pSrcA->pData; /* input data matrix pointer A */
    q31_t       *pOut = pDst->pData;  /* output data matrix pointer */
    uint32x4_t   vecColBOffs0;
    q31_t const *pInA0 = pInA;
    q31_t const *pInA1 = pInA0 + CMPLX_DIM * MATRIX_DIM2;
    q63_t        acc0, acc1, acc2, acc3;
    q31x4_t      vecB, vecA;
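    /*
     * Gather offsets selecting one column of B: lanes 0-1 hold the real/imag
     * pair from row 0 of that column, lanes 2-3 the real/imag pair from row 1.
     */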
    static const uint32_t offsetB0[4] = {
        0, 1,
        MATRIX_DIM2 * CMPLX_DIM, MATRIX_DIM2 * CMPLX_DIM + 1
    };

    vecColBOffs0 = vldrwq_u32(offsetB0);

    pInB = (q31_t const *) pSrcB->pData;

    vecB = vldrwq_gather_shifted_offset(pInB, vecColBOffs0);
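    /*
     * Complex dot product of an A row with the gathered B column:
     * vmlsldavq accumulates a_re*b_re - a_im*b_im across lanes (real part),
     * vmlaldavxq accumulates a_re*b_im + a_im*b_re (imaginary part).
     */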
    vecA = vldrwq_s32(pInA0);
    acc0 = vmlsldavq_s32(vecA, vecB);
    acc1 = vmlaldavxq_s32(vecA, vecB);

    vecA = vldrwq_s32(pInA1);
    acc2 = vmlsldavq_s32(vecA, vecB);
    acc3 = vmlaldavxq_s32(vecA, vecB);

    pOut[0 * CMPLX_DIM * MATRIX_DIM2 + 0] = (q31_t) asrl(acc0, 31);
    pOut[0 * CMPLX_DIM * MATRIX_DIM2 + 1] = (q31_t) asrl(acc1, 31);
    pOut[1 * CMPLX_DIM * MATRIX_DIM2 + 0] = (q31_t) asrl(acc2, 31);
    pOut[1 * CMPLX_DIM * MATRIX_DIM2 + 1] = (q31_t) asrl(acc3, 31);

    /*
     * move to next B column
     */
    pInB = pInB + CMPLX_DIM;

    vecB = vldrwq_gather_shifted_offset(pInB, vecColBOffs0);

    vecA = vldrwq_s32(pInA0);
    acc0 = vmlsldavq_s32(vecA, vecB);
    acc1 = vmlaldavxq_s32(vecA, vecB);

    vecA = vldrwq_s32(pInA1);
    acc2 = vmlsldavq_s32(vecA, vecB);
    acc3 = vmlaldavxq_s32(vecA, vecB);

    pOut += CMPLX_DIM;
    pOut[0 * CMPLX_DIM * MATRIX_DIM2 + 0] = (q31_t) asrl(acc0, 31);
    pOut[0 * CMPLX_DIM * MATRIX_DIM2 + 1] = (q31_t) asrl(acc1, 31);
    pOut[1 * CMPLX_DIM * MATRIX_DIM2 + 0] = (q31_t) asrl(acc2, 31);
    pOut[1 * CMPLX_DIM * MATRIX_DIM2 + 1] = (q31_t) asrl(acc3, 31);

    /*
     * Return to application
     */
    return (ARM_MATH_SUCCESS);
}

__STATIC_INLINE arm_status arm_mat_cmplx_mult_q31_3x3_mve(
    const arm_matrix_instance_q31 * pSrcA,
    const arm_matrix_instance_q31 * pSrcB,
    arm_matrix_instance_q31 * pDst)
{
    q31_t const *pInB = pSrcB->pData; /* input data matrix pointer B */
    q31_t const *pInA = pSrcA->pData; /* input data matrix pointer A */
    q31_t       *pOut = pDst->pData;  /* output data matrix pointer */
    uint32x4_t   vecColBOffs0, vecColBOffs1;
    q31_t const *pInA0 = pInA;
    q31_t const *pInA1 = pInA0 + CMPLX_DIM * MATRIX_DIM3;
    q31_t const *pInA2 = pInA1 + CMPLX_DIM * MATRIX_DIM3;
    q63_t        acc0, acc1, acc2, acc3;
    q31x4_t      vecB, vecB1, vecA;

    /*
     * enable predication to disable upper half complex vector element
     */
    mve_pred16_t p0 = vctp32q(CMPLX_DIM);

    static const uint32_t offsetB0[4] = {
        0, 1,
        MATRIX_DIM3 * CMPLX_DIM, MATRIX_DIM3 * CMPLX_DIM + 1
    };
    static const uint32_t offsetB1[4] = {
        2 * MATRIX_DIM3 * CMPLX_DIM, 2 * MATRIX_DIM3 * CMPLX_DIM + 1,
        INACTIVELANE, INACTIVELANE
    };
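    /*
     * A 3-element B column spans two gathers: offsetB0 covers rows 0-1 and
     * offsetB1 covers row 2. The two trailing INACTIVELANE lanes are
     * neutralized by loading the matching A lanes with the zeroing predicate
     * p0, so they contribute nothing to the accumulators.
     */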
    vecColBOffs0 = vldrwq_u32(offsetB0);
    vecColBOffs1 = vldrwq_u32(offsetB1);

    pInB = (q31_t const *) pSrcB->pData;

    vecB  = vldrwq_gather_shifted_offset(pInB, vecColBOffs0);
    vecB1 = vldrwq_gather_shifted_offset(pInB, vecColBOffs1);

    vecA = vldrwq_s32(pInA0);
    acc0 = vmlsldavq_s32(vecA, vecB);
    acc1 = vmlaldavxq_s32(vecA, vecB);

    vecA = vldrwq_s32(pInA1);
    acc2 = vmlsldavq_s32(vecA, vecB);
    acc3 = vmlaldavxq_s32(vecA, vecB);

    vecA = vldrwq_z_s32(&pInA0[4], p0);
    acc0 = vmlsldavaq_s32(acc0, vecA, vecB1);
    acc1 = vmlaldavaxq_s32(acc1, vecA, vecB1);

    vecA = vldrwq_z_s32(&pInA1[4], p0);
    acc2 = vmlsldavaq_s32(acc2, vecA, vecB1);
    acc3 = vmlaldavaxq_s32(acc3, vecA, vecB1);

    pOut[0 * CMPLX_DIM * MATRIX_DIM3 + 0] = (q31_t) asrl(acc0, 31);
    pOut[0 * CMPLX_DIM * MATRIX_DIM3 + 1] = (q31_t) asrl(acc1, 31);
    pOut[1 * CMPLX_DIM * MATRIX_DIM3 + 0] = (q31_t) asrl(acc2, 31);
    pOut[1 * CMPLX_DIM * MATRIX_DIM3 + 1] = (q31_t) asrl(acc3, 31);

    vecA = vldrwq_s32(pInA2);
    acc0 = vmlsldavq_s32(vecA, vecB);
    acc1 = vmlaldavxq_s32(vecA, vecB);

    vecA = vldrwq_z_s32(&pInA2[4], p0);
    acc0 = vmlsldavaq_s32(acc0, vecA, vecB1);
    acc1 = vmlaldavaxq_s32(acc1, vecA, vecB1);

    pOut[2 * CMPLX_DIM * MATRIX_DIM3 + 0] = (q31_t) asrl(acc0, 31);
    pOut[2 * CMPLX_DIM * MATRIX_DIM3 + 1] = (q31_t) asrl(acc1, 31);
    pOut += CMPLX_DIM;

    /*
     * move to next B column
     */
    pInB = pInB + CMPLX_DIM;

    vecB  = vldrwq_gather_shifted_offset(pInB, vecColBOffs0);
    vecB1 = vldrwq_gather_shifted_offset(pInB, vecColBOffs1);

    vecA = vldrwq_s32(pInA0);
    acc0 = vmlsldavq_s32(vecA, vecB);
    acc1 = vmlaldavxq_s32(vecA, vecB);

    vecA = vldrwq_s32(pInA1);
    acc2 = vmlsldavq_s32(vecA, vecB);
    acc3 = vmlaldavxq_s32(vecA, vecB);

    vecA = vldrwq_z_s32(&pInA0[4], p0);
    acc0 = vmlsldavaq_s32(acc0, vecA, vecB1);
    acc1 = vmlaldavaxq_s32(acc1, vecA, vecB1);

    vecA = vldrwq_z_s32(&pInA1[4], p0);
    acc2 = vmlsldavaq_s32(acc2, vecA, vecB1);
    acc3 = vmlaldavaxq_s32(acc3, vecA, vecB1);

    pOut[0 * CMPLX_DIM * MATRIX_DIM3 + 0] = (q31_t) asrl(acc0, 31);
    pOut[0 * CMPLX_DIM * MATRIX_DIM3 + 1] = (q31_t) asrl(acc1, 31);
    pOut[1 * CMPLX_DIM * MATRIX_DIM3 + 0] = (q31_t) asrl(acc2, 31);
    pOut[1 * CMPLX_DIM * MATRIX_DIM3 + 1] = (q31_t) asrl(acc3, 31);

    vecA = vldrwq_s32(pInA2);
    acc0 = vmlsldavq_s32(vecA, vecB);
    acc1 = vmlaldavxq_s32(vecA, vecB);

    vecA = vldrwq_z_s32(&pInA2[4], p0);
    acc0 = vmlsldavaq_s32(acc0, vecA, vecB1);
    acc1 = vmlaldavaxq_s32(acc1, vecA, vecB1);

    pOut[2 * CMPLX_DIM * MATRIX_DIM3 + 0] = (q31_t) asrl(acc0, 31);
    pOut[2 * CMPLX_DIM * MATRIX_DIM3 + 1] = (q31_t) asrl(acc1, 31);
    pOut += CMPLX_DIM;

    /*
     * move to next B column
     */
    pInB = pInB + CMPLX_DIM;

    vecB  = vldrwq_gather_shifted_offset(pInB, vecColBOffs0);
    vecB1 = vldrwq_gather_shifted_offset(pInB, vecColBOffs1);

    vecA = vldrwq_s32(pInA0);
    acc0 = vmlsldavq_s32(vecA, vecB);
    acc1 = vmlaldavxq_s32(vecA, vecB);

    vecA = vldrwq_s32(pInA1);
    acc2 = vmlsldavq_s32(vecA, vecB);
    acc3 = vmlaldavxq_s32(vecA, vecB);

    vecA = vldrwq_z_s32(&pInA0[4], p0);
    acc0 = vmlsldavaq_s32(acc0, vecA, vecB1);
    acc1 = vmlaldavaxq_s32(acc1, vecA, vecB1);

    vecA = vldrwq_z_s32(&pInA1[4], p0);
    acc2 = vmlsldavaq_s32(acc2, vecA, vecB1);
    acc3 = vmlaldavaxq_s32(acc3, vecA, vecB1);

    pOut[0 * CMPLX_DIM * MATRIX_DIM3 + 0] = (q31_t) asrl(acc0, 31);
    pOut[0 * CMPLX_DIM * MATRIX_DIM3 + 1] = (q31_t) asrl(acc1, 31);
    pOut[1 * CMPLX_DIM * MATRIX_DIM3 + 0] = (q31_t) asrl(acc2, 31);
    pOut[1 * CMPLX_DIM * MATRIX_DIM3 + 1] = (q31_t) asrl(acc3, 31);

    vecA = vldrwq_s32(pInA2);
    acc0 = vmlsldavq_s32(vecA, vecB);
    acc1 = vmlaldavxq_s32(vecA, vecB);

    vecA = vldrwq_z_s32(&pInA2[4], p0);
    acc0 = vmlsldavaq_s32(acc0, vecA, vecB1);
    acc1 = vmlaldavaxq_s32(acc1, vecA, vecB1);

    pOut[2 * CMPLX_DIM * MATRIX_DIM3 + 0] = (q31_t) asrl(acc0, 31);
    pOut[2 * CMPLX_DIM * MATRIX_DIM3 + 1] = (q31_t) asrl(acc1, 31);

    /*
     * Return to application
     */
    return (ARM_MATH_SUCCESS);
}

__STATIC_INLINE arm_status arm_mat_cmplx_mult_q31_4x4_mve(
    const arm_matrix_instance_q31 * pSrcA,
    const arm_matrix_instance_q31 * pSrcB,
    arm_matrix_instance_q31 * pDst)
{
    q31_t const *pInB = pSrcB->pData; /* input data matrix pointer B */
    q31_t const *pInA = pSrcA->pData; /* input data matrix pointer A */
    q31_t       *pOut = pDst->pData;  /* output data matrix pointer */
    uint32x4_t   vecColBOffs0, vecColBOffs1;
    q31_t const *pInA0 = pInA;
    q31_t const *pInA1 = pInA0 + CMPLX_DIM * MATRIX_DIM4;
    q31_t const *pInA2 = pInA1 + CMPLX_DIM * MATRIX_DIM4;
    q31_t const *pInA3 = pInA2 + CMPLX_DIM * MATRIX_DIM4;
    q63_t        acc0, acc1, acc2, acc3;
    q31x4_t      vecB, vecB1, vecA;

    static const uint32_t offsetB0[4] = {
        0, 1,
        MATRIX_DIM4 * CMPLX_DIM, MATRIX_DIM4 * CMPLX_DIM + 1
    };
    static const uint32_t offsetB1[4] = {
        2 * MATRIX_DIM4 * CMPLX_DIM, 2 * MATRIX_DIM4 * CMPLX_DIM + 1,
        3 * MATRIX_DIM4 * CMPLX_DIM, 3 * MATRIX_DIM4 * CMPLX_DIM + 1
    };
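    /*
     * A 4-element B column spans 8 q31 values, gathered as two full vectors:
     * offsetB0 addresses rows 0-1 and offsetB1 rows 2-3 of the column, so no
     * predication is needed in the 4x4 case.
     */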
    vecColBOffs0 = vldrwq_u32(offsetB0);
    vecColBOffs1 = vldrwq_u32(offsetB1);

    pInB = (q31_t const *) pSrcB->pData;

    vecB  = vldrwq_gather_shifted_offset(pInB, vecColBOffs0);
    vecB1 = vldrwq_gather_shifted_offset(pInB, vecColBOffs1);

    vecA = vldrwq_s32(pInA0);
    acc0 = vmlsldavq_s32(vecA, vecB);
    acc1 = vmlaldavxq_s32(vecA, vecB);

    vecA = vldrwq_s32(pInA1);
    acc2 = vmlsldavq_s32(vecA, vecB);
    acc3 = vmlaldavxq_s32(vecA, vecB);

    vecA = vldrwq_s32(&pInA0[4]);
    acc0 = vmlsldavaq_s32(acc0, vecA, vecB1);
    acc1 = vmlaldavaxq_s32(acc1, vecA, vecB1);

    vecA = vldrwq_s32(&pInA1[4]);
    acc2 = vmlsldavaq_s32(acc2, vecA, vecB1);
    acc3 = vmlaldavaxq_s32(acc3, vecA, vecB1);

    pOut[0 * CMPLX_DIM * MATRIX_DIM4 + 0] = (q31_t) asrl(acc0, 31);
    pOut[0 * CMPLX_DIM * MATRIX_DIM4 + 1] = (q31_t) asrl(acc1, 31);
    pOut[1 * CMPLX_DIM * MATRIX_DIM4 + 0] = (q31_t) asrl(acc2, 31);
    pOut[1 * CMPLX_DIM * MATRIX_DIM4 + 1] = (q31_t) asrl(acc3, 31);

    vecA = vldrwq_s32(pInA2);
    acc0 = vmlsldavq_s32(vecA, vecB);
    acc1 = vmlaldavxq_s32(vecA, vecB);

    vecA = vldrwq_s32(pInA3);
    acc2 = vmlsldavq_s32(vecA, vecB);
    acc3 = vmlaldavxq_s32(vecA, vecB);

    vecA = vldrwq_s32(&pInA2[4]);
    acc0 = vmlsldavaq_s32(acc0, vecA, vecB1);
    acc1 = vmlaldavaxq_s32(acc1, vecA, vecB1);

    vecA = vldrwq_s32(&pInA3[4]);
    acc2 = vmlsldavaq_s32(acc2, vecA, vecB1);
    acc3 = vmlaldavaxq_s32(acc3, vecA, vecB1);

    pOut[2 * CMPLX_DIM * MATRIX_DIM4 + 0] = (q31_t) asrl(acc0, 31);
    pOut[2 * CMPLX_DIM * MATRIX_DIM4 + 1] = (q31_t) asrl(acc1, 31);
    pOut[3 * CMPLX_DIM * MATRIX_DIM4 + 0] = (q31_t) asrl(acc2, 31);
    pOut[3 * CMPLX_DIM * MATRIX_DIM4 + 1] = (q31_t) asrl(acc3, 31);
    pOut += CMPLX_DIM;

    /*
     * move to next B column
     */
    pInB = pInB + CMPLX_DIM;

    vecB  = vldrwq_gather_shifted_offset(pInB, vecColBOffs0);
    vecB1 = vldrwq_gather_shifted_offset(pInB, vecColBOffs1);

    vecA = vldrwq_s32(pInA0);
    acc0 = vmlsldavq_s32(vecA, vecB);
    acc1 = vmlaldavxq_s32(vecA, vecB);

    vecA = vldrwq_s32(pInA1);
    acc2 = vmlsldavq_s32(vecA, vecB);
    acc3 = vmlaldavxq_s32(vecA, vecB);

    vecA = vldrwq_s32(&pInA0[4]);
    acc0 = vmlsldavaq_s32(acc0, vecA, vecB1);
    acc1 = vmlaldavaxq_s32(acc1, vecA, vecB1);

    vecA = vldrwq_s32(&pInA1[4]);
    acc2 = vmlsldavaq_s32(acc2, vecA, vecB1);
    acc3 = vmlaldavaxq_s32(acc3, vecA, vecB1);

    pOut[0 * CMPLX_DIM * MATRIX_DIM4 + 0] = (q31_t) asrl(acc0, 31);
    pOut[0 * CMPLX_DIM * MATRIX_DIM4 + 1] = (q31_t) asrl(acc1, 31);
    pOut[1 * CMPLX_DIM * MATRIX_DIM4 + 0] = (q31_t) asrl(acc2, 31);
    pOut[1 * CMPLX_DIM * MATRIX_DIM4 + 1] = (q31_t) asrl(acc3, 31);

    vecA = vldrwq_s32(pInA2);
    acc0 = vmlsldavq_s32(vecA, vecB);
    acc1 = vmlaldavxq_s32(vecA, vecB);

    vecA = vldrwq_s32(pInA3);
    acc2 = vmlsldavq_s32(vecA, vecB);
    acc3 = vmlaldavxq_s32(vecA, vecB);

    vecA = vldrwq_s32(&pInA2[4]);
    acc0 = vmlsldavaq_s32(acc0, vecA, vecB1);
    acc1 = vmlaldavaxq_s32(acc1, vecA, vecB1);

    vecA = vldrwq_s32(&pInA3[4]);
    acc2 = vmlsldavaq_s32(acc2, vecA, vecB1);
    acc3 = vmlaldavaxq_s32(acc3, vecA, vecB1);

    pOut[2 * CMPLX_DIM * MATRIX_DIM4 + 0] = (q31_t) asrl(acc0, 31);
    pOut[2 * CMPLX_DIM * MATRIX_DIM4 + 1] = (q31_t) asrl(acc1, 31);
    pOut[3 * CMPLX_DIM * MATRIX_DIM4 + 0] = (q31_t) asrl(acc2, 31);
    pOut[3 * CMPLX_DIM * MATRIX_DIM4 + 1] = (q31_t) asrl(acc3, 31);
    pOut += CMPLX_DIM;

    /*
     * move to next B column
     */
    pInB = pInB + CMPLX_DIM;

    vecB  = vldrwq_gather_shifted_offset(pInB, vecColBOffs0);
    vecB1 = vldrwq_gather_shifted_offset(pInB, vecColBOffs1);

    vecA = vldrwq_s32(pInA0);
    acc0 = vmlsldavq_s32(vecA, vecB);
    acc1 = vmlaldavxq_s32(vecA, vecB);

    vecA = vldrwq_s32(pInA1);
    acc2 = vmlsldavq_s32(vecA, vecB);
    acc3 = vmlaldavxq_s32(vecA, vecB);

    vecA = vldrwq_s32(&pInA0[4]);
    acc0 = vmlsldavaq_s32(acc0, vecA, vecB1);
    acc1 = vmlaldavaxq_s32(acc1, vecA, vecB1);

    vecA = vldrwq_s32(&pInA1[4]);
    acc2 = vmlsldavaq_s32(acc2, vecA, vecB1);
    acc3 = vmlaldavaxq_s32(acc3, vecA, vecB1);

    pOut[0 * CMPLX_DIM * MATRIX_DIM4 + 0] = (q31_t) asrl(acc0, 31);
    pOut[0 * CMPLX_DIM * MATRIX_DIM4 + 1] = (q31_t) asrl(acc1, 31);
    pOut[1 * CMPLX_DIM * MATRIX_DIM4 + 0] = (q31_t) asrl(acc2, 31);
    pOut[1 * CMPLX_DIM * MATRIX_DIM4 + 1] = (q31_t) asrl(acc3, 31);

    vecA = vldrwq_s32(pInA2);
    acc0 = vmlsldavq_s32(vecA, vecB);
    acc1 = vmlaldavxq_s32(vecA, vecB);

    vecA = vldrwq_s32(pInA3);
    acc2 = vmlsldavq_s32(vecA, vecB);
    acc3 = vmlaldavxq_s32(vecA, vecB);

    vecA = vldrwq_s32(&pInA2[4]);
    acc0 = vmlsldavaq_s32(acc0, vecA, vecB1);
    acc1 = vmlaldavaxq_s32(acc1, vecA, vecB1);

    vecA = vldrwq_s32(&pInA3[4]);
    acc2 = vmlsldavaq_s32(acc2, vecA, vecB1);
    acc3 = vmlaldavaxq_s32(acc3, vecA, vecB1);

    pOut[2 * CMPLX_DIM * MATRIX_DIM4 + 0] = (q31_t) asrl(acc0, 31);
    pOut[2 * CMPLX_DIM * MATRIX_DIM4 + 1] = (q31_t) asrl(acc1, 31);
    pOut[3 * CMPLX_DIM * MATRIX_DIM4 + 0] = (q31_t) asrl(acc2, 31);
    pOut[3 * CMPLX_DIM * MATRIX_DIM4 + 1] = (q31_t) asrl(acc3, 31);
    pOut += CMPLX_DIM;

    /*
     * move to next B column
     */
    pInB = pInB + CMPLX_DIM;

    vecB  = vldrwq_gather_shifted_offset(pInB, vecColBOffs0);
    vecB1 = vldrwq_gather_shifted_offset(pInB, vecColBOffs1);

    vecA = vldrwq_s32(pInA0);
    acc0 = vmlsldavq_s32(vecA, vecB);
    acc1 = vmlaldavxq_s32(vecA, vecB);

    vecA = vldrwq_s32(pInA1);
    acc2 = vmlsldavq_s32(vecA, vecB);
    acc3 = vmlaldavxq_s32(vecA, vecB);

    vecA = vldrwq_s32(&pInA0[4]);
    acc0 = vmlsldavaq_s32(acc0, vecA, vecB1);
    acc1 = vmlaldavaxq_s32(acc1, vecA, vecB1);

    vecA = vldrwq_s32(&pInA1[4]);
    acc2 = vmlsldavaq_s32(acc2, vecA, vecB1);
    acc3 = vmlaldavaxq_s32(acc3, vecA, vecB1);

    pOut[0 * CMPLX_DIM * MATRIX_DIM4 + 0] = (q31_t) asrl(acc0, 31);
    pOut[0 * CMPLX_DIM * MATRIX_DIM4 + 1] = (q31_t) asrl(acc1, 31);
    pOut[1 * CMPLX_DIM * MATRIX_DIM4 + 0] = (q31_t) asrl(acc2, 31);
    pOut[1 * CMPLX_DIM * MATRIX_DIM4 + 1] = (q31_t) asrl(acc3, 31);

    vecA = vldrwq_s32(pInA2);
    acc0 = vmlsldavq_s32(vecA, vecB);
    acc1 = vmlaldavxq_s32(vecA, vecB);

    vecA = vldrwq_s32(pInA3);
    acc2 = vmlsldavq_s32(vecA, vecB);
    acc3 = vmlaldavxq_s32(vecA, vecB);

    vecA = vldrwq_s32(&pInA2[4]);
    acc0 = vmlsldavaq_s32(acc0, vecA, vecB1);
    acc1 = vmlaldavaxq_s32(acc1, vecA, vecB1);

    vecA = vldrwq_s32(&pInA3[4]);
    acc2 = vmlsldavaq_s32(acc2, vecA, vecB1);
    acc3 = vmlaldavaxq_s32(acc3, vecA, vecB1);

    pOut[2 * CMPLX_DIM * MATRIX_DIM4 + 0] = (q31_t) asrl(acc0, 31);
    pOut[2 * CMPLX_DIM * MATRIX_DIM4 + 1] = (q31_t) asrl(acc1, 31);
    pOut[3 * CMPLX_DIM * MATRIX_DIM4 + 0] = (q31_t) asrl(acc2, 31);
    pOut[3 * CMPLX_DIM * MATRIX_DIM4 + 1] = (q31_t) asrl(acc3, 31);

    /*
     * Return to application
     */
    return (ARM_MATH_SUCCESS);
}

arm_status arm_mat_cmplx_mult_q31(
    const arm_matrix_instance_q31 * pSrcA,
    const arm_matrix_instance_q31 * pSrcB,
    arm_matrix_instance_q31 * pDst)
{
    q31_t const *pInB = (q31_t const *) pSrcB->pData; /* input data matrix pointer B */
    q31_t const *pInA = (q31_t const *) pSrcA->pData; /* input data matrix pointer A */
    q31_t       *pOut = pDst->pData;                  /* output data matrix pointer */
    q31_t       *px;                                  /* Temporary output data matrix pointer */
    uint16_t     numRowsA = pSrcA->numRows;           /* number of rows of input matrix A */
    uint16_t     numColsB = pSrcB->numCols;           /* number of columns of input matrix B */
    uint16_t     numColsA = pSrcA->numCols;           /* number of columns of input matrix A */
    uint16_t     col, i = 0U, row = numRowsA;         /* loop counters */
    arm_status   status;                              /* status of matrix multiplication */
    uint32x4_t   vecOffs, vecColBOffs;
    uint32_t     blkCnt, rowCnt;                      /* loop counters */

#ifdef ARM_MATH_MATRIX_CHECK
    /* Check for matrix mismatch condition */
    if ((pSrcA->numCols != pSrcB->numRows) ||
        (pSrcA->numRows != pDst->numRows) || (pSrcB->numCols != pDst->numCols))
    {
        /* Set status as ARM_MATH_SIZE_MISMATCH */
        status = ARM_MATH_SIZE_MISMATCH;
    }
    else
#endif /* #ifdef ARM_MATH_MATRIX_CHECK */
    {
        /*
         * small square matrix specialized routines
         */
        if (numRowsA == numColsB && numColsB == numColsA)
        {
            if (numRowsA == 1)
            {
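                /*
                 * 1x1 case: single complex product
                 * (a + jb)(c + jd) = (ac - bd) + j(ad + bc),
                 * computed in Q2.62 and narrowed back to Q1.31.
                 */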
                q63_t sumReal = (q63_t) pInA[0] * pInB[0];
                sumReal -= (q63_t) pInA[1] * pInB[1];

                q63_t sumImag = (q63_t) pInA[0] * pInB[1];
                sumImag += (q63_t) pInA[1] * pInB[0];

                /* Store result in destination buffer */
                pOut[0] = (q31_t) clip_q63_to_q31(sumReal >> 31);
                pOut[1] = (q31_t) clip_q63_to_q31(sumImag >> 31);
                return (ARM_MATH_SUCCESS);
            }
            else if (numRowsA == 2)
                return arm_mat_cmplx_mult_q31_2x2_mve(pSrcA, pSrcB, pDst);
            else if (numRowsA == 3)
                return arm_mat_cmplx_mult_q31_3x3_mve(pSrcA, pSrcB, pDst);
            else if (numRowsA == 4)
                return arm_mat_cmplx_mult_q31_4x4_mve(pSrcA, pSrcB, pDst);
        }
        vecColBOffs[0] = 0;
        vecColBOffs[1] = 1;
        vecColBOffs[2] = numColsB * CMPLX_DIM;
        vecColBOffs[3] = (numColsB * CMPLX_DIM) + 1;
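        /*
         * vecColBOffs walks one column of B: lanes 0-1 pick the real/imag pair
         * of the current row, lanes 2-3 the pair one row below, so each gather
         * fetches two consecutive column entries.
         */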
        /*
         * The following loop performs the dot-product of each row in pSrcA with each column in pSrcB
         */

        /*
         * row loop
         */
        rowCnt = row >> 1;
        while (rowCnt > 0u)
        {
            /*
             * Output pointer is set to starting address of the row being processed
             */
            px = pOut + i * CMPLX_DIM;
            i = i + 2 * numColsB;
            /*
             * For every row wise process, the column loop counter is to be initiated
             */
            col = numColsB;
            /*
             * For every row wise process, the pInB pointer is set
             * to the starting address of the pSrcB data
             */
            pInB = (q31_t const *) pSrcB->pData;
            /*
             * column loop
             */
            while (col > 0u)
            {
                /*
                 * compute output elements for the current column;
                 * numColsA complex MAC operations are performed per output
                 */
                q31_t const *pSrcA0Vec, *pSrcA1Vec;
                q31_t const *pInA0 = pInA;
                q31_t const *pInA1 = pInA0 + numColsA * CMPLX_DIM;
                q63_t        acc0, acc1, acc2, acc3;

                acc0 = 0LL;
                acc1 = 0LL;
                acc2 = 0LL;
                acc3 = 0LL;

                pSrcA0Vec = (q31_t const *) pInA0;
                pSrcA1Vec = (q31_t const *) pInA1;

                vecOffs = vecColBOffs;

                /*
                 * process a 2 x 1 block of outputs (two A rows, one B column)
                 */
                blkCnt = (numColsA * CMPLX_DIM) >> 2;
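                /*
                 * Each A row spans numColsA * CMPLX_DIM q31 values; consume them
                 * four at a time (two complex samples per vector iteration), with
                 * a predicated tail below for any remainder.
                 */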
                while (blkCnt > 0U)
                {
                    q31x4_t vecB, vecA;

                    vecB = vldrwq_gather_shifted_offset(pInB, vecOffs);
                    /*
                     * move Matrix B read offsets, 2 rows down
                     */
                    vecOffs = vecOffs + (uint32_t) (numColsB * 2 * CMPLX_DIM);

                    vecA = vld1q(pSrcA0Vec);
                    pSrcA0Vec += 4;
                    acc0 = vmlsldavaq(acc0, vecA, vecB);
                    acc1 = vmlaldavaxq(acc1, vecA, vecB);

                    vecA = vld1q(pSrcA1Vec);
                    pSrcA1Vec += 4;
                    acc2 = vmlsldavaq(acc2, vecA, vecB);
                    acc3 = vmlaldavaxq(acc3, vecA, vecB);

                    blkCnt--;
                }

                /*
                 * tail
                 */
                blkCnt = (numColsA * CMPLX_DIM) & 3;
                if (blkCnt > 0U)
                {
                    mve_pred16_t p0 = vctp32q(blkCnt);
                    q31x4_t      vecB, vecA;

                    vecB = vldrwq_gather_shifted_offset_z(pInB, vecOffs, p0);
                    /*
                     * move Matrix B read offsets, 2 rows down
                     */
                    vecOffs = vecOffs + (uint32_t) (numColsB * 2 * CMPLX_DIM);

                    vecA = vld1q(pSrcA0Vec);
                    acc0 = vmlsldavaq(acc0, vecA, vecB);
                    acc1 = vmlaldavaxq(acc1, vecA, vecB);

                    vecA = vld1q(pSrcA1Vec);
                    acc2 = vmlsldavaq(acc2, vecA, vecB);
                    acc3 = vmlaldavaxq(acc3, vecA, vecB);
                }

                px[0 * CMPLX_DIM * numColsB + 0] = (q31_t) clip_q63_to_q31(acc0 >> 31);
                px[0 * CMPLX_DIM * numColsB + 1] = (q31_t) clip_q63_to_q31(acc1 >> 31);
                px[1 * CMPLX_DIM * numColsB + 0] = (q31_t) clip_q63_to_q31(acc2 >> 31);
                px[1 * CMPLX_DIM * numColsB + 1] = (q31_t) clip_q63_to_q31(acc3 >> 31);
                px += CMPLX_DIM;

                /*
                 * Decrement the column loop counter
                 */
                col--;
                /*
                 * Update the pointer pInB to point to the starting address of the next column
                 */
                pInB = (q31_t const *) pSrcB->pData + (numColsB - col) * CMPLX_DIM;
            }

            /*
             * Update the pointer pInA to point to the starting address of the next row
             */
            pInA += (numColsA * 2) * CMPLX_DIM;
            /*
             * Decrement the row loop counter
             */
            rowCnt--;
        }

        rowCnt = row & 1;
        while (rowCnt > 0u)
        {
            /*
             * Output pointer is set to starting address of the row being processed
             */
            px = pOut + i * CMPLX_DIM;
            i = i + numColsB;
            /*
             * For every row wise process, the column loop counter is to be initiated
             */
            col = numColsB;
            /*
             * For every row wise process, the pInB pointer is set
             * to the starting address of the pSrcB data
             */
            pInB = (q31_t const *) pSrcB->pData;
            /*
             * column loop
             */
            while (col > 0u)
            {
                /*
                 * compute the output element for the current column;
                 * numColsA complex MAC operations are performed per output
                 */
                q31_t const *pSrcA0Vec;
                q31_t const *pInA0 = pInA;
                q63_t        acc0, acc1;

                acc0 = 0LL;
                acc1 = 0LL;

                pSrcA0Vec = (q31_t const *) pInA0;

                vecOffs = vecColBOffs;

                /*
                 * process a 1 x 1 block output
                 */
                blkCnt = (numColsA * CMPLX_DIM) >> 2;
                while (blkCnt > 0U)
                {
                    q31x4_t vecB, vecA;

                    vecB = vldrwq_gather_shifted_offset(pInB, vecOffs);
                    /*
                     * move Matrix B read offsets, 2 rows down
                     */
                    vecOffs = vecOffs + (uint32_t) (numColsB * 2 * CMPLX_DIM);

                    vecA = vld1q(pSrcA0Vec);
                    pSrcA0Vec += 4;
                    acc0 = vmlsldavaq(acc0, vecA, vecB);
                    acc1 = vmlaldavaxq(acc1, vecA, vecB);

                    blkCnt--;
                }
                /*
                 * tail
                 */
                blkCnt = (numColsA * CMPLX_DIM) & 3;
                if (blkCnt > 0U)
                {
                    mve_pred16_t p0 = vctp32q(blkCnt);
                    q31x4_t      vecB, vecA;

                    vecB = vldrwq_gather_shifted_offset_z(pInB, vecOffs, p0);
                    /*
                     * move Matrix B read offsets, 2 rows down
                     */
                    vecOffs = vecOffs + (uint32_t) (numColsB * 2 * CMPLX_DIM);

                    vecA = vld1q(pSrcA0Vec);
                    acc0 = vmlsldavaq(acc0, vecA, vecB);
                    acc1 = vmlaldavaxq(acc1, vecA, vecB);
                }

                px[0] = (q31_t) clip_q63_to_q31(acc0 >> 31);
                px[1] = (q31_t) clip_q63_to_q31(acc1 >> 31);
                px += CMPLX_DIM;

                /*
                 * Decrement the column loop counter
                 */
                col--;
                /*
                 * Update the pointer pInB to point to the starting address of the next column
                 */
                pInB = (q31_t const *) pSrcB->pData + (numColsB - col) * CMPLX_DIM;
            }

            /*
             * Update the pointer pInA to point to the starting address of the next row
             */
            pInA += numColsA * CMPLX_DIM;
            rowCnt--;
        }

        /* Set status as ARM_MATH_SUCCESS */
        status = ARM_MATH_SUCCESS;
    }

    /* Return to application */
    return (status);
}

#else

arm_status arm_mat_cmplx_mult_q31(
    const arm_matrix_instance_q31 * pSrcA,
    const arm_matrix_instance_q31 * pSrcB,
    arm_matrix_instance_q31 * pDst)
{
    q31_t *pIn1 = pSrcA->pData;            /* Input data matrix pointer A */
    q31_t *pIn2 = pSrcB->pData;            /* Input data matrix pointer B */
    q31_t *pInA = pSrcA->pData;            /* Input data matrix pointer A */
    q31_t *pOut = pDst->pData;             /* Output data matrix pointer */
    q31_t *px;                             /* Temporary output data matrix pointer */
    uint16_t numRowsA = pSrcA->numRows;    /* Number of rows of input matrix A */
    uint16_t numColsB = pSrcB->numCols;    /* Number of columns of input matrix B */
    uint16_t numColsA = pSrcA->numCols;    /* Number of columns of input matrix A */
    q63_t sumReal, sumImag;                /* Accumulators */
    q31_t a1, b1, c1, d1;
    uint32_t col, i = 0U, j, row = numRowsA, colCnt; /* loop counters */
    arm_status status;                     /* status of matrix multiplication */

#if defined (ARM_MATH_LOOPUNROLL)
    q31_t a0, b0, c0, d0;
#endif

#ifdef ARM_MATH_MATRIX_CHECK
    /* Check for matrix mismatch condition */
    if ((pSrcA->numCols != pSrcB->numRows) ||
        (pSrcA->numRows != pDst->numRows)  ||
        (pSrcB->numCols != pDst->numCols)    )
    {
        /* Set status as ARM_MATH_SIZE_MISMATCH */
        status = ARM_MATH_SIZE_MISMATCH;
    }
    else
#endif /* #ifdef ARM_MATH_MATRIX_CHECK */
    {
        /* The following loop performs the dot-product of each row in pSrcA with each column in pSrcB */

        /* row loop */
        do
        {
            /* Output pointer is set to starting address of the row being processed */
            px = pOut + 2 * i;

            /* For every row wise process, the column loop counter is to be initiated */
            col = numColsB;

            /* For every row wise process, the pIn2 pointer is set
             ** to the starting address of the pSrcB data */
            pIn2 = pSrcB->pData;

            j = 0U;

            /* column loop */
            do
            {
                /* Set the accumulators to zero */
                sumReal = 0;
                sumImag = 0;
                /* Initiate pointer pIn1 to point to starting address of the row of A being processed */
                pIn1 = pInA;
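                /*
                 * Each step below accumulates one complex product:
                 * (a + jb)(c + jd) = (ac - bd) + j(bc + ad),
                 * using 32x32->64-bit multiplies summed in the Q2.62 accumulators.
                 */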
#if defined (ARM_MATH_LOOPUNROLL)

                /* Apply loop unrolling and compute 4 MACs simultaneously. */
                colCnt = numColsA >> 2U;

                /* matrix multiplication */
                while (colCnt > 0U)
                {
                    /* Reading real part of complex matrix A */
                    a0 = *pIn1;

                    /* Reading real part of complex matrix B */
                    c0 = *pIn2;

                    /* Reading imaginary part of complex matrix A */
                    b0 = *(pIn1 + 1U);

                    /* Reading imaginary part of complex matrix B */
                    d0 = *(pIn2 + 1U);

                    /* Multiply and Accumulate */
                    sumReal += (q63_t) a0 * c0;
                    sumImag += (q63_t) b0 * c0;

                    /* update pointers */
                    pIn1 += 2U;
                    pIn2 += 2 * numColsB;

                    /* Multiply and Accumulate */
                    sumReal -= (q63_t) b0 * d0;
                    sumImag += (q63_t) a0 * d0;

                    /* c(m,n) = a(1,1) * b(1,1) + a(1,2) * b(2,1) + .... + a(m,p) * b(p,n) */

                    /* read real and imag values from pSrcA and pSrcB buffer */
                    a1 = *(pIn1     );
                    c1 = *(pIn2     );
                    b1 = *(pIn1 + 1U);
                    d1 = *(pIn2 + 1U);

                    /* Multiply and Accumulate */
                    sumReal += (q63_t) a1 * c1;
                    sumImag += (q63_t) b1 * c1;

                    /* update pointers */
                    pIn1 += 2U;
                    pIn2 += 2 * numColsB;

                    /* Multiply and Accumulate */
                    sumReal -= (q63_t) b1 * d1;
                    sumImag += (q63_t) a1 * d1;

                    a0 = *(pIn1     );
                    c0 = *(pIn2     );
                    b0 = *(pIn1 + 1U);
                    d0 = *(pIn2 + 1U);

                    /* Multiply and Accumulate */
                    sumReal += (q63_t) a0 * c0;
                    sumImag += (q63_t) b0 * c0;

                    /* update pointers */
                    pIn1 += 2U;
                    pIn2 += 2 * numColsB;

                    /* Multiply and Accumulate */
                    sumReal -= (q63_t) b0 * d0;
                    sumImag += (q63_t) a0 * d0;

                    /* c(m,n) = a(1,1) * b(1,1) + a(1,2) * b(2,1) + .... + a(m,p) * b(p,n) */
                    a1 = *(pIn1     );
                    c1 = *(pIn2     );
                    b1 = *(pIn1 + 1U);
                    d1 = *(pIn2 + 1U);

                    /* Multiply and Accumulate */
                    sumReal += (q63_t) a1 * c1;
                    sumImag += (q63_t) b1 * c1;

                    /* update pointers */
                    pIn1 += 2U;
                    pIn2 += 2 * numColsB;

                    /* Multiply and Accumulate */
                    sumReal -= (q63_t) b1 * d1;
                    sumImag += (q63_t) a1 * d1;

                    /* Decrement loop count */
                    colCnt--;
                }

                /* If the columns of pSrcA are not a multiple of 4, compute any remaining MACs here.
                 ** No loop unrolling is used. */
                colCnt = numColsA % 0x4U;

#else

                /* Initialize colCnt with number of samples */
                colCnt = numColsA;

#endif /* #if defined (ARM_MATH_LOOPUNROLL) */
                while (colCnt > 0U)
                {
                    /* c(m,n) = a(1,1) * b(1,1) + a(1,2) * b(2,1) + .... + a(m,p) * b(p,n) */
                    a1 = *(pIn1     );
                    c1 = *(pIn2     );
                    b1 = *(pIn1 + 1U);
                    d1 = *(pIn2 + 1U);

                    /* Multiply and Accumulate */
                    sumReal += (q63_t) a1 * c1;
                    sumImag += (q63_t) b1 * c1;

                    /* update pointers */
                    pIn1 += 2U;
                    pIn2 += 2 * numColsB;

                    /* Multiply and Accumulate */
                    sumReal -= (q63_t) b1 * d1;
                    sumImag += (q63_t) a1 * d1;

                    /* Decrement loop counter */
                    colCnt--;
                }

                /* Store result in destination buffer */
                *px++ = (q31_t) clip_q63_to_q31(sumReal >> 31);
                *px++ = (q31_t) clip_q63_to_q31(sumImag >> 31);

                /* Update pointer pIn2 to point to starting address of next column */
                j++;
                pIn2 = pSrcB->pData + 2U * j;

                /* Decrement column loop counter */
                col--;

            } while (col > 0U);

            /* Update pointer pInA to point to starting address of next row */
            i = i + numColsB;
            pInA = pInA + 2 * numColsA;

            /* Decrement row loop counter */
            row--;

        } while (row > 0U);

        /* Set status as ARM_MATH_SUCCESS */
        status = ARM_MATH_SUCCESS;
    }

    /* Return to application */
    return (status);
}
#endif /* defined(ARM_MATH_MVEI) && !defined(ARM_MATH_AUTOVECTORIZE) */

/**
  @} end of CmplxMatrixMult group
 */