/* ----------------------------------------------------------------------
 * Project:      CMSIS DSP Library
 * Title:        arm_cmplx_mult_cmplx_f32.c
 * Description:  Floating-point complex-by-complex multiplication
 *
 * $Date:        23 April 2021
 * $Revision:    V1.9.0
 *
 * Target Processor: Cortex-M and Cortex-A cores
 * -------------------------------------------------------------------- */
/*
 * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved.
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the License); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an AS IS BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "dsp/complex_math_functions.h"

/**
  @ingroup groupCmplxMath
 */

/**
  @defgroup CmplxByCmplxMult Complex-by-Complex Multiplication

  Multiplies a complex vector by another complex vector and generates a complex result.
  The data in the complex arrays is stored in an interleaved fashion
  (real, imag, real, imag, ...).
  The parameter <code>numSamples</code> represents the number of complex
  samples processed. The complex arrays have a total of <code>2*numSamples</code>
  real values.

  The underlying algorithm is used:

  <pre>
  for (n = 0; n < numSamples; n++) {
      pDst[(2*n)+0] = pSrcA[(2*n)+0] * pSrcB[(2*n)+0] - pSrcA[(2*n)+1] * pSrcB[(2*n)+1];
      pDst[(2*n)+1] = pSrcA[(2*n)+0] * pSrcB[(2*n)+1] + pSrcA[(2*n)+1] * pSrcB[(2*n)+0];
  }
  </pre>

  There are separate functions for floating-point, Q15, and Q31 data types.
 */

/**
  @addtogroup CmplxByCmplxMult
  @{
 */

/**
  @brief         Floating-point complex-by-complex multiplication.
  @param[in]     pSrcA       points to first input vector
  @param[in]     pSrcB       points to second input vector
  @param[out]    pDst        points to output vector
  @param[in]     numSamples  number of samples in each vector
  @return        none
 */
#if defined(ARM_MATH_MVEF) && !defined(ARM_MATH_AUTOVECTORIZE)
  62. void arm_cmplx_mult_cmplx_f32(
  63. const float32_t * pSrcA,
  64. const float32_t * pSrcB,
  65. float32_t * pDst,
  66. uint32_t numSamples)
  67. {
  68. int32_t blkCnt;
  69. f32x4_t vecSrcA, vecSrcB;
  70. f32x4_t vecSrcC, vecSrcD;
  71. f32x4_t vec_acc;
  72. blkCnt = numSamples >> 2;
  73. blkCnt -= 1;
  74. if (blkCnt > 0) {
  75. /* should give more freedom to generate stall free code */
  76. vecSrcA = vld1q(pSrcA);
  77. vecSrcB = vld1q(pSrcB);
  78. pSrcA += 4;
  79. pSrcB += 4;
  80. while (blkCnt > 0) {
  81. vec_acc = vcmulq(vecSrcA, vecSrcB);
  82. vecSrcC = vld1q(pSrcA);
  83. pSrcA += 4;
  84. vec_acc = vcmlaq_rot90(vec_acc, vecSrcA, vecSrcB);
  85. vecSrcD = vld1q(pSrcB);
  86. pSrcB += 4;
  87. vst1q(pDst, vec_acc);
  88. pDst += 4;
  89. vec_acc = vcmulq(vecSrcC, vecSrcD);
  90. vecSrcA = vld1q(pSrcA);
  91. pSrcA += 4;
  92. vec_acc = vcmlaq_rot90(vec_acc, vecSrcC, vecSrcD);
  93. vecSrcB = vld1q(pSrcB);
  94. pSrcB += 4;
  95. vst1q(pDst, vec_acc);
  96. pDst += 4;
  97. /*
  98. * Decrement the blockSize loop counter
  99. */
  100. blkCnt--;
  101. }
  102. /* process last elements out of the loop avoid the armclang breaking the SW pipeline */
  103. vec_acc = vcmulq(vecSrcA, vecSrcB);
  104. vecSrcC = vld1q(pSrcA);
  105. vec_acc = vcmlaq_rot90(vec_acc, vecSrcA, vecSrcB);
  106. vecSrcD = vld1q(pSrcB);
  107. vst1q(pDst, vec_acc);
  108. pDst += 4;
  109. vec_acc = vcmulq(vecSrcC, vecSrcD);
  110. vec_acc = vcmlaq_rot90(vec_acc, vecSrcC, vecSrcD);
  111. vst1q(pDst, vec_acc);
  112. pDst += 4;
  113. /*
  114. * tail
  115. */
  116. blkCnt = CMPLX_DIM * (numSamples & 3);
  117. while (blkCnt > 0) {
  118. mve_pred16_t p = vctp32q(blkCnt);
  119. pSrcA += 4;
  120. pSrcB += 4;
  121. vecSrcA = vldrwq_z_f32(pSrcA, p);
  122. vecSrcB = vldrwq_z_f32(pSrcB, p);
  123. vec_acc = vcmulq_m(vuninitializedq_f32(),vecSrcA, vecSrcB, p);
  124. vec_acc = vcmlaq_rot90_m(vec_acc, vecSrcA, vecSrcB, p);
  125. vstrwq_p_f32(pDst, vec_acc, p);
  126. pDst += 4;
  127. blkCnt -= 4;
  128. }
  129. } else {
  130. /* small vector */
  131. blkCnt = numSamples * CMPLX_DIM;
  132. vec_acc = vdupq_n_f32(0.0f);
  133. do {
  134. mve_pred16_t p = vctp32q(blkCnt);
  135. vecSrcA = vldrwq_z_f32(pSrcA, p);
  136. vecSrcB = vldrwq_z_f32(pSrcB, p);
  137. vec_acc = vcmulq_m(vuninitializedq_f32(),vecSrcA, vecSrcB, p);
  138. vec_acc = vcmlaq_rot90_m(vec_acc, vecSrcA, vecSrcB, p);
  139. vstrwq_p_f32(pDst, vec_acc, p);
  140. pDst += 4;
  141. /*
  142. * Decrement the blkCnt loop counter
  143. * Advance vector source and destination pointers
  144. */
  145. pSrcA += 4;
  146. pSrcB += 4;
  147. blkCnt -= 4;
  148. }
  149. while (blkCnt > 0);
  150. }
  151. }
#else
  153. void arm_cmplx_mult_cmplx_f32(
  154. const float32_t * pSrcA,
  155. const float32_t * pSrcB,
  156. float32_t * pDst,
  157. uint32_t numSamples)
  158. {
  159. uint32_t blkCnt; /* Loop counter */
  160. float32_t a, b, c, d; /* Temporary variables to store real and imaginary values */
  161. #if defined(ARM_MATH_NEON) && !defined(ARM_MATH_AUTOVECTORIZE)
  162. float32x4x2_t va, vb;
  163. float32x4x2_t outCplx;
  164. /* Compute 4 outputs at a time */
  165. blkCnt = numSamples >> 2U;
  166. while (blkCnt > 0U)
  167. {
  168. va = vld2q_f32(pSrcA); // load & separate real/imag pSrcA (de-interleave 2)
  169. vb = vld2q_f32(pSrcB); // load & separate real/imag pSrcB
  170. /* Increment pointers */
  171. pSrcA += 8;
  172. pSrcB += 8;
  173. /* Re{C} = Re{A}*Re{B} - Im{A}*Im{B} */
  174. outCplx.val[0] = vmulq_f32(va.val[0], vb.val[0]);
  175. outCplx.val[0] = vmlsq_f32(outCplx.val[0], va.val[1], vb.val[1]);
  176. /* Im{C} = Re{A}*Im{B} + Im{A}*Re{B} */
  177. outCplx.val[1] = vmulq_f32(va.val[0], vb.val[1]);
  178. outCplx.val[1] = vmlaq_f32(outCplx.val[1], va.val[1], vb.val[0]);
  179. vst2q_f32(pDst, outCplx);
  180. /* Increment pointer */
  181. pDst += 8;
  182. /* Decrement the loop counter */
  183. blkCnt--;
  184. }
  185. /* Tail */
  186. blkCnt = numSamples & 3;
  187. #else
  188. #if defined (ARM_MATH_LOOPUNROLL) && !defined(ARM_MATH_AUTOVECTORIZE)
  189. /* Loop unrolling: Compute 4 outputs at a time */
  190. blkCnt = numSamples >> 2U;
  191. while (blkCnt > 0U)
  192. {
  193. /* C[2 * i ] = A[2 * i] * B[2 * i ] - A[2 * i + 1] * B[2 * i + 1]. */
  194. /* C[2 * i + 1] = A[2 * i] * B[2 * i + 1] + A[2 * i + 1] * B[2 * i ]. */
  195. a = *pSrcA++;
  196. b = *pSrcA++;
  197. c = *pSrcB++;
  198. d = *pSrcB++;
  199. /* store result in destination buffer. */
  200. *pDst++ = (a * c) - (b * d);
  201. *pDst++ = (a * d) + (b * c);
  202. a = *pSrcA++;
  203. b = *pSrcA++;
  204. c = *pSrcB++;
  205. d = *pSrcB++;
  206. *pDst++ = (a * c) - (b * d);
  207. *pDst++ = (a * d) + (b * c);
  208. a = *pSrcA++;
  209. b = *pSrcA++;
  210. c = *pSrcB++;
  211. d = *pSrcB++;
  212. *pDst++ = (a * c) - (b * d);
  213. *pDst++ = (a * d) + (b * c);
  214. a = *pSrcA++;
  215. b = *pSrcA++;
  216. c = *pSrcB++;
  217. d = *pSrcB++;
  218. *pDst++ = (a * c) - (b * d);
  219. *pDst++ = (a * d) + (b * c);
  220. /* Decrement loop counter */
  221. blkCnt--;
  222. }
  223. /* Loop unrolling: Compute remaining outputs */
  224. blkCnt = numSamples % 0x4U;
  225. #else
  226. /* Initialize blkCnt with number of samples */
  227. blkCnt = numSamples;
  228. #endif /* #if defined (ARM_MATH_LOOPUNROLL) */
  229. #endif /* #if defined(ARM_MATH_NEON) */
  230. while (blkCnt > 0U)
  231. {
  232. /* C[2 * i ] = A[2 * i] * B[2 * i ] - A[2 * i + 1] * B[2 * i + 1]. */
  233. /* C[2 * i + 1] = A[2 * i] * B[2 * i + 1] + A[2 * i + 1] * B[2 * i ]. */
  234. a = *pSrcA++;
  235. b = *pSrcA++;
  236. c = *pSrcB++;
  237. d = *pSrcB++;
  238. /* store result in destination buffer. */
  239. *pDst++ = (a * c) - (b * d);
  240. *pDst++ = (a * d) + (b * c);
  241. /* Decrement loop counter */
  242. blkCnt--;
  243. }
  244. }
#endif /* defined(ARM_MATH_MVEF) && !defined(ARM_MATH_AUTOVECTORIZE) */

/**
  @} end of CmplxByCmplxMult group
 */