/* ----------------------------------------------------------------------
 * Project:      CMSIS DSP Library
 * Title:        arm_cmplx_mult_cmplx_q31.c
 * Description:  Q31 complex-by-complex multiplication
 *
 * $Date:        23 April 2021
 * $Revision:    V1.9.0
 *
 * Target Processor: Cortex-M and Cortex-A cores
 * -------------------------------------------------------------------- */
/*
 * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved.
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the License); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an AS IS BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "dsp/complex_math_functions.h"
/**
  @ingroup groupCmplxMath
 */

/**
  @addtogroup CmplxByCmplxMult
  @{
 */

/**
  @brief         Q31 complex-by-complex multiplication.
  @param[in]     pSrcA       points to first input vector
  @param[in]     pSrcB       points to second input vector
  @param[out]    pDst        points to output vector
  @param[in]     numSamples  number of samples in each vector

  @par           Scaling and Overflow Behavior
                 The function implements 1.31 by 1.31 multiplications and finally
                 output is converted into 3.29 format.
                 Input down scaling is not required.
 */
  46. #if defined(ARM_MATH_MVEI) && !defined(ARM_MATH_AUTOVECTORIZE)
  47. void arm_cmplx_mult_cmplx_q31(
  48. const q31_t * pSrcA,
  49. const q31_t * pSrcB,
  50. q31_t * pDst,
  51. uint32_t numSamples)
  52. {
  53. int32_t blkCnt;
  54. q31x4_t vecSrcA, vecSrcB;
  55. q31x4_t vecSrcC, vecSrcD;
  56. q31x4_t vecDst;
  57. blkCnt = numSamples >> 2;
  58. blkCnt -= 1;
  59. if (blkCnt > 0) {
  60. /* should give more freedom to generate stall free code */
  61. vecSrcA = vld1q(pSrcA);
  62. vecSrcB = vld1q(pSrcB);
  63. pSrcA += 4;
  64. pSrcB += 4;
  65. while (blkCnt > 0) {
  66. /* C[2 * i] = A[2 * i] * B[2 * i] - A[2 * i + 1] * B[2 * i + 1]. */
  67. vecDst = vqdmlsdhq(vuninitializedq_s32(), vecSrcA, vecSrcB);
  68. vecSrcC = vld1q(pSrcA);
  69. pSrcA += 4;
  70. /* C[2 * i + 1] = A[2 * i] * B[2 * i + 1] + A[2 * i + 1] * B[2 * i]. */
  71. vecDst = vqdmladhxq(vecDst, vecSrcA, vecSrcB);
  72. vecSrcD = vld1q(pSrcB);
  73. pSrcB += 4;
  74. vst1q(pDst, vshrq(vecDst, 2));
  75. pDst += 4;
  76. vecDst = vqdmlsdhq(vuninitializedq_s32(), vecSrcC, vecSrcD);
  77. vecSrcA = vld1q(pSrcA);
  78. pSrcA += 4;
  79. vecDst = vqdmladhxq(vecDst, vecSrcC, vecSrcD);
  80. vecSrcB = vld1q(pSrcB);
  81. pSrcB += 4;
  82. vst1q(pDst, vshrq(vecDst, 2));
  83. pDst += 4;
  84. /*
  85. * Decrement the blockSize loop counter
  86. */
  87. blkCnt--;
  88. }
  89. /* process last elements out of the loop avoid the armclang breaking the SW pipeline */
  90. vecDst = vqdmlsdhq(vuninitializedq_s32(), vecSrcA, vecSrcB);
  91. vecSrcC = vld1q(pSrcA);
  92. vecDst = vqdmladhxq(vecDst, vecSrcA, vecSrcB);
  93. vecSrcD = vld1q(pSrcB);
  94. vst1q(pDst, vshrq(vecDst, 2));
  95. pDst += 4;
  96. vecDst = vqdmlsdhq(vuninitializedq_s32(), vecSrcC, vecSrcD);
  97. vecDst = vqdmladhxq(vecDst, vecSrcC, vecSrcD);
  98. vst1q(pDst, vshrq(vecDst, 2));
  99. pDst += 4;
  100. /*
  101. * tail
  102. */
  103. blkCnt = CMPLX_DIM * (numSamples & 3);
  104. do {
  105. mve_pred16_t p = vctp32q(blkCnt);
  106. pSrcA += 4;
  107. pSrcB += 4;
  108. vecSrcA = vldrwq_z_s32(pSrcA, p);
  109. vecSrcB = vldrwq_z_s32(pSrcB, p);
  110. vecDst = vqdmlsdhq_m(vuninitializedq_s32(), vecSrcA, vecSrcB, p);
  111. vecDst = vqdmladhxq_m(vecDst, vecSrcA, vecSrcB, p);
  112. vecDst = vshrq_m(vuninitializedq_s32(), vecDst, 2, p);
  113. vstrwq_p_s32(pDst, vecDst, p);
  114. pDst += 4;
  115. blkCnt -= 4;
  116. }
  117. while ((int32_t) blkCnt > 0);
  118. } else {
  119. blkCnt = numSamples * CMPLX_DIM;
  120. while (blkCnt > 0) {
  121. mve_pred16_t p = vctp32q(blkCnt);
  122. vecSrcA = vldrwq_z_s32(pSrcA, p);
  123. vecSrcB = vldrwq_z_s32(pSrcB, p);
  124. vecDst = vqdmlsdhq_m(vuninitializedq_s32(), vecSrcA, vecSrcB, p);
  125. vecDst = vqdmladhxq_m(vecDst, vecSrcA, vecSrcB, p);
  126. vecDst = vshrq_m(vuninitializedq_s32(), vecDst, 2, p);
  127. vstrwq_p_s32(pDst, vecDst, p);
  128. pDst += 4;
  129. pSrcA += 4;
  130. pSrcB += 4;
  131. blkCnt -= 4;
  132. }
  133. }
  134. }
  135. #else
  136. void arm_cmplx_mult_cmplx_q31(
  137. const q31_t * pSrcA,
  138. const q31_t * pSrcB,
  139. q31_t * pDst,
  140. uint32_t numSamples)
  141. {
  142. uint32_t blkCnt; /* Loop counter */
  143. q31_t a, b, c, d; /* Temporary variables */
  144. #if defined (ARM_MATH_LOOPUNROLL)
  145. /* Loop unrolling: Compute 4 outputs at a time */
  146. blkCnt = numSamples >> 2U;
  147. while (blkCnt > 0U)
  148. {
  149. /* C[2 * i ] = A[2 * i] * B[2 * i ] - A[2 * i + 1] * B[2 * i + 1]. */
  150. /* C[2 * i + 1] = A[2 * i] * B[2 * i + 1] + A[2 * i + 1] * B[2 * i ]. */
  151. a = *pSrcA++;
  152. b = *pSrcA++;
  153. c = *pSrcB++;
  154. d = *pSrcB++;
  155. /* store result in 3.29 format in destination buffer. */
  156. *pDst++ = (q31_t) ( (((q63_t) a * c) >> 33) - (((q63_t) b * d) >> 33) );
  157. *pDst++ = (q31_t) ( (((q63_t) a * d) >> 33) + (((q63_t) b * c) >> 33) );
  158. a = *pSrcA++;
  159. b = *pSrcA++;
  160. c = *pSrcB++;
  161. d = *pSrcB++;
  162. *pDst++ = (q31_t) ( (((q63_t) a * c) >> 33) - (((q63_t) b * d) >> 33) );
  163. *pDst++ = (q31_t) ( (((q63_t) a * d) >> 33) + (((q63_t) b * c) >> 33) );
  164. a = *pSrcA++;
  165. b = *pSrcA++;
  166. c = *pSrcB++;
  167. d = *pSrcB++;
  168. *pDst++ = (q31_t) ( (((q63_t) a * c) >> 33) - (((q63_t) b * d) >> 33) );
  169. *pDst++ = (q31_t) ( (((q63_t) a * d) >> 33) + (((q63_t) b * c) >> 33) );
  170. a = *pSrcA++;
  171. b = *pSrcA++;
  172. c = *pSrcB++;
  173. d = *pSrcB++;
  174. *pDst++ = (q31_t) ( (((q63_t) a * c) >> 33) - (((q63_t) b * d) >> 33) );
  175. *pDst++ = (q31_t) ( (((q63_t) a * d) >> 33) + (((q63_t) b * c) >> 33) );
  176. /* Decrement loop counter */
  177. blkCnt--;
  178. }
  179. /* Loop unrolling: Compute remaining outputs */
  180. blkCnt = numSamples % 0x4U;
  181. #else
  182. /* Initialize blkCnt with number of samples */
  183. blkCnt = numSamples;
  184. #endif /* #if defined (ARM_MATH_LOOPUNROLL) */
  185. while (blkCnt > 0U)
  186. {
  187. /* C[2 * i ] = A[2 * i] * B[2 * i ] - A[2 * i + 1] * B[2 * i + 1]. */
  188. /* C[2 * i + 1] = A[2 * i] * B[2 * i + 1] + A[2 * i + 1] * B[2 * i ]. */
  189. a = *pSrcA++;
  190. b = *pSrcA++;
  191. c = *pSrcB++;
  192. d = *pSrcB++;
  193. /* store result in 3.29 format in destination buffer. */
  194. *pDst++ = (q31_t) ( (((q63_t) a * c) >> 33) - (((q63_t) b * d) >> 33) );
  195. *pDst++ = (q31_t) ( (((q63_t) a * d) >> 33) + (((q63_t) b * c) >> 33) );
  196. /* Decrement loop counter */
  197. blkCnt--;
  198. }
  199. }
  200. #endif /* defined(ARM_MATH_MVEI) */
/**
  @} end of CmplxByCmplxMult group
 */