  1. /* ----------------------------------------------------------------------
  2. * Project: CMSIS DSP Library
  3. * Title: arm_cmplx_mult_cmplx_q31.c
  4. * Description: Q31 complex-by-complex multiplication
  5. *
  6. * $Date: 23 April 2021
  7. * $Revision: V1.9.0
  8. *
  9. * Target Processor: Cortex-M and Cortex-A cores
  10. * -------------------------------------------------------------------- */
  11. /*
  12. * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved.
  13. *
  14. * SPDX-License-Identifier: Apache-2.0
  15. *
  16. * Licensed under the Apache License, Version 2.0 (the License); you may
  17. * not use this file except in compliance with the License.
  18. * You may obtain a copy of the License at
  19. *
  20. * www.apache.org/licenses/LICENSE-2.0
  21. *
  22. * Unless required by applicable law or agreed to in writing, software
  23. * distributed under the License is distributed on an AS IS BASIS, WITHOUT
  24. * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  25. * See the License for the specific language governing permissions and
  26. * limitations under the License.
  27. */
  28. #include "dsp/complex_math_functions.h"
  29. /**
  30. @ingroup groupCmplxMath
  31. */
  32. /**
  33. @addtogroup CmplxByCmplxMult
  34. @{
  35. */
  36. /**
  37. @brief Q31 complex-by-complex multiplication.
  38. @param[in] pSrcA points to first input vector
  39. @param[in] pSrcB points to second input vector
  40. @param[out] pDst points to output vector
  41. @param[in] numSamples number of samples in each vector
  42. @return none
  43. @par Scaling and Overflow Behavior
  44. The function implements 1.31 by 1.31 multiplications and finally output is converted into 3.29 format.
  45. Input down scaling is not required.
  46. */
  47. #if defined(ARM_MATH_MVEI) && !defined(ARM_MATH_AUTOVECTORIZE)
/*
 * MVE (Helium) variant.
 *
 * Data layout: both inputs are interleaved (real, imag) q31 pairs, so one
 * 128-bit vector of four q31 values holds two complex samples.  Each main-loop
 * iteration consumes two vectors per source, i.e. four complex samples.
 *
 * The loop is software-pipelined by hand: the loads for the next vectors are
 * interleaved with the multiply steps of the current ones, and the last full
 * block is peeled out of the loop (see comments below).
 */
void arm_cmplx_mult_cmplx_q31(
    const q31_t * pSrcA,
    const q31_t * pSrcB,
    q31_t * pDst,
    uint32_t numSamples)
{
    int32_t blkCnt;               /* loop counter, in blocks of 4 complex samples */
    q31x4_t vecSrcA, vecSrcB;     /* current pair of input vectors */
    q31x4_t vecSrcC, vecSrcD;     /* prefetched next pair of input vectors */
    q31x4_t vecDst;

    /* Number of full 4-complex-sample blocks; one block is subtracted because
       it is peeled off and handled after the loop, which lets the loop body
       prefetch one block ahead without reading past the input. */
    blkCnt = numSamples >> 2;
    blkCnt -= 1;
    if (blkCnt > 0) {
        /* should give more freedom to generate stall free code */
        vecSrcA = vld1q(pSrcA);
        vecSrcB = vld1q(pSrcB);
        pSrcA += 4;
        pSrcB += 4;

        while (blkCnt > 0) {
            /* C[2 * i] = A[2 * i] * B[2 * i] - A[2 * i + 1] * B[2 * i + 1]. */
            /* VQDMLSDH: saturating doubling multiply with subtraction of the
               paired halves -> real parts of both complex products in the vector. */
            vecDst = vqdmlsdhq(vuninitializedq_s32(), vecSrcA, vecSrcB);
            vecSrcC = vld1q(pSrcA);     /* prefetch next A vector */
            pSrcA += 4;

            /* C[2 * i + 1] = A[2 * i] * B[2 * i + 1] + A[2 * i + 1] * B[2 * i]. */
            /* VQDMLADHX: exchanged multiply-add -> imaginary parts, merged into
               the lanes VQDMLSDH left untouched. */
            vecDst = vqdmladhxq(vecDst, vecSrcA, vecSrcB);
            vecSrcD = vld1q(pSrcB);     /* prefetch next B vector */
            pSrcB += 4;

            /* >> 2 rescales the doubling-multiply result to the 3.29 output
               format documented above.  NOTE(review): exact intermediate
               format of vqdmlsdh/vqdmladhx inferred from the 3.29 contract —
               confirm against the MVE intrinsics reference. */
            vst1q(pDst, vshrq(vecDst, 2));
            pDst += 4;

            /* Same real/imag sequence for the prefetched second vector pair. */
            vecDst = vqdmlsdhq(vuninitializedq_s32(), vecSrcC, vecSrcD);
            vecSrcA = vld1q(pSrcA);
            pSrcA += 4;

            vecDst = vqdmladhxq(vecDst, vecSrcC, vecSrcD);
            vecSrcB = vld1q(pSrcB);
            pSrcB += 4;

            vst1q(pDst, vshrq(vecDst, 2));
            pDst += 4;
            /*
             * Decrement the blockSize loop counter
             */
            blkCnt--;
        }

        /* process last elements out of the loop avoid the armclang breaking the SW pipeline */
        vecDst = vqdmlsdhq(vuninitializedq_s32(), vecSrcA, vecSrcB);
        vecSrcC = vld1q(pSrcA);     /* NOTE: no pointer post-increment here... */
        vecDst = vqdmladhxq(vecDst, vecSrcA, vecSrcB);
        vecSrcD = vld1q(pSrcB);     /* ...nor here; the tail loop advances first */
        vst1q(pDst, vshrq(vecDst, 2));
        pDst += 4;

        vecDst = vqdmlsdhq(vuninitializedq_s32(), vecSrcC, vecSrcD);
        vecDst = vqdmladhxq(vecDst, vecSrcC, vecSrcD);
        vst1q(pDst, vshrq(vecDst, 2));
        pDst += 4;

        /*
         * tail
         */
        /* Remaining q31 elements (numSamples mod 4 complex samples, i.e. < 8
           q31 values), handled with MVE tail predication.  With zero remainder
           the do-while still runs once, but vctp32q(0) yields an all-false
           predicate so the loads/stores are inactive. */
        blkCnt = CMPLX_DIM * (numSamples & 3);
        do {
            mve_pred16_t p = vctp32q(blkCnt);
            /* Advance BEFORE loading: the peeled epilogue above consumed the
               current positions without post-incrementing the pointers. */
            pSrcA += 4;
            pSrcB += 4;
            vecSrcA = vldrwq_z_s32(pSrcA, p);
            vecSrcB = vldrwq_z_s32(pSrcB, p);
            vecDst = vqdmlsdhq_m(vuninitializedq_s32(), vecSrcA, vecSrcB, p);
            vecDst = vqdmladhxq_m(vecDst, vecSrcA, vecSrcB, p);
            vecDst = vshrq_m(vuninitializedq_s32(), vecDst, 2, p);
            vstrwq_p_s32(pDst, vecDst, p);  /* predicated store: only valid lanes */
            pDst += 4;
            blkCnt -= 4;
        }
        while ((int32_t) blkCnt > 0);
    } else {
        /* Short input (fewer than 8 complex samples): no pipelining, just a
           plain predicated loop over all CMPLX_DIM * numSamples q31 values. */
        blkCnt = numSamples * CMPLX_DIM;
        while (blkCnt > 0) {
            mve_pred16_t p = vctp32q(blkCnt);
            vecSrcA = vldrwq_z_s32(pSrcA, p);
            vecSrcB = vldrwq_z_s32(pSrcB, p);
            vecDst = vqdmlsdhq_m(vuninitializedq_s32(), vecSrcA, vecSrcB, p);
            vecDst = vqdmladhxq_m(vecDst, vecSrcA, vecSrcB, p);
            vecDst = vshrq_m(vuninitializedq_s32(), vecDst, 2, p);
            vstrwq_p_s32(pDst, vecDst, p);
            pDst += 4;
            pSrcA += 4;
            pSrcB += 4;
            blkCnt -= 4;
        }
    }
}
  136. #else
  137. void arm_cmplx_mult_cmplx_q31(
  138. const q31_t * pSrcA,
  139. const q31_t * pSrcB,
  140. q31_t * pDst,
  141. uint32_t numSamples)
  142. {
  143. uint32_t blkCnt; /* Loop counter */
  144. q31_t a, b, c, d; /* Temporary variables */
  145. #if defined (ARM_MATH_LOOPUNROLL)
  146. /* Loop unrolling: Compute 4 outputs at a time */
  147. blkCnt = numSamples >> 2U;
  148. while (blkCnt > 0U)
  149. {
  150. /* C[2 * i ] = A[2 * i] * B[2 * i ] - A[2 * i + 1] * B[2 * i + 1]. */
  151. /* C[2 * i + 1] = A[2 * i] * B[2 * i + 1] + A[2 * i + 1] * B[2 * i ]. */
  152. a = *pSrcA++;
  153. b = *pSrcA++;
  154. c = *pSrcB++;
  155. d = *pSrcB++;
  156. /* store result in 3.29 format in destination buffer. */
  157. *pDst++ = (q31_t) ( (((q63_t) a * c) >> 33) - (((q63_t) b * d) >> 33) );
  158. *pDst++ = (q31_t) ( (((q63_t) a * d) >> 33) + (((q63_t) b * c) >> 33) );
  159. a = *pSrcA++;
  160. b = *pSrcA++;
  161. c = *pSrcB++;
  162. d = *pSrcB++;
  163. *pDst++ = (q31_t) ( (((q63_t) a * c) >> 33) - (((q63_t) b * d) >> 33) );
  164. *pDst++ = (q31_t) ( (((q63_t) a * d) >> 33) + (((q63_t) b * c) >> 33) );
  165. a = *pSrcA++;
  166. b = *pSrcA++;
  167. c = *pSrcB++;
  168. d = *pSrcB++;
  169. *pDst++ = (q31_t) ( (((q63_t) a * c) >> 33) - (((q63_t) b * d) >> 33) );
  170. *pDst++ = (q31_t) ( (((q63_t) a * d) >> 33) + (((q63_t) b * c) >> 33) );
  171. a = *pSrcA++;
  172. b = *pSrcA++;
  173. c = *pSrcB++;
  174. d = *pSrcB++;
  175. *pDst++ = (q31_t) ( (((q63_t) a * c) >> 33) - (((q63_t) b * d) >> 33) );
  176. *pDst++ = (q31_t) ( (((q63_t) a * d) >> 33) + (((q63_t) b * c) >> 33) );
  177. /* Decrement loop counter */
  178. blkCnt--;
  179. }
  180. /* Loop unrolling: Compute remaining outputs */
  181. blkCnt = numSamples % 0x4U;
  182. #else
  183. /* Initialize blkCnt with number of samples */
  184. blkCnt = numSamples;
  185. #endif /* #if defined (ARM_MATH_LOOPUNROLL) */
  186. while (blkCnt > 0U)
  187. {
  188. /* C[2 * i ] = A[2 * i] * B[2 * i ] - A[2 * i + 1] * B[2 * i + 1]. */
  189. /* C[2 * i + 1] = A[2 * i] * B[2 * i + 1] + A[2 * i + 1] * B[2 * i ]. */
  190. a = *pSrcA++;
  191. b = *pSrcA++;
  192. c = *pSrcB++;
  193. d = *pSrcB++;
  194. /* store result in 3.29 format in destination buffer. */
  195. *pDst++ = (q31_t) ( (((q63_t) a * c) >> 33) - (((q63_t) b * d) >> 33) );
  196. *pDst++ = (q31_t) ( (((q63_t) a * d) >> 33) + (((q63_t) b * c) >> 33) );
  197. /* Decrement loop counter */
  198. blkCnt--;
  199. }
  200. }
  201. #endif /* defined(ARM_MATH_MVEI) */
  202. /**
  203. @} end of CmplxByCmplxMult group
  204. */