/* ----------------------------------------------------------------------
 * Project:      CMSIS DSP Library
 * Title:        arm_cmplx_mult_cmplx_q15.c
 * Description:  Q15 complex-by-complex multiplication
 *
 * $Date:        23 April 2021
 * $Revision:    V1.9.0
 *
 * Target Processor: Cortex-M and Cortex-A cores
 * -------------------------------------------------------------------- */
/*
 * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved.
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the License); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an AS IS BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
  28. #include "dsp/complex_math_functions.h"
/**
  @ingroup groupCmplxMath
 */

/**
  @addtogroup CmplxByCmplxMult
  @{
 */

/**
  @brief         Q15 complex-by-complex multiplication.
  @param[in]     pSrcA       points to first input vector
  @param[in]     pSrcB       points to second input vector
  @param[out]    pDst        points to output vector
  @param[in]     numSamples  number of samples in each vector
  @return        none

  @par           Scaling and Overflow Behavior
                   The function implements 1.15 by 1.15 multiplications and finally
                   output is converted into 3.13 format.
 */
  46. #if defined(ARM_MATH_MVEI) && !defined(ARM_MATH_AUTOVECTORIZE)
  47. void arm_cmplx_mult_cmplx_q15(
  48. const q15_t * pSrcA,
  49. const q15_t * pSrcB,
  50. q15_t * pDst,
  51. uint32_t numSamples)
  52. {
  53. int32_t blkCnt;
  54. q15x8_t vecSrcA, vecSrcB;
  55. q15x8_t vecSrcC, vecSrcD;
  56. q15x8_t vecDst;
  57. blkCnt = (numSamples >> 3);
  58. blkCnt -= 1;
  59. if (blkCnt > 0)
  60. {
  61. /* should give more freedom to generate stall free code */
  62. vecSrcA = vld1q(pSrcA);
  63. vecSrcB = vld1q(pSrcB);
  64. pSrcA += 8;
  65. pSrcB += 8;
  66. while (blkCnt > 0)
  67. {
  68. /* C[2 * i] = A[2 * i] * B[2 * i] - A[2 * i + 1] * B[2 * i + 1]. */
  69. vecDst = vqdmlsdhq(vuninitializedq_s16(), vecSrcA, vecSrcB);
  70. vecSrcC = vld1q(pSrcA);
  71. pSrcA += 8;
  72. /* C[2 * i + 1] = A[2 * i] * B[2 * i + 1] + A[2 * i + 1] * B[2 * i]. */
  73. vecDst = vqdmladhxq(vecDst, vecSrcA, vecSrcB);
  74. vecSrcD = vld1q(pSrcB);
  75. pSrcB += 8;
  76. vstrhq_s16(pDst, vshrq(vecDst, 2));
  77. pDst += 8;
  78. vecDst = vqdmlsdhq(vuninitializedq_s16(), vecSrcC, vecSrcD);
  79. vecSrcA = vld1q(pSrcA);
  80. pSrcA += 8;
  81. vecDst = vqdmladhxq(vecDst, vecSrcC, vecSrcD);
  82. vecSrcB = vld1q(pSrcB);
  83. pSrcB += 8;
  84. vstrhq_s16(pDst, vshrq(vecDst, 2));
  85. pDst += 8;
  86. /*
  87. * Decrement the blockSize loop counter
  88. */
  89. blkCnt--;
  90. }
  91. /* process last elements out of the loop avoid the armclang breaking the SW pipeline */
  92. vecDst = vqdmlsdhq(vuninitializedq_s16(), vecSrcA, vecSrcB);
  93. vecSrcC = vld1q(pSrcA);
  94. vecDst = vqdmladhxq(vecDst, vecSrcA, vecSrcB);
  95. vecSrcD = vld1q(pSrcB);
  96. vstrhq_s16(pDst, vshrq(vecDst, 2));
  97. pDst += 8;
  98. vecDst = vqdmlsdhq(vuninitializedq_s16(), vecSrcC, vecSrcD);
  99. vecDst = vqdmladhxq(vecDst, vecSrcC, vecSrcD);
  100. vstrhq_s16(pDst, vshrq(vecDst, 2));
  101. pDst += 8;
  102. /*
  103. * tail
  104. */
  105. blkCnt = CMPLX_DIM * (numSamples & 7);
  106. do
  107. {
  108. mve_pred16_t p = vctp16q(blkCnt);
  109. pSrcA += 8;
  110. pSrcB += 8;
  111. vecSrcA = vldrhq_z_s16(pSrcA, p);
  112. vecSrcB = vldrhq_z_s16(pSrcB, p);
  113. vecDst = vqdmlsdhq_m(vuninitializedq_s16(), vecSrcA, vecSrcB, p);
  114. vecDst = vqdmladhxq_m(vecDst, vecSrcA, vecSrcB, p);
  115. vecDst = vshrq_m(vuninitializedq_s16(), vecDst, 2, p);
  116. vstrhq_p_s16(pDst, vecDst, p);
  117. pDst += 8;
  118. blkCnt -= 8;
  119. }
  120. while ((int32_t) blkCnt > 0);
  121. }
  122. else
  123. {
  124. blkCnt = numSamples * CMPLX_DIM;
  125. while (blkCnt > 0) {
  126. mve_pred16_t p = vctp16q(blkCnt);
  127. vecSrcA = vldrhq_z_s16(pSrcA, p);
  128. vecSrcB = vldrhq_z_s16(pSrcB, p);
  129. vecDst = vqdmlsdhq_m(vuninitializedq_s16(), vecSrcA, vecSrcB, p);
  130. vecDst = vqdmladhxq_m(vecDst, vecSrcA, vecSrcB, p);
  131. vecDst = vshrq_m(vuninitializedq_s16(), vecDst, 2, p);
  132. vstrhq_p_s16(pDst, vecDst, p);
  133. pDst += 8;
  134. pSrcA += 8;
  135. pSrcB += 8;
  136. blkCnt -= 8;
  137. }
  138. }
  139. }
  140. #else
  141. void arm_cmplx_mult_cmplx_q15(
  142. const q15_t * pSrcA,
  143. const q15_t * pSrcB,
  144. q15_t * pDst,
  145. uint32_t numSamples)
  146. {
  147. uint32_t blkCnt; /* Loop counter */
  148. q15_t a, b, c, d; /* Temporary variables */
  149. #if defined (ARM_MATH_LOOPUNROLL)
  150. /* Loop unrolling: Compute 4 outputs at a time */
  151. blkCnt = numSamples >> 2U;
  152. while (blkCnt > 0U)
  153. {
  154. /* C[2 * i ] = A[2 * i] * B[2 * i ] - A[2 * i + 1] * B[2 * i + 1]. */
  155. /* C[2 * i + 1] = A[2 * i] * B[2 * i + 1] + A[2 * i + 1] * B[2 * i ]. */
  156. a = *pSrcA++;
  157. b = *pSrcA++;
  158. c = *pSrcB++;
  159. d = *pSrcB++;
  160. /* store result in 3.13 format in destination buffer. */
  161. *pDst++ = (q15_t) ( (((q31_t) a * c) >> 17) - (((q31_t) b * d) >> 17) );
  162. *pDst++ = (q15_t) ( (((q31_t) a * d) >> 17) + (((q31_t) b * c) >> 17) );
  163. a = *pSrcA++;
  164. b = *pSrcA++;
  165. c = *pSrcB++;
  166. d = *pSrcB++;
  167. *pDst++ = (q15_t) ( (((q31_t) a * c) >> 17) - (((q31_t) b * d) >> 17) );
  168. *pDst++ = (q15_t) ( (((q31_t) a * d) >> 17) + (((q31_t) b * c) >> 17) );
  169. a = *pSrcA++;
  170. b = *pSrcA++;
  171. c = *pSrcB++;
  172. d = *pSrcB++;
  173. *pDst++ = (q15_t) ( (((q31_t) a * c) >> 17) - (((q31_t) b * d) >> 17) );
  174. *pDst++ = (q15_t) ( (((q31_t) a * d) >> 17) + (((q31_t) b * c) >> 17) );
  175. a = *pSrcA++;
  176. b = *pSrcA++;
  177. c = *pSrcB++;
  178. d = *pSrcB++;
  179. *pDst++ = (q15_t) ( (((q31_t) a * c) >> 17) - (((q31_t) b * d) >> 17) );
  180. *pDst++ = (q15_t) ( (((q31_t) a * d) >> 17) + (((q31_t) b * c) >> 17) );
  181. /* Decrement loop counter */
  182. blkCnt--;
  183. }
  184. /* Loop unrolling: Compute remaining outputs */
  185. blkCnt = numSamples % 0x4U;
  186. #else
  187. /* Initialize blkCnt with number of samples */
  188. blkCnt = numSamples;
  189. #endif /* #if defined (ARM_MATH_LOOPUNROLL) */
  190. while (blkCnt > 0U)
  191. {
  192. /* C[2 * i ] = A[2 * i] * B[2 * i ] - A[2 * i + 1] * B[2 * i + 1]. */
  193. /* C[2 * i + 1] = A[2 * i] * B[2 * i + 1] + A[2 * i + 1] * B[2 * i ]. */
  194. a = *pSrcA++;
  195. b = *pSrcA++;
  196. c = *pSrcB++;
  197. d = *pSrcB++;
  198. /* store result in 3.13 format in destination buffer. */
  199. *pDst++ = (q15_t) ( (((q31_t) a * c) >> 17) - (((q31_t) b * d) >> 17) );
  200. *pDst++ = (q15_t) ( (((q31_t) a * d) >> 17) + (((q31_t) b * c) >> 17) );
  201. /* Decrement loop counter */
  202. blkCnt--;
  203. }
  204. }
  205. #endif /* defined(ARM_MATH_MVEI) */
/**
  @} end of CmplxByCmplxMult group
 */