/* ----------------------------------------------------------------------
 * Project:      CMSIS DSP Library
 * Title:        arm_cmplx_mult_cmplx_f16.c
 * Description:  Floating-point complex-by-complex multiplication
 *
 * $Date:        23 April 2021
 * $Revision:    V1.9.0
 *
 * Target Processor: Cortex-M and Cortex-A cores
 * -------------------------------------------------------------------- */
/*
 * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved.
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the License); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an AS IS BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "dsp/complex_math_functions_f16.h"

#if defined(ARM_FLOAT16_SUPPORTED)

/**
  @ingroup groupCmplxMath
 */

/**
  @addtogroup CmplxByCmplxMult
  @{
 */

/**
  @brief         Floating-point complex-by-complex multiplication.
  @param[in]     pSrcA       points to first input vector
  @param[in]     pSrcB       points to second input vector
  @param[out]    pDst        points to output vector
  @param[in]     numSamples  number of samples in each vector
 */
#if defined(ARM_MATH_MVE_FLOAT16) && !defined(ARM_MATH_AUTOVECTORIZE)
void arm_cmplx_mult_cmplx_f16(
    const float16_t * pSrcA,
    const float16_t * pSrcB,
    float16_t * pDst,
    uint32_t numSamples)
{
    int32_t blkCnt;
    f16x8_t vecSrcA, vecSrcB;
    f16x8_t vecSrcC, vecSrcD;
    f16x8_t vec_acc;

    /* Each interleaved f16x8 vector carries 4 complex samples (re/im pairs).
     * The main loop below consumes two vectors per pass, i.e. 8 complex
     * samples, hence numSamples >> 3. One pass is peeled off (blkCnt -= 1)
     * so its loads can prime the software pipeline and its multiplies can be
     * finished in the epilogue after the loop. */
    blkCnt = (numSamples >> 3);
    blkCnt -= 1;
    if (blkCnt > 0) {
        /* should give more freedom to generate stall free code */
        vecSrcA = vld1q(pSrcA);
        vecSrcB = vld1q(pSrcB);
        pSrcA += 8;
        pSrcB += 8;

        while (blkCnt > 0) {
            /* vcmulq + vcmlaq_rot90 together form the full complex product:
             * re = a.re*b.re - a.im*b.im, im = a.re*b.im + a.im*b.re.
             * Loads for the next pass are interleaved between the multiply
             * steps to hide load latency. */
            vec_acc = vcmulq(vecSrcA, vecSrcB);
            vecSrcC = vld1q(pSrcA);
            pSrcA += 8;

            vec_acc = vcmlaq_rot90(vec_acc, vecSrcA, vecSrcB);
            vecSrcD = vld1q(pSrcB);
            pSrcB += 8;
            vst1q(pDst, vec_acc);
            pDst += 8;

            vec_acc = vcmulq(vecSrcC, vecSrcD);
            vecSrcA = vld1q(pSrcA);
            pSrcA += 8;

            vec_acc = vcmlaq_rot90(vec_acc, vecSrcC, vecSrcD);
            vecSrcB = vld1q(pSrcB);
            pSrcB += 8;
            vst1q(pDst, vec_acc);
            pDst += 8;
            /*
             * Decrement the blockSize loop counter
             */
            blkCnt--;
        }

        /* process last elements out of the loop avoid the armclang breaking the SW pipeline */
        /* Epilogue: finish the peeled pass. Note vecSrcC/vecSrcD are loaded
         * here WITHOUT advancing pSrcA/pSrcB — the tail loop below compensates
         * by pre-incrementing both pointers by 8 before its first load. */
        vec_acc = vcmulq(vecSrcA, vecSrcB);
        vecSrcC = vld1q(pSrcA);

        vec_acc = vcmlaq_rot90(vec_acc, vecSrcA, vecSrcB);
        vecSrcD = vld1q(pSrcB);
        vst1q(pDst, vec_acc);
        pDst += 8;

        vec_acc = vcmulq(vecSrcC, vecSrcD);
        vec_acc = vcmlaq_rot90(vec_acc, vecSrcC, vecSrcD);
        vst1q(pDst, vec_acc);
        pDst += 8;

        /*
         * tail
         */
        /* Remaining (numSamples & 7) complex samples = CMPLX_DIM * that many
         * halfwords; processed with tail predication (vctp16q masks the lanes
         * beyond blkCnt, vldrhq_z/vstrhq_p zero/suppress inactive lanes). */
        blkCnt = CMPLX_DIM * (numSamples & 7);
        while (blkCnt > 0) {
            mve_pred16_t p = vctp16q(blkCnt);
            /* Skip the 8 halfwords already consumed by the epilogue loads. */
            pSrcA += 8;
            pSrcB += 8;
            vecSrcA = vldrhq_z_f16(pSrcA, p);
            vecSrcB = vldrhq_z_f16(pSrcB, p);
            vec_acc = vcmulq_m(vuninitializedq_f16(),vecSrcA, vecSrcB, p);
            vec_acc = vcmlaq_rot90_m(vec_acc, vecSrcA, vecSrcB, p);
            vstrhq_p_f16(pDst, vec_acc, p);
            pDst += 8;
            blkCnt -= 8;
        }
    } else {
        /* small vector */
        /* Fewer than 16 complex samples (numSamples >> 3 <= 1): process the
         * whole input with a single predicated loop, 4 complex samples per
         * iteration. */
        blkCnt = numSamples * CMPLX_DIM;
        do {
            mve_pred16_t p = vctp16q(blkCnt);
            vecSrcA = vldrhq_z_f16(pSrcA, p);
            vecSrcB = vldrhq_z_f16(pSrcB, p);
            vec_acc = vcmulq_m(vuninitializedq_f16(),vecSrcA, vecSrcB, p);
            vec_acc = vcmlaq_rot90_m(vec_acc, vecSrcA, vecSrcB, p);
            vstrhq_p_f16(pDst, vec_acc, p);
            pDst += 8;
            /*
             * Decrement the blkCnt loop counter
             * Advance vector source and destination pointers
             */
            pSrcA += 8;
            pSrcB += 8;
            blkCnt -= 8;
        }
        while (blkCnt > 0);
    }
}
#else
  135. void arm_cmplx_mult_cmplx_f16(
  136. const float16_t * pSrcA,
  137. const float16_t * pSrcB,
  138. float16_t * pDst,
  139. uint32_t numSamples)
  140. {
  141. uint32_t blkCnt; /* Loop counter */
  142. _Float16 a, b, c, d; /* Temporary variables to store real and imaginary values */
  143. #if defined (ARM_MATH_LOOPUNROLL) && !defined(ARM_MATH_AUTOVECTORIZE)
  144. /* Loop unrolling: Compute 4 outputs at a time */
  145. blkCnt = numSamples >> 2U;
  146. while (blkCnt > 0U)
  147. {
  148. /* C[2 * i ] = A[2 * i] * B[2 * i ] - A[2 * i + 1] * B[2 * i + 1]. */
  149. /* C[2 * i + 1] = A[2 * i] * B[2 * i + 1] + A[2 * i + 1] * B[2 * i ]. */
  150. a = *pSrcA++;
  151. b = *pSrcA++;
  152. c = *pSrcB++;
  153. d = *pSrcB++;
  154. /* store result in destination buffer. */
  155. *pDst++ = (a * c) - (b * d);
  156. *pDst++ = (a * d) + (b * c);
  157. a = *pSrcA++;
  158. b = *pSrcA++;
  159. c = *pSrcB++;
  160. d = *pSrcB++;
  161. *pDst++ = (a * c) - (b * d);
  162. *pDst++ = (a * d) + (b * c);
  163. a = *pSrcA++;
  164. b = *pSrcA++;
  165. c = *pSrcB++;
  166. d = *pSrcB++;
  167. *pDst++ = (a * c) - (b * d);
  168. *pDst++ = (a * d) + (b * c);
  169. a = *pSrcA++;
  170. b = *pSrcA++;
  171. c = *pSrcB++;
  172. d = *pSrcB++;
  173. *pDst++ = (a * c) - (b * d);
  174. *pDst++ = (a * d) + (b * c);
  175. /* Decrement loop counter */
  176. blkCnt--;
  177. }
  178. /* Loop unrolling: Compute remaining outputs */
  179. blkCnt = numSamples % 0x4U;
  180. #else
  181. /* Initialize blkCnt with number of samples */
  182. blkCnt = numSamples;
  183. #endif /* #if defined (ARM_MATH_LOOPUNROLL) */
  184. while (blkCnt > 0U)
  185. {
  186. /* C[2 * i ] = A[2 * i] * B[2 * i ] - A[2 * i + 1] * B[2 * i + 1]. */
  187. /* C[2 * i + 1] = A[2 * i] * B[2 * i + 1] + A[2 * i + 1] * B[2 * i ]. */
  188. a = *pSrcA++;
  189. b = *pSrcA++;
  190. c = *pSrcB++;
  191. d = *pSrcB++;
  192. /* store result in destination buffer. */
  193. *pDst++ = (a * c) - (b * d);
  194. *pDst++ = (a * d) + (b * c);
  195. /* Decrement loop counter */
  196. blkCnt--;
  197. }
  198. }
#endif /* defined(ARM_MATH_MVEF) && !defined(ARM_MATH_AUTOVECTORIZE) */

/**
  @} end of CmplxByCmplxMult group
 */

#endif /* #if defined(ARM_FLOAT16_SUPPORTED) */