/* ----------------------------------------------------------------------
 * Project:      CMSIS DSP Library
 * Title:        arm_cfft_q31.c
 * Description:  Combined Radix Decimation in Frequency CFFT fixed point processing function
 *
 * $Date:        23 April 2021
 * $Revision:    V1.9.0
 *
 * Target Processor: Cortex-M and Cortex-A cores
 * -------------------------------------------------------------------- */
/*
 * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved.
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the License); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an AS IS BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "dsp/transform_functions.h"

#if defined(ARM_MATH_MVEI) && !defined(ARM_MATH_AUTOVECTORIZE)

#include "arm_vec_fft.h"
static void _arm_radix4_butterfly_q31_mve(
    const arm_cfft_instance_q31 *S,
    q31_t *pSrc,
    uint32_t fftLen)
{
    q31x4_t vecTmp0, vecTmp1;
    q31x4_t vecSum0, vecDiff0, vecSum1, vecDiff1;
    q31x4_t vecA, vecB, vecC, vecD;
    uint32_t blkCnt;
    uint32_t n1, n2;
    uint32_t stage = 0;
    int32_t iter = 1;
    static const int32_t strides[4] = {
        (0 - 16) * (int32_t)sizeof(q31_t *), (1 - 16) * (int32_t)sizeof(q31_t *),
        (8 - 16) * (int32_t)sizeof(q31_t *), (9 - 16) * (int32_t)sizeof(q31_t *)
    };
    /*
     * Process the first stages.
     * Each of these radix-4 stages scales the input down by 2 bits
     * (two halving operations per butterfly output).
     */
    n2 = fftLen;
    n1 = n2;
    n2 >>= 2u;
    for (int k = fftLen / 4u; k > 1; k >>= 2u)
    {
        q31_t const *p_rearranged_twiddle_tab_stride2 =
            &S->rearranged_twiddle_stride2[
            S->rearranged_twiddle_tab_stride2_arr[stage]];
        q31_t const *p_rearranged_twiddle_tab_stride3 =
            &S->rearranged_twiddle_stride3[
            S->rearranged_twiddle_tab_stride3_arr[stage]];
        q31_t const *p_rearranged_twiddle_tab_stride1 =
            &S->rearranged_twiddle_stride1[
            S->rearranged_twiddle_tab_stride1_arr[stage]];

        q31_t *pBase = pSrc;
        for (int i = 0; i < iter; i++)
        {
            q31_t *inA = pBase;
            q31_t *inB = inA + n2 * CMPLX_DIM;
            q31_t *inC = inB + n2 * CMPLX_DIM;
            q31_t *inD = inC + n2 * CMPLX_DIM;
            q31_t const *pW1 = p_rearranged_twiddle_tab_stride1;
            q31_t const *pW2 = p_rearranged_twiddle_tab_stride2;
            q31_t const *pW3 = p_rearranged_twiddle_tab_stride3;
            q31x4_t vecW;

            blkCnt = n2 / 2;
            /*
             * load 2 x q31 complex pairs
             */
            vecA = vldrwq_s32(inA);
            vecC = vldrwq_s32(inC);
            while (blkCnt > 0U)
            {
                vecB = vldrwq_s32(inB);
                vecD = vldrwq_s32(inD);

                vecSum0  = vhaddq(vecA, vecC);
                vecDiff0 = vhsubq(vecA, vecC);

                vecSum1  = vhaddq(vecB, vecD);
                vecDiff1 = vhsubq(vecB, vecD);
                /*
                 * [ 1 1 1 1 ] * [ A B C D ]' .* 1
                 */
                vecTmp0 = vhaddq(vecSum0, vecSum1);
                vst1q(inA, vecTmp0);
                inA += 4;
                /*
                 * [ 1 -1 1 -1 ] * [ A B C D ]'
                 */
                vecTmp0 = vhsubq(vecSum0, vecSum1);
                /*
                 * [ 1 -1 1 -1 ] * [ A B C D ]' .* W2
                 */
                vecW = vld1q(pW2);
                pW2 += 4;
                vecTmp1 = MVE_CMPLX_MULT_FX_AxB(vecW, vecTmp0, q31x4_t);
                vst1q(inB, vecTmp1);
                inB += 4;
                /*
                 * [ 1 -i -1 +i ] * [ A B C D ]'
                 */
                vecTmp0 = MVE_CMPLX_SUB_FX_A_ixB(vecDiff0, vecDiff1);
                /*
                 * [ 1 -i -1 +i ] * [ A B C D ]' .* W1
                 */
                vecW = vld1q(pW1);
                pW1 += 4;
                vecTmp1 = MVE_CMPLX_MULT_FX_AxB(vecW, vecTmp0, q31x4_t);
                vst1q(inC, vecTmp1);
                inC += 4;
                /*
                 * [ 1 +i -1 -i ] * [ A B C D ]'
                 */
                vecTmp0 = MVE_CMPLX_ADD_FX_A_ixB(vecDiff0, vecDiff1);
                /*
                 * [ 1 +i -1 -i ] * [ A B C D ]' .* W3
                 */
                vecW = vld1q(pW3);
                pW3 += 4;
                vecTmp1 = MVE_CMPLX_MULT_FX_AxB(vecW, vecTmp0, q31x4_t);
                vst1q(inD, vecTmp1);
                inD += 4;

                vecA = vldrwq_s32(inA);
                vecC = vldrwq_s32(inC);

                blkCnt--;
            }
            pBase += CMPLX_DIM * n1;
        }
        n1 = n2;
        n2 >>= 2u;
        iter = iter << 2;
        stage++;
    }
    /*
     * End of the first-stages processing.
     * Data is in 11.21 (q21) format for the 1024 point, as there are 3 middle stages.
     * Data is in  9.23 (q23) format for the  256 point, as there are 2 middle stages.
     * Data is in  7.25 (q25) format for the   64 point, as there is 1 middle stage.
     * Data is in  5.27 (q27) format for the   16 point, as there are no middle stages.
     */
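    /*
     * Worked scaling sketch (editorial note): with fftLen = 4^S there are S
     * radix-4 stages in total, and every stage (including the last one below)
     * costs 2 bits of headroom, because each butterfly output is built from
     * two halving operations (vhaddq/vhsubq). A 1.31 input therefore emerges
     * in Q(31 - 2*S) format. For example, fftLen = 1024 gives S = 5, so the
     * output keeps 31 - 10 = 21 fractional bits, i.e. the 11.21 format above;
     * fftLen = 256 gives S = 4 and 9.23, matching the table.
     */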
    /*
     * Start of the last stage processing.
     */
    uint32x4_t vecScGathAddr = vld1q_u32((uint32_t *)strides);
    vecScGathAddr = vecScGathAddr + (uint32_t)pSrc;
    /*
     * load scheduling: pre-load A and C; the write-back gather advances the
     * base by 64 bytes (two radix-4 groups) per iteration
     */
    vecA = vldrwq_gather_base_wb_s32(&vecScGathAddr, 64);
    vecC = vldrwq_gather_base_s32(vecScGathAddr, 16);
    blkCnt = (fftLen >> 3);
    while (blkCnt > 0U)
    {
        vecSum0  = vhaddq(vecA, vecC);
        vecDiff0 = vhsubq(vecA, vecC);

        vecB = vldrwq_gather_base_s32(vecScGathAddr, 8);
        vecD = vldrwq_gather_base_s32(vecScGathAddr, 24);

        vecSum1  = vhaddq(vecB, vecD);
        vecDiff1 = vhsubq(vecB, vecD);
        /*
         * pre-load for next iteration
         */
        vecA = vldrwq_gather_base_wb_s32(&vecScGathAddr, 64);
        vecC = vldrwq_gather_base_s32(vecScGathAddr, 16);

        vecTmp0 = vhaddq(vecSum0, vecSum1);
        vstrwq_scatter_base_s32(vecScGathAddr, -64, vecTmp0);

        vecTmp0 = vhsubq(vecSum0, vecSum1);
        vstrwq_scatter_base_s32(vecScGathAddr, -64 + 8, vecTmp0);

        vecTmp0 = MVE_CMPLX_SUB_FX_A_ixB(vecDiff0, vecDiff1);
        vstrwq_scatter_base_s32(vecScGathAddr, -64 + 16, vecTmp0);

        vecTmp0 = MVE_CMPLX_ADD_FX_A_ixB(vecDiff0, vecDiff1);
        vstrwq_scatter_base_s32(vecScGathAddr, -64 + 24, vecTmp0);

        blkCnt--;
    }
    /*
     * Output is in 11.21 (q21) format for the 1024 point.
     * Output is in  9.23 (q23) format for the  256 point.
     * Output is in  7.25 (q25) format for the   64 point.
     * Output is in  5.27 (q27) format for the   16 point.
     */
}
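/*
 * Editorial note: lengths that are a power of 2 but not a power of 4
 * (32, 128, 512, 2048) are handled below by first applying one radix-2
 * decimation-in-frequency stage and then running the radix-4 kernel above
 * on the two resulting sub-transforms of length fftLen/2.
 */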
static void arm_cfft_radix4by2_q31_mve(const arm_cfft_instance_q31 *S, q31_t *pSrc, uint32_t fftLen)
{
    uint32_t n2;
    q31_t *pIn0;
    q31_t *pIn1;
    const q31_t *pCoef = S->pTwiddle;
    uint32_t blkCnt;
    q31x4_t vecIn0, vecIn1, vecSum, vecDiff;
    q31x4_t vecCmplxTmp, vecTw;

    n2 = fftLen >> 1;

    pIn0 = pSrc;
    pIn1 = pSrc + fftLen;
    blkCnt = n2 / 2;

    while (blkCnt > 0U)
    {
        vecIn0 = vld1q_s32(pIn0);
        vecIn1 = vld1q_s32(pIn1);

        vecIn0 = vecIn0 >> 1;
        vecIn1 = vecIn1 >> 1;
        vecSum = vhaddq(vecIn0, vecIn1);
        vst1q(pIn0, vecSum);
        pIn0 += 4;

        vecTw = vld1q_s32(pCoef);
        pCoef += 4;

        vecDiff = vhsubq(vecIn0, vecIn1);
        vecCmplxTmp = MVE_CMPLX_MULT_FX_AxConjB(vecDiff, vecTw, q31x4_t);
        vst1q(pIn1, vecCmplxTmp);
        pIn1 += 4;

        blkCnt--;
    }

    _arm_radix4_butterfly_q31_mve(S, pSrc, n2);
    _arm_radix4_butterfly_q31_mve(S, pSrc + fftLen, n2);

    pIn0 = pSrc;
    blkCnt = (fftLen << 1) >> 2;
    while (blkCnt > 0U)
    {
        vecIn0 = vld1q_s32(pIn0);
        vecIn0 = vecIn0 << 1;
        vst1q(pIn0, vecIn0);
        pIn0 += 4;
        blkCnt--;
    }
    /*
     * tail
     * (will be merged through tail predication)
     */
    blkCnt = (fftLen << 1) & 3;
    if (blkCnt > 0U)
    {
        mve_pred16_t p0 = vctp32q(blkCnt);
        vecIn0 = vld1q_s32(pIn0);
        vecIn0 = vecIn0 << 1;
        vstrwq_p(pIn0, vecIn0, p0);
    }
}
static void _arm_radix4_butterfly_inverse_q31_mve(
    const arm_cfft_instance_q31 *S,
    q31_t *pSrc,
    uint32_t fftLen)
{
    q31x4_t vecTmp0, vecTmp1;
    q31x4_t vecSum0, vecDiff0, vecSum1, vecDiff1;
    q31x4_t vecA, vecB, vecC, vecD;
    uint32_t blkCnt;
    uint32_t n1, n2;
    uint32_t stage = 0;
    int32_t iter = 1;
    static const int32_t strides[4] = {
        (0 - 16) * (int32_t)sizeof(q31_t *), (1 - 16) * (int32_t)sizeof(q31_t *),
        (8 - 16) * (int32_t)sizeof(q31_t *), (9 - 16) * (int32_t)sizeof(q31_t *)
    };
    /*
     * Process the first stages.
     * Each of these radix-4 stages scales the input down by 2 bits
     * (two halving operations per butterfly output).
     */
    n2 = fftLen;
    n1 = n2;
    n2 >>= 2u;
    for (int k = fftLen / 4u; k > 1; k >>= 2u)
    {
        q31_t const *p_rearranged_twiddle_tab_stride2 =
            &S->rearranged_twiddle_stride2[
            S->rearranged_twiddle_tab_stride2_arr[stage]];
        q31_t const *p_rearranged_twiddle_tab_stride3 =
            &S->rearranged_twiddle_stride3[
            S->rearranged_twiddle_tab_stride3_arr[stage]];
        q31_t const *p_rearranged_twiddle_tab_stride1 =
            &S->rearranged_twiddle_stride1[
            S->rearranged_twiddle_tab_stride1_arr[stage]];

        q31_t *pBase = pSrc;
        for (int i = 0; i < iter; i++)
        {
            q31_t *inA = pBase;
            q31_t *inB = inA + n2 * CMPLX_DIM;
            q31_t *inC = inB + n2 * CMPLX_DIM;
            q31_t *inD = inC + n2 * CMPLX_DIM;
            q31_t const *pW1 = p_rearranged_twiddle_tab_stride1;
            q31_t const *pW2 = p_rearranged_twiddle_tab_stride2;
            q31_t const *pW3 = p_rearranged_twiddle_tab_stride3;
            q31x4_t vecW;

            blkCnt = n2 / 2;
            /*
             * load 2 x q31 complex pairs
             */
            vecA = vldrwq_s32(inA);
            vecC = vldrwq_s32(inC);
            while (blkCnt > 0U)
            {
                vecB = vldrwq_s32(inB);
                vecD = vldrwq_s32(inD);

                vecSum0  = vhaddq(vecA, vecC);
                vecDiff0 = vhsubq(vecA, vecC);

                vecSum1  = vhaddq(vecB, vecD);
                vecDiff1 = vhsubq(vecB, vecD);
                /*
                 * [ 1 1 1 1 ] * [ A B C D ]' .* 1
                 */
                vecTmp0 = vhaddq(vecSum0, vecSum1);
                vst1q(inA, vecTmp0);
                inA += 4;
                /*
                 * [ 1 -1 1 -1 ] * [ A B C D ]'
                 */
                vecTmp0 = vhsubq(vecSum0, vecSum1);
                /*
                 * [ 1 -1 1 -1 ] * [ A B C D ]' .* conj(W2)
                 */
                vecW = vld1q(pW2);
                pW2 += 4;
                vecTmp1 = MVE_CMPLX_MULT_FX_AxConjB(vecTmp0, vecW, q31x4_t);
                vst1q(inB, vecTmp1);
                inB += 4;
                /*
                 * [ 1 +i -1 -i ] * [ A B C D ]'
                 */
                vecTmp0 = MVE_CMPLX_ADD_FX_A_ixB(vecDiff0, vecDiff1);
                /*
                 * [ 1 +i -1 -i ] * [ A B C D ]' .* conj(W1)
                 */
                vecW = vld1q(pW1);
                pW1 += 4;
                vecTmp1 = MVE_CMPLX_MULT_FX_AxConjB(vecTmp0, vecW, q31x4_t);
                vst1q(inC, vecTmp1);
                inC += 4;
                /*
                 * [ 1 -i -1 +i ] * [ A B C D ]'
                 */
                vecTmp0 = MVE_CMPLX_SUB_FX_A_ixB(vecDiff0, vecDiff1);
                /*
                 * [ 1 -i -1 +i ] * [ A B C D ]' .* conj(W3)
                 */
                vecW = vld1q(pW3);
                pW3 += 4;
                vecTmp1 = MVE_CMPLX_MULT_FX_AxConjB(vecTmp0, vecW, q31x4_t);
                vst1q(inD, vecTmp1);
                inD += 4;

                vecA = vldrwq_s32(inA);
                vecC = vldrwq_s32(inC);

                blkCnt--;
            }
            pBase += CMPLX_DIM * n1;
        }
        n1 = n2;
        n2 >>= 2u;
        iter = iter << 2;
        stage++;
    }
    /*
     * End of the first-stages processing.
     * Data is in 11.21 (q21) format for the 1024 point, as there are 3 middle stages.
     * Data is in  9.23 (q23) format for the  256 point, as there are 2 middle stages.
     * Data is in  7.25 (q25) format for the   64 point, as there is 1 middle stage.
     * Data is in  5.27 (q27) format for the   16 point, as there are no middle stages.
     */
    /*
     * Start of the last stage processing.
     */
    uint32x4_t vecScGathAddr = vld1q_u32((uint32_t *)strides);
    vecScGathAddr = vecScGathAddr + (uint32_t)pSrc;
    /*
     * load scheduling: pre-load A and C; the write-back gather advances the
     * base by 64 bytes (two radix-4 groups) per iteration
     */
    vecA = vldrwq_gather_base_wb_s32(&vecScGathAddr, 64);
    vecC = vldrwq_gather_base_s32(vecScGathAddr, 16);
    blkCnt = (fftLen >> 3);
    while (blkCnt > 0U)
    {
        vecSum0  = vhaddq(vecA, vecC);
        vecDiff0 = vhsubq(vecA, vecC);

        vecB = vldrwq_gather_base_s32(vecScGathAddr, 8);
        vecD = vldrwq_gather_base_s32(vecScGathAddr, 24);

        vecSum1  = vhaddq(vecB, vecD);
        vecDiff1 = vhsubq(vecB, vecD);
        /*
         * pre-load for next iteration
         */
        vecA = vldrwq_gather_base_wb_s32(&vecScGathAddr, 64);
        vecC = vldrwq_gather_base_s32(vecScGathAddr, 16);

        vecTmp0 = vhaddq(vecSum0, vecSum1);
        vstrwq_scatter_base_s32(vecScGathAddr, -64, vecTmp0);

        vecTmp0 = vhsubq(vecSum0, vecSum1);
        vstrwq_scatter_base_s32(vecScGathAddr, -64 + 8, vecTmp0);

        vecTmp0 = MVE_CMPLX_ADD_FX_A_ixB(vecDiff0, vecDiff1);
        vstrwq_scatter_base_s32(vecScGathAddr, -64 + 16, vecTmp0);

        vecTmp0 = MVE_CMPLX_SUB_FX_A_ixB(vecDiff0, vecDiff1);
        vstrwq_scatter_base_s32(vecScGathAddr, -64 + 24, vecTmp0);

        blkCnt--;
    }
    /*
     * Output is in 11.21 (q21) format for the 1024 point.
     * Output is in  9.23 (q23) format for the  256 point.
     * Output is in  7.25 (q25) format for the   64 point.
     * Output is in  5.27 (q27) format for the   16 point.
     */
}
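/*
 * Editorial note: inverse counterpart of arm_cfft_radix4by2_q31_mve. It runs
 * one radix-2 stage (multiplying by the twiddle factors directly rather than
 * by their conjugates) followed by two inverse radix-4 sub-transforms of
 * length fftLen/2.
 */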
static void arm_cfft_radix4by2_inverse_q31_mve(const arm_cfft_instance_q31 *S, q31_t *pSrc, uint32_t fftLen)
{
    uint32_t n2;
    q31_t *pIn0;
    q31_t *pIn1;
    const q31_t *pCoef = S->pTwiddle;
    uint32_t blkCnt;
    q31x4_t vecIn0, vecIn1, vecSum, vecDiff;
    q31x4_t vecCmplxTmp, vecTw;

    n2 = fftLen >> 1;

    pIn0 = pSrc;
    pIn1 = pSrc + fftLen;
    blkCnt = n2 / 2;
    while (blkCnt > 0U)
    {
        vecIn0 = vld1q_s32(pIn0);
        vecIn1 = vld1q_s32(pIn1);

        vecIn0 = vecIn0 >> 1;
        vecIn1 = vecIn1 >> 1;
        vecSum = vhaddq(vecIn0, vecIn1);
        vst1q(pIn0, vecSum);
        pIn0 += 4;

        vecTw = vld1q_s32(pCoef);
        pCoef += 4;

        vecDiff = vhsubq(vecIn0, vecIn1);
        vecCmplxTmp = MVE_CMPLX_MULT_FX_AxB(vecDiff, vecTw, q31x4_t);
        vst1q(pIn1, vecCmplxTmp);
        pIn1 += 4;

        blkCnt--;
    }
    _arm_radix4_butterfly_inverse_q31_mve(S, pSrc, n2);
    _arm_radix4_butterfly_inverse_q31_mve(S, pSrc + fftLen, n2);

    pIn0 = pSrc;
    blkCnt = (fftLen << 1) >> 2;
    while (blkCnt > 0U)
    {
        vecIn0 = vld1q_s32(pIn0);
        vecIn0 = vecIn0 << 1;
        vst1q(pIn0, vecIn0);
        pIn0 += 4;
        blkCnt--;
    }
    /*
     * tail
     * (will be merged through tail predication)
     */
    blkCnt = (fftLen << 1) & 3;
    if (blkCnt > 0U)
    {
        mve_pred16_t p0 = vctp32q(blkCnt);
        vecIn0 = vld1q_s32(pIn0);
        vecIn0 = vecIn0 << 1;
        vstrwq_p(pIn0, vecIn0, p0);
    }
}
/**
  @ingroup groupTransforms
 */

/**
  @addtogroup ComplexFFT
  @{
 */

/**
  @brief         Processing function for the Q31 complex FFT.
  @param[in]     S               points to an instance of the fixed-point CFFT structure
  @param[in,out] pSrc            points to the complex data buffer of size <code>2*fftLen</code>. Processing occurs in-place
  @param[in]     ifftFlag        flag that selects transform direction
                   - value = 0: forward transform
                   - value = 1: inverse transform
  @param[in]     bitReverseFlag  flag that enables / disables bit reversal of output
                   - value = 0: disables bit reversal of output
                   - value = 1: enables bit reversal of output
  @return        none
 */
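/*
 * Illustrative usage (editorial sketch, not part of the original source):
 * the instance is assumed to be set up with arm_cfft_init_q31() from this
 * library before the transform is run on an interleaved re/im buffer.
 *
 *   #include "dsp/transform_functions.h"
 *
 *   static q31_t buffer[2 * 256];             // 256-point CFFT, in-place
 *
 *   void run_fft(void)
 *   {
 *       arm_cfft_instance_q31 S;
 *       if (arm_cfft_init_q31(&S, 256) == ARM_MATH_SUCCESS)
 *       {
 *           arm_cfft_q31(&S, buffer, 0, 1);   // forward, bit-reversed output
 *       }
 *   }
 */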
void arm_cfft_q31(
    const arm_cfft_instance_q31 *S,
    q31_t *pSrc,
    uint8_t ifftFlag,
    uint8_t bitReverseFlag)
{
    uint32_t fftLen = S->fftLen;
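    /*
     * Editorial note: lengths that are powers of 4 take the pure radix-4
     * path; the remaining power-of-2 lengths get an extra radix-2 stage
     * via the radix4by2 wrappers.
     */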
    if (ifftFlag == 1U)
    {
        switch (fftLen)
        {
        case 16:
        case 64:
        case 256:
        case 1024:
        case 4096:
            _arm_radix4_butterfly_inverse_q31_mve(S, pSrc, fftLen);
            break;

        case 32:
        case 128:
        case 512:
        case 2048:
            arm_cfft_radix4by2_inverse_q31_mve(S, pSrc, fftLen);
            break;
        }
    }
    else
    {
        switch (fftLen)
        {
        case 16:
        case 64:
        case 256:
        case 1024:
        case 4096:
            _arm_radix4_butterfly_q31_mve(S, pSrc, fftLen);
            break;

        case 32:
        case 128:
        case 512:
        case 2048:
            arm_cfft_radix4by2_q31_mve(S, pSrc, fftLen);
            break;
        }
    }

    if (bitReverseFlag)
    {
        arm_bitreversal_32_inpl_mve((uint32_t *)pSrc, S->bitRevLength, S->pBitRevTable);
    }
}
/**
  @} end of ComplexFFT group
 */

#else
extern void arm_radix4_butterfly_q31(
        q31_t *pSrc,
        uint32_t fftLen,
        const q31_t *pCoef,
        uint32_t twidCoefModifier);

extern void arm_radix4_butterfly_inverse_q31(
        q31_t *pSrc,
        uint32_t fftLen,
        const q31_t *pCoef,
        uint32_t twidCoefModifier);

extern void arm_bitreversal_32(
        uint32_t *pSrc,
        const uint16_t bitRevLen,
        const uint16_t *pBitRevTable);

void arm_cfft_radix4by2_q31(
        q31_t *pSrc,
        uint32_t fftLen,
        const q31_t *pCoef);

void arm_cfft_radix4by2_inverse_q31(
        q31_t *pSrc,
        uint32_t fftLen,
        const q31_t *pCoef);
/**
  @ingroup groupTransforms
 */

/**
  @addtogroup ComplexFFT
  @{
 */

/**
  @brief         Processing function for the Q31 complex FFT.
  @param[in]     S               points to an instance of the fixed-point CFFT structure
  @param[in,out] p1              points to the complex data buffer of size <code>2*fftLen</code>. Processing occurs in-place
  @param[in]     ifftFlag        flag that selects transform direction
                   - value = 0: forward transform
                   - value = 1: inverse transform
  @param[in]     bitReverseFlag  flag that enables / disables bit reversal of output
                   - value = 0: disables bit reversal of output
                   - value = 1: enables bit reversal of output
  @return        none
 */
void arm_cfft_q31(
    const arm_cfft_instance_q31 *S,
    q31_t *p1,
    uint8_t ifftFlag,
    uint8_t bitReverseFlag)
{
    uint32_t L = S->fftLen;

    if (ifftFlag == 1U)
    {
        switch (L)
        {
        case 16:
        case 64:
        case 256:
        case 1024:
        case 4096:
            arm_radix4_butterfly_inverse_q31(p1, L, (q31_t *)S->pTwiddle, 1);
            break;

        case 32:
        case 128:
        case 512:
        case 2048:
            arm_cfft_radix4by2_inverse_q31(p1, L, S->pTwiddle);
            break;
        }
    }
    else
    {
        switch (L)
        {
        case 16:
        case 64:
        case 256:
        case 1024:
        case 4096:
            arm_radix4_butterfly_q31(p1, L, (q31_t *)S->pTwiddle, 1);
            break;

        case 32:
        case 128:
        case 512:
        case 2048:
            arm_cfft_radix4by2_q31(p1, L, S->pTwiddle);
            break;
        }
    }

    if (bitReverseFlag)
        arm_bitreversal_32((uint32_t *)p1, S->bitRevLength, S->pBitRevTable);
}

/**
  @} end of ComplexFFT group
 */
void arm_cfft_radix4by2_q31(
        q31_t *pSrc,
        uint32_t fftLen,
        const q31_t *pCoef)
{
    uint32_t i, l;
    uint32_t n2;
    q31_t xt, yt, cosVal, sinVal;
    q31_t p0, p1;

    n2 = fftLen >> 1U;
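    /*
     * Editorial note on the loop below: it implements the radix-2 DIF split
     *   x'[i]       = (x[i] + x[i + N/2]) / 4
     *   x'[i + N/2] = ((x[i] - x[i + N/2]) / 4) * (cosVal - j*sinVal) * 2
     * with {cosVal, sinVal} read as interleaved pairs from the twiddle table,
     * i.e. a multiplication by e^(-j*2*pi*i/N) for the forward transform.
     */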
    for (i = 0; i < n2; i++)
    {
        cosVal = pCoef[2 * i];
        sinVal = pCoef[2 * i + 1];

        l = i + n2;

        xt =          (pSrc[2 * i] >> 2U) - (pSrc[2 * l] >> 2U);
        pSrc[2 * i] = (pSrc[2 * i] >> 2U) + (pSrc[2 * l] >> 2U);

        yt =              (pSrc[2 * i + 1] >> 2U) - (pSrc[2 * l + 1] >> 2U);
        pSrc[2 * i + 1] = (pSrc[2 * l + 1] >> 2U) + (pSrc[2 * i + 1] >> 2U);

        mult_32x32_keep32_R(p0, xt, cosVal);
        mult_32x32_keep32_R(p1, yt, cosVal);
        multAcc_32x32_keep32_R(p0, yt, sinVal);
        multSub_32x32_keep32_R(p1, xt, sinVal);

        pSrc[2 * l]     = p0 << 1;
        pSrc[2 * l + 1] = p1 << 1;
    }

    /* first col */
    arm_radix4_butterfly_q31(pSrc, n2, (q31_t *)pCoef, 2U);

    /* second col */
    arm_radix4_butterfly_q31(pSrc + fftLen, n2, (q31_t *)pCoef, 2U);

    n2 = fftLen >> 1U;
    for (i = 0; i < n2; i++)
    {
        p0 = pSrc[4 * i + 0];
        p1 = pSrc[4 * i + 1];
        xt = pSrc[4 * i + 2];
        yt = pSrc[4 * i + 3];

        p0 <<= 1U;
        p1 <<= 1U;
        xt <<= 1U;
        yt <<= 1U;

        pSrc[4 * i + 0] = p0;
        pSrc[4 * i + 1] = p1;
        pSrc[4 * i + 2] = xt;
        pSrc[4 * i + 3] = yt;
    }
}
void arm_cfft_radix4by2_inverse_q31(
        q31_t *pSrc,
        uint32_t fftLen,
        const q31_t *pCoef)
{
    uint32_t i, l;
    uint32_t n2;
    q31_t xt, yt, cosVal, sinVal;
    q31_t p0, p1;

    n2 = fftLen >> 1U;
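    /*
     * Editorial note: same radix-2 split as the forward version, but the
     * twiddle multiplication below uses the opposite sign on sinVal, i.e.
     * (cosVal + j*sinVal), which corresponds to e^(+j*2*pi*i/N) for the
     * inverse transform.
     */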
    for (i = 0; i < n2; i++)
    {
        cosVal = pCoef[2 * i];
        sinVal = pCoef[2 * i + 1];

        l = i + n2;

        xt =          (pSrc[2 * i] >> 2U) - (pSrc[2 * l] >> 2U);
        pSrc[2 * i] = (pSrc[2 * i] >> 2U) + (pSrc[2 * l] >> 2U);

        yt =              (pSrc[2 * i + 1] >> 2U) - (pSrc[2 * l + 1] >> 2U);
        pSrc[2 * i + 1] = (pSrc[2 * l + 1] >> 2U) + (pSrc[2 * i + 1] >> 2U);

        mult_32x32_keep32_R(p0, xt, cosVal);
        mult_32x32_keep32_R(p1, yt, cosVal);
        multSub_32x32_keep32_R(p0, yt, sinVal);
        multAcc_32x32_keep32_R(p1, xt, sinVal);

        pSrc[2 * l]     = p0 << 1U;
        pSrc[2 * l + 1] = p1 << 1U;
    }

    /* first col */
    arm_radix4_butterfly_inverse_q31(pSrc, n2, (q31_t *)pCoef, 2U);

    /* second col */
    arm_radix4_butterfly_inverse_q31(pSrc + fftLen, n2, (q31_t *)pCoef, 2U);

    n2 = fftLen >> 1U;
    for (i = 0; i < n2; i++)
    {
        p0 = pSrc[4 * i + 0];
        p1 = pSrc[4 * i + 1];
        xt = pSrc[4 * i + 2];
        yt = pSrc[4 * i + 3];

        p0 <<= 1U;
        p1 <<= 1U;
        xt <<= 1U;
        yt <<= 1U;

        pSrc[4 * i + 0] = p0;
        pSrc[4 * i + 1] = p1;
        pSrc[4 * i + 2] = xt;
        pSrc[4 * i + 3] = yt;
    }
}

#endif /* defined(ARM_MATH_MVEI) */