arm_cfft_f32.c 40 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222
  1. /* ----------------------------------------------------------------------
  2. * Project: CMSIS DSP Library
  3. * Title: arm_cfft_f32.c
  4. * Description: Combined Radix Decimation in Frequency CFFT Floating point processing function
  5. *
  6. * $Date: 18. March 2019
  7. * $Revision: V1.6.0
  8. *
  9. * Target Processor: Cortex-M cores
  10. * -------------------------------------------------------------------- */
  11. /*
  12. * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved.
  13. *
  14. * SPDX-License-Identifier: Apache-2.0
  15. *
  16. * Licensed under the Apache License, Version 2.0 (the License); you may
  17. * not use this file except in compliance with the License.
  18. * You may obtain a copy of the License at
  19. *
  20. * www.apache.org/licenses/LICENSE-2.0
  21. *
  22. * Unless required by applicable law or agreed to in writing, software
  23. * distributed under the License is distributed on an AS IS BASIS, WITHOUT
  24. * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  25. * See the License for the specific language governing permissions and
  26. * limitations under the License.
  27. */
  28. #include "arm_math.h"
  29. #include "arm_common_tables.h"
  30. #if defined(ARM_MATH_MVEF) && !defined(ARM_MATH_AUTOVECTORIZE)
  31. #include "arm_helium_utils.h"
  32. #include "arm_vec_fft.h"
  33. #include "arm_mve_tables.h"
  34. static float32_t arm_inverse_fft_length_f32(uint16_t fftLen)
  35. {
  36. float32_t retValue=1.0;
  37. switch (fftLen)
  38. {
  39. case 4096U:
  40. retValue = 0.000244140625;
  41. break;
  42. case 2048U:
  43. retValue = 0.00048828125;
  44. break;
  45. case 1024U:
  46. retValue = 0.0009765625f;
  47. break;
  48. case 512U:
  49. retValue = 0.001953125;
  50. break;
  51. case 256U:
  52. retValue = 0.00390625f;
  53. break;
  54. case 128U:
  55. retValue = 0.0078125;
  56. break;
  57. case 64U:
  58. retValue = 0.015625f;
  59. break;
  60. case 32U:
  61. retValue = 0.03125;
  62. break;
  63. case 16U:
  64. retValue = 0.0625f;
  65. break;
  66. default:
  67. break;
  68. }
  69. return(retValue);
  70. }
/**
  @brief  In-place bit-reversal reordering of an interleaved complex f32
          buffer using MVE gather/scatter.
  @param[in,out] pSrc       complex data viewed as (re,im) pairs; each pair
                            is moved as one 64-bit unit
  @param[in]     bitRevLen  number of entries in the bit-reversal table
  @param[in]     pBitRevTab table of offset pairs: entries 2k and 2k+1 are
                            the two positions to swap
                            (NOTE(review): entries appear to be byte offsets
                            into the pair array, as produced by the MVE CFFT
                            init — confirm against arm_cfft_init_f32)
 */
static void arm_bitreversal_f32_inpl_mve(
        uint32_t *pSrc,
  const uint16_t bitRevLen,
  const uint16_t *pBitRevTab)
{
    uint64_t *src = (uint64_t *) pSrc;   /* view (re,im) pairs as 64-bit words */
    uint32_t blkCnt;                     /* loop counters */
    uint32x4_t bitRevTabOff;
    uint32x4_t one = vdupq_n_u32(1);

    /* table holds bitRevLen/2 swap pairs; 2 swaps (4 offsets) per iteration */
    blkCnt = (bitRevLen / 2) / 2;
    while (blkCnt > 0U) {
        /* load 4 x u16 offsets, zero-extended to u32 lanes */
        bitRevTabOff = vldrhq_u32(pBitRevTab);
        pBitRevTab += 4;
        /* multiply-by-1 widening splits even/odd u32 lanes into two
           u64x2 offset vectors: bottom lanes = swap sources,
           top lanes = swap destinations */
        uint64x2_t bitRevOff1 = vmullbq_int_u32(bitRevTabOff, one);
        uint64x2_t bitRevOff2 = vmulltq_int_u32(bitRevTabOff, one);
        /* gather both complex pairs, store them back exchanged */
        uint64x2_t in1 = vldrdq_gather_offset_u64(src, bitRevOff1);
        uint64x2_t in2 = vldrdq_gather_offset_u64(src, bitRevOff2);
        vstrdq_scatter_offset_u64(src, bitRevOff1, in2);
        vstrdq_scatter_offset_u64(src, bitRevOff2, in1);
        /*
         * Decrement the blockSize loop counter
         */
        blkCnt--;
    }
}
/**
  @brief  Helium (MVE) forward complex FFT core: radix-4
          decimation-in-frequency stages, with the final radix-4 stage fused
          into a gather/scatter pass.
  @param[in]     S       instance with rearranged twiddle tables
                         (arm_cfft_init_f32 layout)
  @param[in,out] pSrc    fftLen interleaved complex values, processed in place
  @param[in]     fftLen  transform length; expected to be a power of 4 here
  Output is left in bit-reversed order; the caller runs bit reversal.
 */
static void _arm_radix4_butterfly_f32_mve(const arm_cfft_instance_f32 * S,float32_t * pSrc, uint32_t fftLen)
{
    f32x4_t vecTmp0, vecTmp1;
    f32x4_t vecSum0, vecDiff0, vecSum1, vecDiff1;
    f32x4_t vecA, vecB, vecC, vecD;
    uint32_t blkCnt;
    uint32_t n1, n2;          /* current / quarter butterfly span (complex elems) */
    uint32_t stage = 0;       /* index into per-stage twiddle start offsets */
    int32_t iter = 1;         /* butterfly groups in the current stage */
    /* Last-stage lane offsets (bytes), biased by -16 words because the
       writeback gather below pre-increments the base by 64 bytes.
       NOTE(review): sizeof(q31_t *) is used as the 4-byte word size — valid
       on the 32-bit Cortex-M targets this path is built for. */
    static const uint32_t strides[4] = {
        (0 - 16) * sizeof(q31_t *),
        (1 - 16) * sizeof(q31_t *),
        (8 - 16) * sizeof(q31_t *),
        (9 - 16) * sizeof(q31_t *)
    };
    n2 = fftLen;
    n1 = n2;
    n2 >>= 2u;
    /* all stages except the fused last one */
    for (int k = fftLen / 4u; k > 1; k >>= 2)
    {
        for (int i = 0; i < iter; i++)
        {
            /* per-stage starting points inside the vector-rearranged
               twiddle tables */
            float32_t const *p_rearranged_twiddle_tab_stride1 =
                &S->rearranged_twiddle_stride1[
                S->rearranged_twiddle_tab_stride1_arr[stage]];
            float32_t const *p_rearranged_twiddle_tab_stride2 =
                &S->rearranged_twiddle_stride2[
                S->rearranged_twiddle_tab_stride2_arr[stage]];
            float32_t const *p_rearranged_twiddle_tab_stride3 =
                &S->rearranged_twiddle_stride3[
                S->rearranged_twiddle_tab_stride3_arr[stage]];
            float32_t const *pW1, *pW2, *pW3;
            /* the four quarter-span legs of this butterfly group */
            float32_t *inA = pSrc + CMPLX_DIM * i * n1;
            float32_t *inB = inA + n2 * CMPLX_DIM;
            float32_t *inC = inB + n2 * CMPLX_DIM;
            float32_t *inD = inC + n2 * CMPLX_DIM;
            f32x4_t vecW;

            pW1 = p_rearranged_twiddle_tab_stride1;
            pW2 = p_rearranged_twiddle_tab_stride2;
            pW3 = p_rearranged_twiddle_tab_stride3;

            blkCnt = n2 / 2;
            /*
             * load 2 f32 complex pair
             */
            vecA = vldrwq_f32(inA);
            vecC = vldrwq_f32(inC);
            while (blkCnt > 0U)
            {
                vecB = vldrwq_f32(inB);
                vecD = vldrwq_f32(inD);
                vecSum0 = vecA + vecC;   /* vecSum0 = vaddq(vecA, vecC) */
                vecDiff0 = vecA - vecC;  /* vecDiff0 = vsubq(vecA, vecC) */
                vecSum1 = vecB + vecD;
                vecDiff1 = vecB - vecD;
                /*
                 * [ 1 1 1 1 ] * [ A B C D ]' .* 1
                 */
                vecTmp0 = vecSum0 + vecSum1;
                vst1q(inA, vecTmp0);
                inA += 4;
                /*
                 * [ 1 -1 1 -1 ] * [ A B C D ]'
                 */
                vecTmp0 = vecSum0 - vecSum1;
                /*
                 * [ 1 -1 1 -1 ] * [ A B C D ]'.* W2
                 */
                vecW = vld1q(pW2);
                pW2 += 4;
                vecTmp1 = MVE_CMPLX_MULT_FLT_Conj_AxB(vecW, vecTmp0);
                vst1q(inB, vecTmp1);
                inB += 4;
                /*
                 * [ 1 -i -1 +i ] * [ A B C D ]'
                 */
                vecTmp0 = MVE_CMPLX_SUB_A_ixB(vecDiff0, vecDiff1);
                /*
                 * [ 1 -i -1 +i ] * [ A B C D ]'.* W1
                 */
                vecW = vld1q(pW1);
                pW1 += 4;
                vecTmp1 = MVE_CMPLX_MULT_FLT_Conj_AxB(vecW, vecTmp0);
                vst1q(inC, vecTmp1);
                inC += 4;
                /*
                 * [ 1 +i -1 -i ] * [ A B C D ]'
                 */
                vecTmp0 = MVE_CMPLX_ADD_A_ixB(vecDiff0, vecDiff1);
                /*
                 * [ 1 +i -1 -i ] * [ A B C D ]'.* W3
                 */
                vecW = vld1q(pW3);
                pW3 += 4;
                vecTmp1 = MVE_CMPLX_MULT_FLT_Conj_AxB(vecW, vecTmp0);
                vst1q(inD, vecTmp1);
                inD += 4;
                /* software-pipelined loads for the next iteration */
                vecA = vldrwq_f32(inA);
                vecC = vldrwq_f32(inC);
                blkCnt--;
            }
        }
        n1 = n2;
        n2 >>= 2u;
        iter = iter << 2;    /* 4x more, quarter-size groups next stage */
        stage++;
    }
    /*
     * start of Last stage process
     */
    uint32x4_t vecScGathAddr = *(uint32x4_t *) strides;
    vecScGathAddr = vecScGathAddr + (uint32_t) pSrc;
    /* load scheduling */
    vecA = vldrwq_gather_base_wb_f32(&vecScGathAddr, 64);
    vecC = vldrwq_gather_base_f32(vecScGathAddr, 16);
    /* last stage twiddles are all 1: butterflies only, 8 complex per pass */
    blkCnt = (fftLen >> 3);
    while (blkCnt > 0U)
    {
        vecSum0 = vecA + vecC;   /* vecSum0 = vaddq(vecA, vecC) */
        vecDiff0 = vecA - vecC;  /* vecDiff0 = vsubq(vecA, vecC) */
        vecB = vldrwq_gather_base_f32(vecScGathAddr, 8);
        vecD = vldrwq_gather_base_f32(vecScGathAddr, 24);
        vecSum1 = vecB + vecD;
        vecDiff1 = vecB - vecD;
        /* pre-load for next iteration */
        vecA = vldrwq_gather_base_wb_f32(&vecScGathAddr, 64);
        vecC = vldrwq_gather_base_f32(vecScGathAddr, 16);
        vecTmp0 = vecSum0 + vecSum1;
        vstrwq_scatter_base_f32(vecScGathAddr, -64, vecTmp0);
        vecTmp0 = vecSum0 - vecSum1;
        vstrwq_scatter_base_f32(vecScGathAddr, -64 + 8, vecTmp0);
        vecTmp0 = MVE_CMPLX_SUB_A_ixB(vecDiff0, vecDiff1);
        vstrwq_scatter_base_f32(vecScGathAddr, -64 + 16, vecTmp0);
        vecTmp0 = MVE_CMPLX_ADD_A_ixB(vecDiff0, vecDiff1);
        vstrwq_scatter_base_f32(vecScGathAddr, -64 + 24, vecTmp0);
        blkCnt--;
    }
    /*
     * End of last stage process
     */
}
  236. static void arm_cfft_radix4by2_f32_mve(const arm_cfft_instance_f32 * S, float32_t *pSrc, uint32_t fftLen)
  237. {
  238. float32_t const *pCoefVec;
  239. float32_t const *pCoef = S->pTwiddle;
  240. float32_t *pIn0, *pIn1;
  241. uint32_t n2;
  242. uint32_t blkCnt;
  243. f32x4_t vecIn0, vecIn1, vecSum, vecDiff;
  244. f32x4_t vecCmplxTmp, vecTw;
  245. n2 = fftLen >> 1;
  246. pIn0 = pSrc;
  247. pIn1 = pSrc + fftLen;
  248. pCoefVec = pCoef;
  249. blkCnt = n2 / 2;
  250. while (blkCnt > 0U)
  251. {
  252. vecIn0 = *(f32x4_t *) pIn0;
  253. vecIn1 = *(f32x4_t *) pIn1;
  254. vecTw = vld1q(pCoefVec);
  255. pCoefVec += 4;
  256. vecSum = vecIn0 + vecIn1;
  257. vecDiff = vecIn0 - vecIn1;
  258. vecCmplxTmp = MVE_CMPLX_MULT_FLT_Conj_AxB(vecTw, vecDiff);
  259. vst1q(pIn0, vecSum);
  260. pIn0 += 4;
  261. vst1q(pIn1, vecCmplxTmp);
  262. pIn1 += 4;
  263. blkCnt--;
  264. }
  265. _arm_radix4_butterfly_f32_mve(S, pSrc, n2);
  266. _arm_radix4_butterfly_f32_mve(S, pSrc + fftLen, n2);
  267. pIn0 = pSrc;
  268. }
/**
  @brief  Helium (MVE) inverse complex FFT core: radix-4
          decimation-in-frequency stages with a fused gather/scatter last
          stage that also applies the 1/fftLen scaling.
  @param[in]     S            instance with rearranged twiddle tables
  @param[in,out] pSrc         fftLen interleaved complex values, in place
  @param[in]     fftLen       transform length; expected power of 4 here
  @param[in]     onebyfftLen  scale factor 1/fftLen applied in the last stage
  Mirrors _arm_radix4_butterfly_f32_mve but uses the non-conjugated twiddle
  multiply and swapped +/-i rotations for the inverse direction.
  Output is left in bit-reversed order; the caller runs bit reversal.
 */
static void _arm_radix4_butterfly_inverse_f32_mve(const arm_cfft_instance_f32 * S,float32_t * pSrc, uint32_t fftLen, float32_t onebyfftLen)
{
    f32x4_t vecTmp0, vecTmp1;
    f32x4_t vecSum0, vecDiff0, vecSum1, vecDiff1;
    f32x4_t vecA, vecB, vecC, vecD;
    f32x4_t vecW;
    uint32_t blkCnt;
    uint32_t n1, n2;          /* current / quarter butterfly span (complex elems) */
    uint32_t stage = 0;       /* index into per-stage twiddle start offsets */
    int32_t iter = 1;         /* butterfly groups in the current stage */
    /* Last-stage lane offsets (bytes), biased by -16 words because the
       writeback gather below pre-increments the base by 64 bytes.
       NOTE(review): sizeof(q31_t *) is used as the 4-byte word size — valid
       on the 32-bit Cortex-M targets this path is built for. */
    static const uint32_t strides[4] = {
        (0 - 16) * sizeof(q31_t *),
        (1 - 16) * sizeof(q31_t *),
        (8 - 16) * sizeof(q31_t *),
        (9 - 16) * sizeof(q31_t *)
    };
    n2 = fftLen;
    n1 = n2;
    n2 >>= 2u;
    /* all stages except the fused last one */
    for (int k = fftLen / 4; k > 1; k >>= 2)
    {
        for (int i = 0; i < iter; i++)
        {
            /* per-stage starting points inside the vector-rearranged
               twiddle tables */
            float32_t const *p_rearranged_twiddle_tab_stride1 =
                &S->rearranged_twiddle_stride1[
                S->rearranged_twiddle_tab_stride1_arr[stage]];
            float32_t const *p_rearranged_twiddle_tab_stride2 =
                &S->rearranged_twiddle_stride2[
                S->rearranged_twiddle_tab_stride2_arr[stage]];
            float32_t const *p_rearranged_twiddle_tab_stride3 =
                &S->rearranged_twiddle_stride3[
                S->rearranged_twiddle_tab_stride3_arr[stage]];
            float32_t const *pW1, *pW2, *pW3;
            /* the four quarter-span legs of this butterfly group */
            float32_t *inA = pSrc + CMPLX_DIM * i * n1;
            float32_t *inB = inA + n2 * CMPLX_DIM;
            float32_t *inC = inB + n2 * CMPLX_DIM;
            float32_t *inD = inC + n2 * CMPLX_DIM;

            pW1 = p_rearranged_twiddle_tab_stride1;
            pW2 = p_rearranged_twiddle_tab_stride2;
            pW3 = p_rearranged_twiddle_tab_stride3;

            blkCnt = n2 / 2;
            /*
             * load 2 f32 complex pair
             */
            vecA = vldrwq_f32(inA);
            vecC = vldrwq_f32(inC);
            while (blkCnt > 0U)
            {
                vecB = vldrwq_f32(inB);
                vecD = vldrwq_f32(inD);
                vecSum0 = vecA + vecC;   /* vecSum0 = vaddq(vecA, vecC) */
                vecDiff0 = vecA - vecC;  /* vecDiff0 = vsubq(vecA, vecC) */
                vecSum1 = vecB + vecD;
                vecDiff1 = vecB - vecD;
                /*
                 * [ 1 1 1 1 ] * [ A B C D ]' .* 1
                 */
                vecTmp0 = vecSum0 + vecSum1;
                vst1q(inA, vecTmp0);
                inA += 4;
                /*
                 * [ 1 -1 1 -1 ] * [ A B C D ]'
                 */
                vecTmp0 = vecSum0 - vecSum1;
                /*
                 * [ 1 -1 1 -1 ] * [ A B C D ]'.* W2
                 * (comment fixed: pW2 is loaded here, not W1)
                 */
                vecW = vld1q(pW2);
                pW2 += 4;
                vecTmp1 = MVE_CMPLX_MULT_FLT_AxB(vecW, vecTmp0);
                vst1q(inB, vecTmp1);
                inB += 4;
                /*
                 * [ 1 +i -1 -i ] * [ A B C D ]'
                 * (inverse direction: +i rotation where the forward used -i)
                 */
                vecTmp0 = MVE_CMPLX_ADD_A_ixB(vecDiff0, vecDiff1);
                /*
                 * [ 1 +i -1 -i ] * [ A B C D ]'.* W1
                 * (comment fixed: pW1 is loaded here, not W2)
                 */
                vecW = vld1q(pW1);
                pW1 += 4;
                vecTmp1 = MVE_CMPLX_MULT_FLT_AxB(vecW, vecTmp0);
                vst1q(inC, vecTmp1);
                inC += 4;
                /*
                 * [ 1 -i -1 +i ] * [ A B C D ]'
                 */
                vecTmp0 = MVE_CMPLX_SUB_A_ixB(vecDiff0, vecDiff1);
                /*
                 * [ 1 -i -1 +i ] * [ A B C D ]'.* W3
                 */
                vecW = vld1q(pW3);
                pW3 += 4;
                vecTmp1 = MVE_CMPLX_MULT_FLT_AxB(vecW, vecTmp0);
                vst1q(inD, vecTmp1);
                inD += 4;
                /* software-pipelined loads for the next iteration */
                vecA = vldrwq_f32(inA);
                vecC = vldrwq_f32(inC);
                blkCnt--;
            }
        }
        n1 = n2;
        n2 >>= 2u;
        iter = iter << 2;    /* 4x more, quarter-size groups next stage */
        stage++;
    }
    /*
     * start of Last stage process
     */
    uint32x4_t vecScGathAddr = *(uint32x4_t *) strides;
    vecScGathAddr = vecScGathAddr + (uint32_t) pSrc;
    /*
     * load scheduling
     */
    vecA = vldrwq_gather_base_wb_f32(&vecScGathAddr, 64);
    vecC = vldrwq_gather_base_f32(vecScGathAddr, 16);
    /* last stage: unit twiddles, butterflies plus 1/fftLen scaling */
    blkCnt = (fftLen >> 3);
    while (blkCnt > 0U)
    {
        vecSum0 = vecA + vecC;   /* vecSum0 = vaddq(vecA, vecC) */
        vecDiff0 = vecA - vecC;  /* vecDiff0 = vsubq(vecA, vecC) */
        vecB = vldrwq_gather_base_f32(vecScGathAddr, 8);
        vecD = vldrwq_gather_base_f32(vecScGathAddr, 24);
        vecSum1 = vecB + vecD;
        vecDiff1 = vecB - vecD;
        /* pre-load for next iteration */
        vecA = vldrwq_gather_base_wb_f32(&vecScGathAddr, 64);
        vecC = vldrwq_gather_base_f32(vecScGathAddr, 16);
        vecTmp0 = vecSum0 + vecSum1;
        vecTmp0 = vecTmp0 * onebyfftLen;
        vstrwq_scatter_base_f32(vecScGathAddr, -64, vecTmp0);
        vecTmp0 = vecSum0 - vecSum1;
        vecTmp0 = vecTmp0 * onebyfftLen;
        vstrwq_scatter_base_f32(vecScGathAddr, -64 + 8, vecTmp0);
        vecTmp0 = MVE_CMPLX_ADD_A_ixB(vecDiff0, vecDiff1);
        vecTmp0 = vecTmp0 * onebyfftLen;
        vstrwq_scatter_base_f32(vecScGathAddr, -64 + 16, vecTmp0);
        vecTmp0 = MVE_CMPLX_SUB_A_ixB(vecDiff0, vecDiff1);
        vecTmp0 = vecTmp0 * onebyfftLen;
        vstrwq_scatter_base_f32(vecScGathAddr, -64 + 24, vecTmp0);
        blkCnt--;
    }
    /*
     * End of last stage process
     */
}
  414. static void arm_cfft_radix4by2_inverse_f32_mve(const arm_cfft_instance_f32 * S,float32_t *pSrc, uint32_t fftLen)
  415. {
  416. float32_t const *pCoefVec;
  417. float32_t const *pCoef = S->pTwiddle;
  418. float32_t *pIn0, *pIn1;
  419. uint32_t n2;
  420. float32_t onebyfftLen = arm_inverse_fft_length_f32(fftLen);
  421. uint32_t blkCnt;
  422. f32x4_t vecIn0, vecIn1, vecSum, vecDiff;
  423. f32x4_t vecCmplxTmp, vecTw;
  424. n2 = fftLen >> 1;
  425. pIn0 = pSrc;
  426. pIn1 = pSrc + fftLen;
  427. pCoefVec = pCoef;
  428. blkCnt = n2 / 2;
  429. while (blkCnt > 0U)
  430. {
  431. vecIn0 = *(f32x4_t *) pIn0;
  432. vecIn1 = *(f32x4_t *) pIn1;
  433. vecTw = vld1q(pCoefVec);
  434. pCoefVec += 4;
  435. vecSum = vecIn0 + vecIn1;
  436. vecDiff = vecIn0 - vecIn1;
  437. vecCmplxTmp = MVE_CMPLX_MULT_FLT_AxB(vecTw, vecDiff);
  438. vst1q(pIn0, vecSum);
  439. pIn0 += 4;
  440. vst1q(pIn1, vecCmplxTmp);
  441. pIn1 += 4;
  442. blkCnt--;
  443. }
  444. _arm_radix4_butterfly_inverse_f32_mve(S, pSrc, n2, onebyfftLen);
  445. _arm_radix4_butterfly_inverse_f32_mve(S, pSrc + fftLen, n2, onebyfftLen);
  446. }
  447. /**
  448. @addtogroup ComplexFFT
  449. @{
  450. */
  451. /**
  452. @brief Processing function for the floating-point complex FFT.
  453. @param[in] S points to an instance of the floating-point CFFT structure
  454. @param[in,out] p1 points to the complex data buffer of size <code>2*fftLen</code>. Processing occurs in-place
  455. @param[in] ifftFlag flag that selects transform direction
  456. - value = 0: forward transform
  457. - value = 1: inverse transform
  458. @param[in] bitReverseFlag flag that enables / disables bit reversal of output
  459. - value = 0: disables bit reversal of output
  460. - value = 1: enables bit reversal of output
  461. @return none
  462. */
  463. void arm_cfft_f32(
  464. const arm_cfft_instance_f32 * S,
  465. float32_t * pSrc,
  466. uint8_t ifftFlag,
  467. uint8_t bitReverseFlag)
  468. {
  469. uint32_t fftLen = S->fftLen;
  470. if (ifftFlag == 1U) {
  471. switch (fftLen) {
  472. case 16:
  473. case 64:
  474. case 256:
  475. case 1024:
  476. case 4096:
  477. _arm_radix4_butterfly_inverse_f32_mve(S, pSrc, fftLen, arm_inverse_fft_length_f32(S->fftLen));
  478. break;
  479. case 32:
  480. case 128:
  481. case 512:
  482. case 2048:
  483. arm_cfft_radix4by2_inverse_f32_mve(S, pSrc, fftLen);
  484. break;
  485. }
  486. } else {
  487. switch (fftLen) {
  488. case 16:
  489. case 64:
  490. case 256:
  491. case 1024:
  492. case 4096:
  493. _arm_radix4_butterfly_f32_mve(S, pSrc, fftLen);
  494. break;
  495. case 32:
  496. case 128:
  497. case 512:
  498. case 2048:
  499. arm_cfft_radix4by2_f32_mve(S, pSrc, fftLen);
  500. break;
  501. }
  502. }
  503. if (bitReverseFlag)
  504. {
  505. arm_bitreversal_f32_inpl_mve((uint32_t*)pSrc, S->bitRevLength, S->pBitRevTable);
  506. }
  507. }
  508. #else
  509. extern void arm_radix8_butterfly_f32(
  510. float32_t * pSrc,
  511. uint16_t fftLen,
  512. const float32_t * pCoef,
  513. uint16_t twidCoefModifier);
  514. extern void arm_bitreversal_32(
  515. uint32_t * pSrc,
  516. const uint16_t bitRevLen,
  517. const uint16_t * pBitRevTable);
  518. /**
  519. @ingroup groupTransforms
  520. */
  521. /**
  522. @defgroup ComplexFFT Complex FFT Functions
  523. @par
  524. The Fast Fourier Transform (FFT) is an efficient algorithm for computing the
  525. Discrete Fourier Transform (DFT). The FFT can be orders of magnitude faster
  526. than the DFT, especially for long lengths.
  527. The algorithms described in this section
  528. operate on complex data. A separate set of functions is devoted to handling
  529. of real sequences.
  530. @par
  531. There are separate algorithms for handling floating-point, Q15, and Q31 data
  532. types. The algorithms available for each data type are described next.
  533. @par
  534. The FFT functions operate in-place. That is, the array holding the input data
  535. will also be used to hold the corresponding result. The input data is complex
  536. and contains <code>2*fftLen</code> interleaved values as shown below.
  537. <pre>{real[0], imag[0], real[1], imag[1], ...} </pre>
  538. The FFT result will be contained in the same array and the frequency domain
  539. values will have the same interleaving.
  540. @par Floating-point
  541. The floating-point complex FFT uses a mixed-radix algorithm. Multiple radix-8
  542. stages are performed along with a single radix-2 or radix-4 stage, as needed.
  543. The algorithm supports lengths of [16, 32, 64, ..., 4096] and each length uses
  544. a different twiddle factor table.
  545. @par
  546. The function uses the standard FFT definition and output values may grow by a
  547. factor of <code>fftLen</code> when computing the forward transform. The
  548. inverse transform includes a scale of <code>1/fftLen</code> as part of the
  549. calculation and this matches the textbook definition of the inverse FFT.
  550. @par
  551. For the MVE version, the new arm_cfft_init_f32 initialization function is
  552. <b>mandatory</b>. <b>Compilation flags are available to include only the required tables for the
  553. needed FFTs.</b> Other FFT versions can continue to be initialized as
  554. explained below.
  555. @par
  556. For not MVE versions, pre-initialized data structures containing twiddle factors
  557. and bit reversal tables are provided and defined in <code>arm_const_structs.h</code>. Include
  558. this header in your function and then pass one of the constant structures as
  559. an argument to arm_cfft_f32. For example:
  560. @par
  561. <code>arm_cfft_f32(&arm_cfft_sR_f32_len64, pSrc, 1, 1)</code>
  562. @par
  563. computes a 64-point inverse complex FFT including bit reversal.
  564. The data structures are treated as constant data and not modified during the
  565. calculation. The same data structure can be reused for multiple transforms
  566. including mixing forward and inverse transforms.
  567. @par
  568. Earlier releases of the library provided separate radix-2 and radix-4
  569. algorithms that operated on floating-point data. These functions are still
  570. provided but are deprecated. The older functions are slower and less general
  571. than the new functions.
  572. @par
  573. An example of initialization of the constants for the arm_cfft_f32 function follows:
  574. @code
  575. const static arm_cfft_instance_f32 *S;
  576. ...
  577. switch (length) {
  578. case 16:
  579. S = &arm_cfft_sR_f32_len16;
  580. break;
  581. case 32:
  582. S = &arm_cfft_sR_f32_len32;
  583. break;
  584. case 64:
  585. S = &arm_cfft_sR_f32_len64;
  586. break;
  587. case 128:
  588. S = &arm_cfft_sR_f32_len128;
  589. break;
  590. case 256:
  591. S = &arm_cfft_sR_f32_len256;
  592. break;
  593. case 512:
  594. S = &arm_cfft_sR_f32_len512;
  595. break;
  596. case 1024:
  597. S = &arm_cfft_sR_f32_len1024;
  598. break;
  599. case 2048:
  600. S = &arm_cfft_sR_f32_len2048;
  601. break;
  602. case 4096:
  603. S = &arm_cfft_sR_f32_len4096;
  604. break;
  605. }
  606. @endcode
  607. @par
  608. The new arm_cfft_init_f32 can also be used.
  609. @par Q15 and Q31
  610. The fixed-point complex FFT uses a mixed-radix algorithm. Multiple radix-4
  611. stages are performed along with a single radix-2 stage, as needed.
  612. The algorithm supports lengths of [16, 32, 64, ..., 4096] and each length uses
  613. a different twiddle factor table.
  614. @par
  615. The function uses the standard FFT definition and output values may grow by a
  616. factor of <code>fftLen</code> when computing the forward transform. The
  617. inverse transform includes a scale of <code>1/fftLen</code> as part of the
  618. calculation and this matches the textbook definition of the inverse FFT.
  619. @par
  620. Pre-initialized data structures containing twiddle factors and bit reversal
  621. tables are provided and defined in <code>arm_const_structs.h</code>. Include
  622. this header in your function and then pass one of the constant structures as
  623. an argument to arm_cfft_q31. For example:
  624. @par
  625. <code>arm_cfft_q31(&arm_cfft_sR_q31_len64, pSrc, 1, 1)</code>
  626. @par
  627. computes a 64-point inverse complex FFT including bit reversal.
  628. The data structures are treated as constant data and not modified during the
  629. calculation. The same data structure can be reused for multiple transforms
  630. including mixing forward and inverse transforms.
  631. @par
  632. Earlier releases of the library provided separate radix-2 and radix-4
  633. algorithms that operated on Q15 and Q31 data. These functions are still
  634. provided but are deprecated. The older functions are slower and less general
  635. than the new functions.
  636. @par
  637. An example of initialization of the constants for the arm_cfft_q31 function follows:
  638. @code
  639. const static arm_cfft_instance_q31 *S;
  640. ...
  641. switch (length) {
  642. case 16:
  643. S = &arm_cfft_sR_q31_len16;
  644. break;
  645. case 32:
  646. S = &arm_cfft_sR_q31_len32;
  647. break;
  648. case 64:
  649. S = &arm_cfft_sR_q31_len64;
  650. break;
  651. case 128:
  652. S = &arm_cfft_sR_q31_len128;
  653. break;
  654. case 256:
  655. S = &arm_cfft_sR_q31_len256;
  656. break;
  657. case 512:
  658. S = &arm_cfft_sR_q31_len512;
  659. break;
  660. case 1024:
  661. S = &arm_cfft_sR_q31_len1024;
  662. break;
  663. case 2048:
  664. S = &arm_cfft_sR_q31_len2048;
  665. break;
  666. case 4096:
  667. S = &arm_cfft_sR_q31_len4096;
  668. break;
  669. }
  670. @endcode
  671. */
/**
  @brief  Forward CFFT for lengths of the form 2 * 8^n: a radix-2
          decimation-in-frequency stage followed by two half-length radix-8
          transforms, in place on p1.
  @param[in]     S   instance holding fftLen and the twiddle table
  @param[in,out] p1  2*fftLen interleaved floats (re,im pairs)

  The radix-2 stage walks the two halves from the top (p1/p2) and from the
  middle (pMid1/pMid2) simultaneously, so that one loaded twiddle pair
  (twR,twI) serves both a butterfly and its vertically-mirrored counterpart.
 */
void arm_cfft_radix8by2_f32 (arm_cfft_instance_f32 * S, float32_t * p1)
{
    uint32_t L = S->fftLen;
    float32_t * pCol1, * pCol2, * pMid1, * pMid2;
    float32_t * p2 = p1 + L;                        /* second half (L floats = L/2 complex) */
    const float32_t * tw = (float32_t *) S->pTwiddle;
    float32_t t1[4], t2[4], t3[4], t4[4], twR, twI; /* two complex samples per t-array */
    float32_t m0, m1, m2, m3;
    uint32_t l;

    pCol1 = p1;
    pCol2 = p2;
    /* Define new length */
    L >>= 1;
    /* Initialize mid pointers */
    pMid1 = p1 + L;
    pMid2 = p2 + L;

    /* do two dot Fourier transform */
    for (l = L >> 2; l > 0; l-- )
    {
        /* load two complex samples from each of the four streams */
        t1[0] = p1[0];
        t1[1] = p1[1];
        t1[2] = p1[2];
        t1[3] = p1[3];
        t2[0] = p2[0];
        t2[1] = p2[1];
        t2[2] = p2[2];
        t2[3] = p2[3];
        t3[0] = pMid1[0];
        t3[1] = pMid1[1];
        t3[2] = pMid1[2];
        t3[3] = pMid1[3];
        t4[0] = pMid2[0];
        t4[1] = pMid2[1];
        t4[2] = pMid2[2];
        t4[3] = pMid2[3];

        *p1++ = t1[0] + t2[0];
        *p1++ = t1[1] + t2[1];
        *p1++ = t1[2] + t2[2];
        *p1++ = t1[3] + t2[3];          /* col 1 */

        t2[0] = t1[0] - t2[0];
        t2[1] = t1[1] - t2[1];
        t2[2] = t1[2] - t2[2];
        t2[3] = t1[3] - t2[3];          /* for col 2 */

        *pMid1++ = t3[0] + t4[0];
        *pMid1++ = t3[1] + t4[1];
        *pMid1++ = t3[2] + t4[2];
        *pMid1++ = t3[3] + t4[3];       /* col 1 */

        /* note reversed operand order vs t2: compensates for the mirrored
           twiddle used on the bottom half below */
        t4[0] = t4[0] - t3[0];
        t4[1] = t4[1] - t3[1];
        t4[2] = t4[2] - t3[2];
        t4[3] = t4[3] - t3[3];          /* for col 2 */

        twR = *tw++;
        twI = *tw++;
        /* multiply by twiddle factors */
        m0 = t2[0] * twR;
        m1 = t2[1] * twI;
        m2 = t2[1] * twR;
        m3 = t2[0] * twI;
        /* R' = R * Tr + I * Ti  (multiply by conj(w)) */
        *p2++ = m0 + m1;
        /* I' = I * Tr - R * Ti */
        *p2++ = m2 - m3;
        /* use vertical symmetry */
        /* 0.9988 - 0.0491i <==> -0.0491 - 0.9988i */
        m0 = t4[0] * twI;
        m1 = t4[1] * twR;
        m2 = t4[1] * twI;
        m3 = t4[0] * twR;
        *pMid2++ = m0 - m1;
        *pMid2++ = m2 + m3;

        /* second complex sample: next twiddle pair, same pattern */
        twR = *tw++;
        twI = *tw++;
        m0 = t2[2] * twR;
        m1 = t2[3] * twI;
        m2 = t2[3] * twR;
        m3 = t2[2] * twI;
        *p2++ = m0 + m1;
        *p2++ = m2 - m3;
        m0 = t4[2] * twI;
        m1 = t4[3] * twR;
        m2 = t4[3] * twI;
        m3 = t4[2] * twR;
        *pMid2++ = m0 - m1;
        *pMid2++ = m2 + m3;
    }
    /* first col */
    arm_radix8_butterfly_f32 (pCol1, L, (float32_t *) S->pTwiddle, 2U);
    /* second col */
    arm_radix8_butterfly_f32 (pCol2, L, (float32_t *) S->pTwiddle, 2U);
}
  762. void arm_cfft_radix8by4_f32 (arm_cfft_instance_f32 * S, float32_t * p1)
  763. {
  764. uint32_t L = S->fftLen >> 1;
  765. float32_t * pCol1, *pCol2, *pCol3, *pCol4, *pEnd1, *pEnd2, *pEnd3, *pEnd4;
  766. const float32_t *tw2, *tw3, *tw4;
  767. float32_t * p2 = p1 + L;
  768. float32_t * p3 = p2 + L;
  769. float32_t * p4 = p3 + L;
  770. float32_t t2[4], t3[4], t4[4], twR, twI;
  771. float32_t p1ap3_0, p1sp3_0, p1ap3_1, p1sp3_1;
  772. float32_t m0, m1, m2, m3;
  773. uint32_t l, twMod2, twMod3, twMod4;
  774. pCol1 = p1; /* points to real values by default */
  775. pCol2 = p2;
  776. pCol3 = p3;
  777. pCol4 = p4;
  778. pEnd1 = p2 - 1; /* points to imaginary values by default */
  779. pEnd2 = p3 - 1;
  780. pEnd3 = p4 - 1;
  781. pEnd4 = pEnd3 + L;
  782. tw2 = tw3 = tw4 = (float32_t *) S->pTwiddle;
  783. L >>= 1;
  784. /* do four dot Fourier transform */
  785. twMod2 = 2;
  786. twMod3 = 4;
  787. twMod4 = 6;
  788. /* TOP */
  789. p1ap3_0 = p1[0] + p3[0];
  790. p1sp3_0 = p1[0] - p3[0];
  791. p1ap3_1 = p1[1] + p3[1];
  792. p1sp3_1 = p1[1] - p3[1];
  793. /* col 2 */
  794. t2[0] = p1sp3_0 + p2[1] - p4[1];
  795. t2[1] = p1sp3_1 - p2[0] + p4[0];
  796. /* col 3 */
  797. t3[0] = p1ap3_0 - p2[0] - p4[0];
  798. t3[1] = p1ap3_1 - p2[1] - p4[1];
  799. /* col 4 */
  800. t4[0] = p1sp3_0 - p2[1] + p4[1];
  801. t4[1] = p1sp3_1 + p2[0] - p4[0];
  802. /* col 1 */
  803. *p1++ = p1ap3_0 + p2[0] + p4[0];
  804. *p1++ = p1ap3_1 + p2[1] + p4[1];
  805. /* Twiddle factors are ones */
  806. *p2++ = t2[0];
  807. *p2++ = t2[1];
  808. *p3++ = t3[0];
  809. *p3++ = t3[1];
  810. *p4++ = t4[0];
  811. *p4++ = t4[1];
  812. tw2 += twMod2;
  813. tw3 += twMod3;
  814. tw4 += twMod4;
  815. for (l = (L - 2) >> 1; l > 0; l-- )
  816. {
  817. /* TOP */
  818. p1ap3_0 = p1[0] + p3[0];
  819. p1sp3_0 = p1[0] - p3[0];
  820. p1ap3_1 = p1[1] + p3[1];
  821. p1sp3_1 = p1[1] - p3[1];
  822. /* col 2 */
  823. t2[0] = p1sp3_0 + p2[1] - p4[1];
  824. t2[1] = p1sp3_1 - p2[0] + p4[0];
  825. /* col 3 */
  826. t3[0] = p1ap3_0 - p2[0] - p4[0];
  827. t3[1] = p1ap3_1 - p2[1] - p4[1];
  828. /* col 4 */
  829. t4[0] = p1sp3_0 - p2[1] + p4[1];
  830. t4[1] = p1sp3_1 + p2[0] - p4[0];
  831. /* col 1 - top */
  832. *p1++ = p1ap3_0 + p2[0] + p4[0];
  833. *p1++ = p1ap3_1 + p2[1] + p4[1];
  834. /* BOTTOM */
  835. p1ap3_1 = pEnd1[-1] + pEnd3[-1];
  836. p1sp3_1 = pEnd1[-1] - pEnd3[-1];
  837. p1ap3_0 = pEnd1[ 0] + pEnd3[0];
  838. p1sp3_0 = pEnd1[ 0] - pEnd3[0];
  839. /* col 2 */
  840. t2[2] = pEnd2[0] - pEnd4[0] + p1sp3_1;
  841. t2[3] = pEnd1[0] - pEnd3[0] - pEnd2[-1] + pEnd4[-1];
  842. /* col 3 */
  843. t3[2] = p1ap3_1 - pEnd2[-1] - pEnd4[-1];
  844. t3[3] = p1ap3_0 - pEnd2[ 0] - pEnd4[ 0];
  845. /* col 4 */
  846. t4[2] = pEnd2[ 0] - pEnd4[ 0] - p1sp3_1;
  847. t4[3] = pEnd4[-1] - pEnd2[-1] - p1sp3_0;
  848. /* col 1 - Bottom */
  849. *pEnd1-- = p1ap3_0 + pEnd2[ 0] + pEnd4[ 0];
  850. *pEnd1-- = p1ap3_1 + pEnd2[-1] + pEnd4[-1];
  851. /* COL 2 */
  852. /* read twiddle factors */
  853. twR = *tw2++;
  854. twI = *tw2++;
  855. /* multiply by twiddle factors */
  856. /* let Z1 = a + i(b), Z2 = c + i(d) */
  857. /* => Z1 * Z2 = (a*c - b*d) + i(b*c + a*d) */
  858. /* Top */
  859. m0 = t2[0] * twR;
  860. m1 = t2[1] * twI;
  861. m2 = t2[1] * twR;
  862. m3 = t2[0] * twI;
  863. *p2++ = m0 + m1;
  864. *p2++ = m2 - m3;
  865. /* use vertical symmetry col 2 */
  866. /* 0.9997 - 0.0245i <==> 0.0245 - 0.9997i */
  867. /* Bottom */
  868. m0 = t2[3] * twI;
  869. m1 = t2[2] * twR;
  870. m2 = t2[2] * twI;
  871. m3 = t2[3] * twR;
  872. *pEnd2-- = m0 - m1;
  873. *pEnd2-- = m2 + m3;
  874. /* COL 3 */
  875. twR = tw3[0];
  876. twI = tw3[1];
  877. tw3 += twMod3;
  878. /* Top */
  879. m0 = t3[0] * twR;
  880. m1 = t3[1] * twI;
  881. m2 = t3[1] * twR;
  882. m3 = t3[0] * twI;
  883. *p3++ = m0 + m1;
  884. *p3++ = m2 - m3;
  885. /* use vertical symmetry col 3 */
  886. /* 0.9988 - 0.0491i <==> -0.9988 - 0.0491i */
  887. /* Bottom */
  888. m0 = -t3[3] * twR;
  889. m1 = t3[2] * twI;
  890. m2 = t3[2] * twR;
  891. m3 = t3[3] * twI;
  892. *pEnd3-- = m0 - m1;
  893. *pEnd3-- = m3 - m2;
  894. /* COL 4 */
  895. twR = tw4[0];
  896. twI = tw4[1];
  897. tw4 += twMod4;
  898. /* Top */
  899. m0 = t4[0] * twR;
  900. m1 = t4[1] * twI;
  901. m2 = t4[1] * twR;
  902. m3 = t4[0] * twI;
  903. *p4++ = m0 + m1;
  904. *p4++ = m2 - m3;
  905. /* use vertical symmetry col 4 */
  906. /* 0.9973 - 0.0736i <==> -0.0736 + 0.9973i */
  907. /* Bottom */
  908. m0 = t4[3] * twI;
  909. m1 = t4[2] * twR;
  910. m2 = t4[2] * twI;
  911. m3 = t4[3] * twR;
  912. *pEnd4-- = m0 - m1;
  913. *pEnd4-- = m2 + m3;
  914. }
  915. /* MIDDLE */
  916. /* Twiddle factors are */
  917. /* 1.0000 0.7071-0.7071i -1.0000i -0.7071-0.7071i */
  918. p1ap3_0 = p1[0] + p3[0];
  919. p1sp3_0 = p1[0] - p3[0];
  920. p1ap3_1 = p1[1] + p3[1];
  921. p1sp3_1 = p1[1] - p3[1];
  922. /* col 2 */
  923. t2[0] = p1sp3_0 + p2[1] - p4[1];
  924. t2[1] = p1sp3_1 - p2[0] + p4[0];
  925. /* col 3 */
  926. t3[0] = p1ap3_0 - p2[0] - p4[0];
  927. t3[1] = p1ap3_1 - p2[1] - p4[1];
  928. /* col 4 */
  929. t4[0] = p1sp3_0 - p2[1] + p4[1];
  930. t4[1] = p1sp3_1 + p2[0] - p4[0];
  931. /* col 1 - Top */
  932. *p1++ = p1ap3_0 + p2[0] + p4[0];
  933. *p1++ = p1ap3_1 + p2[1] + p4[1];
  934. /* COL 2 */
  935. twR = tw2[0];
  936. twI = tw2[1];
  937. m0 = t2[0] * twR;
  938. m1 = t2[1] * twI;
  939. m2 = t2[1] * twR;
  940. m3 = t2[0] * twI;
  941. *p2++ = m0 + m1;
  942. *p2++ = m2 - m3;
  943. /* COL 3 */
  944. twR = tw3[0];
  945. twI = tw3[1];
  946. m0 = t3[0] * twR;
  947. m1 = t3[1] * twI;
  948. m2 = t3[1] * twR;
  949. m3 = t3[0] * twI;
  950. *p3++ = m0 + m1;
  951. *p3++ = m2 - m3;
  952. /* COL 4 */
  953. twR = tw4[0];
  954. twI = tw4[1];
  955. m0 = t4[0] * twR;
  956. m1 = t4[1] * twI;
  957. m2 = t4[1] * twR;
  958. m3 = t4[0] * twI;
  959. *p4++ = m0 + m1;
  960. *p4++ = m2 - m3;
  961. /* first col */
  962. arm_radix8_butterfly_f32 (pCol1, L, (float32_t *) S->pTwiddle, 4U);
  963. /* second col */
  964. arm_radix8_butterfly_f32 (pCol2, L, (float32_t *) S->pTwiddle, 4U);
  965. /* third col */
  966. arm_radix8_butterfly_f32 (pCol3, L, (float32_t *) S->pTwiddle, 4U);
  967. /* fourth col */
  968. arm_radix8_butterfly_f32 (pCol4, L, (float32_t *) S->pTwiddle, 4U);
  969. }
  970. /**
  971. @addtogroup ComplexFFT
  972. @{
  973. */
  974. /**
  975. @brief Processing function for the floating-point complex FFT.
  976. @param[in] S points to an instance of the floating-point CFFT structure
  977. @param[in,out] p1 points to the complex data buffer of size <code>2*fftLen</code>. Processing occurs in-place
  978. @param[in] ifftFlag flag that selects transform direction
  979. - value = 0: forward transform
  980. - value = 1: inverse transform
  981. @param[in] bitReverseFlag flag that enables / disables bit reversal of output
  982. - value = 0: disables bit reversal of output
  983. - value = 1: enables bit reversal of output
  984. @return none
  985. */
  986. void arm_cfft_f32(
  987. const arm_cfft_instance_f32 * S,
  988. float32_t * p1,
  989. uint8_t ifftFlag,
  990. uint8_t bitReverseFlag)
  991. {
  992. uint32_t L = S->fftLen, l;
  993. float32_t invL, * pSrc;
  994. if (ifftFlag == 1U)
  995. {
  996. /* Conjugate input data */
  997. pSrc = p1 + 1;
  998. for (l = 0; l < L; l++)
  999. {
  1000. *pSrc = -*pSrc;
  1001. pSrc += 2;
  1002. }
  1003. }
  1004. switch (L)
  1005. {
  1006. case 16:
  1007. case 128:
  1008. case 1024:
  1009. arm_cfft_radix8by2_f32 ( (arm_cfft_instance_f32 *) S, p1);
  1010. break;
  1011. case 32:
  1012. case 256:
  1013. case 2048:
  1014. arm_cfft_radix8by4_f32 ( (arm_cfft_instance_f32 *) S, p1);
  1015. break;
  1016. case 64:
  1017. case 512:
  1018. case 4096:
  1019. arm_radix8_butterfly_f32 ( p1, L, (float32_t *) S->pTwiddle, 1);
  1020. break;
  1021. }
  1022. if ( bitReverseFlag )
  1023. arm_bitreversal_32 ((uint32_t*) p1, S->bitRevLength, S->pBitRevTable);
  1024. if (ifftFlag == 1U)
  1025. {
  1026. invL = 1.0f / (float32_t)L;
  1027. /* Conjugate and scale output data */
  1028. pSrc = p1;
  1029. for (l= 0; l < L; l++)
  1030. {
  1031. *pSrc++ *= invL ;
  1032. *pSrc = -(*pSrc) * invL;
  1033. pSrc++;
  1034. }
  1035. }
  1036. }
  1037. #endif /* defined(ARM_MATH_MVEF) && !defined(ARM_MATH_AUTOVECTORIZE) */
  1038. /**
  1039. @} end of ComplexFFT group
  1040. */