/* ----------------------------------------------------------------------
 * Project:      CMSIS DSP Library
 * Title:        arm_cfft_q31.c
 * Description:  Combined Radix Decimation in Frequency CFFT fixed point processing function
 *
 * $Date:        23 April 2021
 * $Revision:    V1.9.0
 *
 * Target Processor: Cortex-M and Cortex-A cores
 * -------------------------------------------------------------------- */
/*
 * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved.
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "dsp/transform_functions.h"

#if defined(ARM_MATH_MVEI) && !defined(ARM_MATH_AUTOVECTORIZE)

#include "arm_vec_fft.h"

static void _arm_radix4_butterfly_q31_mve(
    const arm_cfft_instance_q31 * S,
    q31_t *pSrc,
    uint32_t fftLen)
{
    q31x4_t vecTmp0, vecTmp1;
    q31x4_t vecSum0, vecDiff0, vecSum1, vecDiff1;
    q31x4_t vecA, vecB, vecC, vecD;
    q31x4_t vecW;
    uint32_t blkCnt;
    uint32_t n1, n2;
    uint32_t stage = 0;
    int32_t iter = 1;
    static const uint32_t strides[4] = {
        (0 - 16) * sizeof(q31_t *), (1 - 16) * sizeof(q31_t *),
        (8 - 16) * sizeof(q31_t *), (9 - 16) * sizeof(q31_t *)
    };
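    /*
     * Final-stage gather/scatter offsets: the gather base is pre-incremented
     * by 64 bytes (8 complex q31 samples) per iteration, so together with the
     * 8/16/24-byte offsets used in the last stage below, each vector holds two
     * complex elements, e.g. vecA = {x[8k], x[8k+4]}, vecB = {x[8k+1], x[8k+5]};
     * two radix-4 butterflies are processed per loop iteration.
     */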
    /*
     * Process first stages
     * Each middle stage scales the data down by 2 bits (two halving additions)
     */
    n2 = fftLen;
    n1 = n2;
    n2 >>= 2u;

    for (int k = fftLen / 4u; k > 1; k >>= 2u)
    {
        for (int i = 0; i < iter; i++)
        {
            q31_t const *p_rearranged_twiddle_tab_stride1 =
                &S->rearranged_twiddle_stride1[
                S->rearranged_twiddle_tab_stride1_arr[stage]];
            q31_t const *p_rearranged_twiddle_tab_stride2 =
                &S->rearranged_twiddle_stride2[
                S->rearranged_twiddle_tab_stride2_arr[stage]];
            q31_t const *p_rearranged_twiddle_tab_stride3 =
                &S->rearranged_twiddle_stride3[
                S->rearranged_twiddle_tab_stride3_arr[stage]];
            q31_t const *pW1, *pW2, *pW3;
            q31_t *inA = pSrc + CMPLX_DIM * i * n1;
            q31_t *inB = inA + n2 * CMPLX_DIM;
            q31_t *inC = inB + n2 * CMPLX_DIM;
            q31_t *inD = inC + n2 * CMPLX_DIM;

            pW1 = p_rearranged_twiddle_tab_stride1;
            pW2 = p_rearranged_twiddle_tab_stride2;
            pW3 = p_rearranged_twiddle_tab_stride3;

            blkCnt = n2 / 2;
            /*
             * load two q31 complex pairs
             */
            vecA = vldrwq_s32(inA);
            vecC = vldrwq_s32(inC);
            while (blkCnt > 0U)
            {
                vecB = vldrwq_s32(inB);
                vecD = vldrwq_s32(inD);

                vecSum0  = vhaddq(vecA, vecC);
                vecDiff0 = vhsubq(vecA, vecC);

                vecSum1  = vhaddq(vecB, vecD);
                vecDiff1 = vhsubq(vecB, vecD);
                /*
                 * [ 1 1 1 1 ] * [ A B C D ]' .* 1
                 */
                vecTmp0 = vhaddq(vecSum0, vecSum1);
                vst1q(inA, vecTmp0);
                inA += 4;
                /*
                 * [ 1 -1 1 -1 ] * [ A B C D ]'
                 */
                vecTmp0 = vhsubq(vecSum0, vecSum1);
                /*
                 * [ 1 -1 1 -1 ] * [ A B C D ]' .* W2
                 */
                vecW = vld1q(pW2);
                pW2 += 4;
                vecTmp1 = MVE_CMPLX_MULT_FX_AxB(vecW, vecTmp0);
                vst1q(inB, vecTmp1);
                inB += 4;
                /*
                 * [ 1 -i -1 +i ] * [ A B C D ]'
                 */
                vecTmp0 = MVE_CMPLX_SUB_FX_A_ixB(vecDiff0, vecDiff1);
                /*
                 * [ 1 -i -1 +i ] * [ A B C D ]' .* W1
                 */
                vecW = vld1q(pW1);
                pW1 += 4;
                vecTmp1 = MVE_CMPLX_MULT_FX_AxB(vecW, vecTmp0);
                vst1q(inC, vecTmp1);
                inC += 4;
                /*
                 * [ 1 +i -1 -i ] * [ A B C D ]'
                 */
                vecTmp0 = MVE_CMPLX_ADD_FX_A_ixB(vecDiff0, vecDiff1);
                /*
                 * [ 1 +i -1 -i ] * [ A B C D ]' .* W3
                 */
                vecW = vld1q(pW3);
                pW3 += 4;
                vecTmp1 = MVE_CMPLX_MULT_FX_AxB(vecW, vecTmp0);
                vst1q(inD, vecTmp1);
                inD += 4;

                vecA = vldrwq_s32(inA);
                vecC = vldrwq_s32(inC);

                blkCnt--;
            }
        }
        n1 = n2;
        n2 >>= 2u;
        iter = iter << 2;
        stage++;
    }
    /*
     * End of first stages processing
     * data is in 11.21 (q21) format for the 1024-point FFT, as there are 3 middle stages
     * data is in  9.23 (q23) format for the  256-point FFT, as there are 2 middle stages
     * data is in  7.25 (q25) format for the   64-point FFT, as there is 1 middle stage
     * data is in  5.27 (q27) format for the   16-point FFT, as there are no middle stages
     */
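    /*
     * In general the data is downscaled by log2(fftLen) bits overall, i.e. the
     * result is in Q(1 + log2(fftLen)).(31 - log2(fftLen)) format.
     */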
    /*
     * Start of last stage processing
     */
    uint32x4_t vecScGathAddr = vld1q_u32(strides);
    vecScGathAddr = vecScGathAddr + (uint32_t) pSrc;
    /*
     * load scheduling
     */
    vecA = vldrwq_gather_base_wb_s32(&vecScGathAddr, 64);
    vecC = vldrwq_gather_base_s32(vecScGathAddr, 16);

    blkCnt = (fftLen >> 3);
    while (blkCnt > 0U)
    {
        vecSum0  = vhaddq(vecA, vecC);
        vecDiff0 = vhsubq(vecA, vecC);

        vecB = vldrwq_gather_base_s32(vecScGathAddr, 8);
        vecD = vldrwq_gather_base_s32(vecScGathAddr, 24);

        vecSum1  = vhaddq(vecB, vecD);
        vecDiff1 = vhsubq(vecB, vecD);
        /*
         * pre-load for next iteration
         */
        vecA = vldrwq_gather_base_wb_s32(&vecScGathAddr, 64);
        vecC = vldrwq_gather_base_s32(vecScGathAddr, 16);

        vecTmp0 = vhaddq(vecSum0, vecSum1);
        vstrwq_scatter_base_s32(vecScGathAddr, -64, vecTmp0);

        vecTmp0 = vhsubq(vecSum0, vecSum1);
        vstrwq_scatter_base_s32(vecScGathAddr, -64 + 8, vecTmp0);

        vecTmp0 = MVE_CMPLX_SUB_FX_A_ixB(vecDiff0, vecDiff1);
        vstrwq_scatter_base_s32(vecScGathAddr, -64 + 16, vecTmp0);

        vecTmp0 = MVE_CMPLX_ADD_FX_A_ixB(vecDiff0, vecDiff1);
        vstrwq_scatter_base_s32(vecScGathAddr, -64 + 24, vecTmp0);

        blkCnt--;
    }
    /*
     * output is in 11.21 (q21) format for the 1024-point FFT
     * output is in  9.23 (q23) format for the  256-point FFT
     * output is in  7.25 (q25) format for the   64-point FFT
     * output is in  5.27 (q27) format for the   16-point FFT
     */
}
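
/*
 * Lengths of the form 4^k * 2 (32, 128, 512, 2048) cannot be handled by the
 * radix-4 kernel alone: the radix-2 stage below first splits the input into
 * two fftLen/2-point sequences (the second one rotated by the twiddle
 * factors), each half is then transformed with the radix-4 kernel above, and
 * a final pass re-scales the result by 2.
 */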
static void arm_cfft_radix4by2_q31_mve(const arm_cfft_instance_q31 *S, q31_t *pSrc, uint32_t fftLen)
{
    uint32_t n2;
    q31_t *pIn0;
    q31_t *pIn1;
    const q31_t *pCoef = S->pTwiddle;
    uint32_t blkCnt;
    q31x4_t vecIn0, vecIn1, vecSum, vecDiff;
    q31x4_t vecCmplxTmp, vecTw;

    n2 = fftLen >> 1;
    pIn0 = pSrc;
    pIn1 = pSrc + fftLen;
    blkCnt = n2 / 2;
    while (blkCnt > 0U)
    {
        vecIn0 = vld1q_s32(pIn0);
        vecIn1 = vld1q_s32(pIn1);

        vecIn0 = vecIn0 >> 1;
        vecIn1 = vecIn1 >> 1;
        vecSum = vhaddq(vecIn0, vecIn1);
        vst1q(pIn0, vecSum);
        pIn0 += 4;

        vecTw = vld1q_s32(pCoef);
        pCoef += 4;
        vecDiff = vhsubq(vecIn0, vecIn1);
        vecCmplxTmp = MVE_CMPLX_MULT_FX_AxConjB(vecDiff, vecTw);
        vst1q(pIn1, vecCmplxTmp);
        pIn1 += 4;

        blkCnt--;
    }

    _arm_radix4_butterfly_q31_mve(S, pSrc, n2);
    _arm_radix4_butterfly_q31_mve(S, pSrc + fftLen, n2);

    pIn0 = pSrc;
    blkCnt = (fftLen << 1) >> 2;
    while (blkCnt > 0U)
    {
        vecIn0 = vld1q_s32(pIn0);
        vecIn0 = vecIn0 << 1;
        vst1q(pIn0, vecIn0);
        pIn0 += 4;
        blkCnt--;
    }
    /*
     * tail
     * (will be merged through tail predication)
     */
    blkCnt = (fftLen << 1) & 3;
    if (blkCnt > 0U)
    {
        mve_pred16_t p0 = vctp32q(blkCnt);
        vecIn0 = vld1q_s32(pIn0);
        vecIn0 = vecIn0 << 1;
        vstrwq_p(pIn0, vecIn0, p0);
    }
}
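
/*
 * Inverse counterpart of _arm_radix4_butterfly_q31_mve: same structure, but
 * the twiddle multiplications use the conjugated twiddle factors (AxConjB)
 * and the +/-i butterfly branches are swapped.
 */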
static void _arm_radix4_butterfly_inverse_q31_mve(
    const arm_cfft_instance_q31 *S,
    q31_t *pSrc,
    uint32_t fftLen)
{
    q31x4_t vecTmp0, vecTmp1;
    q31x4_t vecSum0, vecDiff0, vecSum1, vecDiff1;
    q31x4_t vecA, vecB, vecC, vecD;
    q31x4_t vecW;
    uint32_t blkCnt;
    uint32_t n1, n2;
    uint32_t stage = 0;
    int32_t iter = 1;
    static const uint32_t strides[4] = {
        (0 - 16) * sizeof(q31_t *), (1 - 16) * sizeof(q31_t *),
        (8 - 16) * sizeof(q31_t *), (9 - 16) * sizeof(q31_t *)
    };
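    /* Same final-stage gather/scatter layout as in the forward kernel */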
    /*
     * Process first stages
     * Each middle stage scales the data down by 2 bits (two halving additions)
     */
    n2 = fftLen;
    n1 = n2;
    n2 >>= 2u;

    for (int k = fftLen / 4u; k > 1; k >>= 2u)
    {
        for (int i = 0; i < iter; i++)
        {
            q31_t const *p_rearranged_twiddle_tab_stride1 =
                &S->rearranged_twiddle_stride1[
                S->rearranged_twiddle_tab_stride1_arr[stage]];
            q31_t const *p_rearranged_twiddle_tab_stride2 =
                &S->rearranged_twiddle_stride2[
                S->rearranged_twiddle_tab_stride2_arr[stage]];
            q31_t const *p_rearranged_twiddle_tab_stride3 =
                &S->rearranged_twiddle_stride3[
                S->rearranged_twiddle_tab_stride3_arr[stage]];
            q31_t const *pW1, *pW2, *pW3;
            q31_t *inA = pSrc + CMPLX_DIM * i * n1;
            q31_t *inB = inA + n2 * CMPLX_DIM;
            q31_t *inC = inB + n2 * CMPLX_DIM;
            q31_t *inD = inC + n2 * CMPLX_DIM;

            pW1 = p_rearranged_twiddle_tab_stride1;
            pW2 = p_rearranged_twiddle_tab_stride2;
            pW3 = p_rearranged_twiddle_tab_stride3;

            blkCnt = n2 / 2;
            /*
             * load two q31 complex pairs
             */
            vecA = vldrwq_s32(inA);
            vecC = vldrwq_s32(inC);
            while (blkCnt > 0U)
            {
                vecB = vldrwq_s32(inB);
                vecD = vldrwq_s32(inD);

                vecSum0  = vhaddq(vecA, vecC);
                vecDiff0 = vhsubq(vecA, vecC);

                vecSum1  = vhaddq(vecB, vecD);
                vecDiff1 = vhsubq(vecB, vecD);
                /*
                 * [ 1 1 1 1 ] * [ A B C D ]' .* 1
                 */
                vecTmp0 = vhaddq(vecSum0, vecSum1);
                vst1q(inA, vecTmp0);
                inA += 4;
                /*
                 * [ 1 -1 1 -1 ] * [ A B C D ]'
                 */
                vecTmp0 = vhsubq(vecSum0, vecSum1);
                /*
                 * [ 1 -1 1 -1 ] * [ A B C D ]' .* W2
                 */
                vecW = vld1q(pW2);
                pW2 += 4;
                vecTmp1 = MVE_CMPLX_MULT_FX_AxConjB(vecTmp0, vecW);
                vst1q(inB, vecTmp1);
                inB += 4;
                /*
                 * [ 1 +i -1 -i ] * [ A B C D ]'
                 */
                vecTmp0 = MVE_CMPLX_ADD_FX_A_ixB(vecDiff0, vecDiff1);
                /*
                 * [ 1 +i -1 -i ] * [ A B C D ]' .* W1
                 */
                vecW = vld1q(pW1);
                pW1 += 4;
                vecTmp1 = MVE_CMPLX_MULT_FX_AxConjB(vecTmp0, vecW);
                vst1q(inC, vecTmp1);
                inC += 4;
                /*
                 * [ 1 -i -1 +i ] * [ A B C D ]'
                 */
                vecTmp0 = MVE_CMPLX_SUB_FX_A_ixB(vecDiff0, vecDiff1);
                /*
                 * [ 1 -i -1 +i ] * [ A B C D ]' .* W3
                 */
                vecW = vld1q(pW3);
                pW3 += 4;
                vecTmp1 = MVE_CMPLX_MULT_FX_AxConjB(vecTmp0, vecW);
                vst1q(inD, vecTmp1);
                inD += 4;

                vecA = vldrwq_s32(inA);
                vecC = vldrwq_s32(inC);

                blkCnt--;
            }
        }
        n1 = n2;
        n2 >>= 2u;
        iter = iter << 2;
        stage++;
    }
    /*
     * End of first stages processing
     * data is in 11.21 (q21) format for the 1024-point FFT, as there are 3 middle stages
     * data is in  9.23 (q23) format for the  256-point FFT, as there are 2 middle stages
     * data is in  7.25 (q25) format for the   64-point FFT, as there is 1 middle stage
     * data is in  5.27 (q27) format for the   16-point FFT, as there are no middle stages
     */
    /*
     * Start of last stage processing
     */
    uint32x4_t vecScGathAddr = vld1q_u32(strides);
    vecScGathAddr = vecScGathAddr + (uint32_t) pSrc;
    /*
     * load scheduling
     */
    vecA = vldrwq_gather_base_wb_s32(&vecScGathAddr, 64);
    vecC = vldrwq_gather_base_s32(vecScGathAddr, 16);

    blkCnt = (fftLen >> 3);
    while (blkCnt > 0U)
    {
        vecSum0  = vhaddq(vecA, vecC);
        vecDiff0 = vhsubq(vecA, vecC);

        vecB = vldrwq_gather_base_s32(vecScGathAddr, 8);
        vecD = vldrwq_gather_base_s32(vecScGathAddr, 24);

        vecSum1  = vhaddq(vecB, vecD);
        vecDiff1 = vhsubq(vecB, vecD);
        /*
         * pre-load for next iteration
         */
        vecA = vldrwq_gather_base_wb_s32(&vecScGathAddr, 64);
        vecC = vldrwq_gather_base_s32(vecScGathAddr, 16);

        vecTmp0 = vhaddq(vecSum0, vecSum1);
        vstrwq_scatter_base_s32(vecScGathAddr, -64, vecTmp0);

        vecTmp0 = vhsubq(vecSum0, vecSum1);
        vstrwq_scatter_base_s32(vecScGathAddr, -64 + 8, vecTmp0);

        vecTmp0 = MVE_CMPLX_ADD_FX_A_ixB(vecDiff0, vecDiff1);
        vstrwq_scatter_base_s32(vecScGathAddr, -64 + 16, vecTmp0);

        vecTmp0 = MVE_CMPLX_SUB_FX_A_ixB(vecDiff0, vecDiff1);
        vstrwq_scatter_base_s32(vecScGathAddr, -64 + 24, vecTmp0);

        blkCnt--;
    }
    /*
     * output is in 11.21 (q21) format for the 1024-point FFT
     * output is in  9.23 (q23) format for the  256-point FFT
     * output is in  7.25 (q25) format for the   64-point FFT
     * output is in  5.27 (q27) format for the   16-point FFT
     */
}
static void arm_cfft_radix4by2_inverse_q31_mve(const arm_cfft_instance_q31 *S, q31_t *pSrc, uint32_t fftLen)
{
    uint32_t n2;
    q31_t *pIn0;
    q31_t *pIn1;
    const q31_t *pCoef = S->pTwiddle;
    uint32_t blkCnt;
    q31x4_t vecIn0, vecIn1, vecSum, vecDiff;
    q31x4_t vecCmplxTmp, vecTw;

    n2 = fftLen >> 1;
    pIn0 = pSrc;
    pIn1 = pSrc + fftLen;
    blkCnt = n2 / 2;
    while (blkCnt > 0U)
    {
        vecIn0 = vld1q_s32(pIn0);
        vecIn1 = vld1q_s32(pIn1);

        vecIn0 = vecIn0 >> 1;
        vecIn1 = vecIn1 >> 1;
        vecSum = vhaddq(vecIn0, vecIn1);
        vst1q(pIn0, vecSum);
        pIn0 += 4;

        vecTw = vld1q_s32(pCoef);
        pCoef += 4;
        vecDiff = vhsubq(vecIn0, vecIn1);
        vecCmplxTmp = MVE_CMPLX_MULT_FX_AxB(vecDiff, vecTw);
        vst1q(pIn1, vecCmplxTmp);
        pIn1 += 4;

        blkCnt--;
    }

    _arm_radix4_butterfly_inverse_q31_mve(S, pSrc, n2);
    _arm_radix4_butterfly_inverse_q31_mve(S, pSrc + fftLen, n2);

    pIn0 = pSrc;
    blkCnt = (fftLen << 1) >> 2;
    while (blkCnt > 0U)
    {
        vecIn0 = vld1q_s32(pIn0);
        vecIn0 = vecIn0 << 1;
        vst1q(pIn0, vecIn0);
        pIn0 += 4;
        blkCnt--;
    }
    /*
     * tail
     * (will be merged through tail predication)
     */
    blkCnt = (fftLen << 1) & 3;
    if (blkCnt > 0U)
    {
        mve_pred16_t p0 = vctp32q(blkCnt);
        vecIn0 = vld1q_s32(pIn0);
        vecIn0 = vecIn0 << 1;
        vstrwq_p(pIn0, vecIn0, p0);
    }
}
/**
  @ingroup groupTransforms
 */

/**
  @addtogroup ComplexFFT
  @{
 */

/**
  @brief         Processing function for the Q31 complex FFT.
  @param[in]     S              points to an instance of the fixed-point CFFT structure
  @param[in,out] pSrc           points to the complex data buffer of size <code>2*fftLen</code>. Processing occurs in-place
  @param[in]     ifftFlag       flag that selects transform direction
                   - value = 0: forward transform
                   - value = 1: inverse transform
  @param[in]     bitReverseFlag flag that enables / disables bit reversal of output
                   - value = 0: disables bit reversal of output
                   - value = 1: enables bit reversal of output
  @return        none
 */
void arm_cfft_q31(
    const arm_cfft_instance_q31 * S,
    q31_t * pSrc,
    uint8_t ifftFlag,
    uint8_t bitReverseFlag)
{
    uint32_t fftLen = S->fftLen;

    if (ifftFlag == 1U) {
        switch (fftLen) {
        case 16:
        case 64:
        case 256:
        case 1024:
        case 4096:
            _arm_radix4_butterfly_inverse_q31_mve(S, pSrc, fftLen);
            break;

        case 32:
        case 128:
        case 512:
        case 2048:
            arm_cfft_radix4by2_inverse_q31_mve(S, pSrc, fftLen);
            break;
        }
    } else {
        switch (fftLen) {
        case 16:
        case 64:
        case 256:
        case 1024:
        case 4096:
            _arm_radix4_butterfly_q31_mve(S, pSrc, fftLen);
            break;

        case 32:
        case 128:
        case 512:
        case 2048:
            arm_cfft_radix4by2_q31_mve(S, pSrc, fftLen);
            break;
        }
    }

    if (bitReverseFlag)
    {
        arm_bitreversal_32_inpl_mve((uint32_t*)pSrc, S->bitRevLength, S->pBitRevTable);
    }
}
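
/*
 * Example use (illustrative sketch; assumes the instance has been set up with
 * arm_cfft_init_q31 and the caller has scaled the input to avoid saturation):
 *
 *     arm_cfft_instance_q31 S;
 *     q31_t buffer[2 * 1024];   // interleaved real/imaginary, filled by caller
 *
 *     if (arm_cfft_init_q31(&S, 1024) == ARM_MATH_SUCCESS)
 *     {
 *         arm_cfft_q31(&S, buffer, 0, 1);   // forward transform, bit reversal of output enabled
 *     }
 */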
/**
  @} end of ComplexFFT group
 */

#else
extern void arm_radix4_butterfly_q31(
        q31_t * pSrc,
        uint32_t fftLen,
        const q31_t * pCoef,
        uint32_t twidCoefModifier);

extern void arm_radix4_butterfly_inverse_q31(
        q31_t * pSrc,
        uint32_t fftLen,
        const q31_t * pCoef,
        uint32_t twidCoefModifier);

extern void arm_bitreversal_32(
        uint32_t * pSrc,
        const uint16_t bitRevLen,
        const uint16_t * pBitRevTable);

void arm_cfft_radix4by2_q31(
        q31_t * pSrc,
        uint32_t fftLen,
        const q31_t * pCoef);

void arm_cfft_radix4by2_inverse_q31(
        q31_t * pSrc,
        uint32_t fftLen,
        const q31_t * pCoef);
/**
  @ingroup groupTransforms
 */

/**
  @addtogroup ComplexFFT
  @{
 */

/**
  @brief         Processing function for the Q31 complex FFT.
  @param[in]     S              points to an instance of the fixed-point CFFT structure
  @param[in,out] p1             points to the complex data buffer of size <code>2*fftLen</code>. Processing occurs in-place
  @param[in]     ifftFlag       flag that selects transform direction
                   - value = 0: forward transform
                   - value = 1: inverse transform
  @param[in]     bitReverseFlag flag that enables / disables bit reversal of output
                   - value = 0: disables bit reversal of output
                   - value = 1: enables bit reversal of output
  @return        none
 */
void arm_cfft_q31(
    const arm_cfft_instance_q31 * S,
    q31_t * p1,
    uint8_t ifftFlag,
    uint8_t bitReverseFlag)
{
    uint32_t L = S->fftLen;

    if (ifftFlag == 1U)
    {
        switch (L)
        {
        case 16:
        case 64:
        case 256:
        case 1024:
        case 4096:
            arm_radix4_butterfly_inverse_q31 ( p1, L, (q31_t*)S->pTwiddle, 1 );
            break;

        case 32:
        case 128:
        case 512:
        case 2048:
            arm_cfft_radix4by2_inverse_q31 ( p1, L, S->pTwiddle );
            break;
        }
    }
    else
    {
        switch (L)
        {
        case 16:
        case 64:
        case 256:
        case 1024:
        case 4096:
            arm_radix4_butterfly_q31 ( p1, L, (q31_t*)S->pTwiddle, 1 );
            break;

        case 32:
        case 128:
        case 512:
        case 2048:
            arm_cfft_radix4by2_q31 ( p1, L, S->pTwiddle );
            break;
        }
    }

    if ( bitReverseFlag )
        arm_bitreversal_32 ((uint32_t*) p1, S->bitRevLength, S->pBitRevTable);
}
/**
  @} end of ComplexFFT group
 */
void arm_cfft_radix4by2_q31(
        q31_t * pSrc,
        uint32_t fftLen,
        const q31_t * pCoef)
{
    uint32_t i, l;
    uint32_t n2;
    q31_t xt, yt, cosVal, sinVal;
    q31_t p0, p1;

    n2 = fftLen >> 1U;
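    /*
     * Radix-2 stage: inputs are scaled down by 2 bits to avoid overflow in
     * the butterfly; the lower half keeps the sums, the upper half gets the
     * twiddled differences (shifted up by 1 bit after the Q31 multiply).
     */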
    for (i = 0; i < n2; i++)
    {
        cosVal = pCoef[2 * i];
        sinVal = pCoef[2 * i + 1];
        l = i + n2;

        xt =          (pSrc[2 * i] >> 2U) - (pSrc[2 * l] >> 2U);
        pSrc[2 * i] = (pSrc[2 * i] >> 2U) + (pSrc[2 * l] >> 2U);

        yt =              (pSrc[2 * i + 1] >> 2U) - (pSrc[2 * l + 1] >> 2U);
        pSrc[2 * i + 1] = (pSrc[2 * l + 1] >> 2U) + (pSrc[2 * i + 1] >> 2U);

        mult_32x32_keep32_R(p0, xt, cosVal);
        mult_32x32_keep32_R(p1, yt, cosVal);
        multAcc_32x32_keep32_R(p0, yt, sinVal);
        multSub_32x32_keep32_R(p1, xt, sinVal);

        pSrc[2 * l]     = p0 << 1;
        pSrc[2 * l + 1] = p1 << 1;
    }

    /* first col */
    arm_radix4_butterfly_q31 (pSrc, n2, (q31_t*)pCoef, 2U);

    /* second col */
    arm_radix4_butterfly_q31 (pSrc + fftLen, n2, (q31_t*)pCoef, 2U);
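
    /*
     * Shift the combined result up by 1 bit to compensate for the extra
     * downscaling introduced by the radix-2 stage.
     */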
    n2 = fftLen >> 1U;
    for (i = 0; i < n2; i++)
    {
        p0 = pSrc[4 * i + 0];
        p1 = pSrc[4 * i + 1];
        xt = pSrc[4 * i + 2];
        yt = pSrc[4 * i + 3];

        p0 <<= 1U;
        p1 <<= 1U;
        xt <<= 1U;
        yt <<= 1U;

        pSrc[4 * i + 0] = p0;
        pSrc[4 * i + 1] = p1;
        pSrc[4 * i + 2] = xt;
        pSrc[4 * i + 3] = yt;
    }
}
void arm_cfft_radix4by2_inverse_q31(
        q31_t * pSrc,
        uint32_t fftLen,
        const q31_t * pCoef)
{
    uint32_t i, l;
    uint32_t n2;
    q31_t xt, yt, cosVal, sinVal;
    q31_t p0, p1;

    n2 = fftLen >> 1U;
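    /*
     * Radix-2 stage with conjugated twiddles (sine terms sign-swapped
     * relative to the forward version); inputs are scaled down by 2 bits
     * as in the forward transform.
     */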
    for (i = 0; i < n2; i++)
    {
        cosVal = pCoef[2 * i];
        sinVal = pCoef[2 * i + 1];
        l = i + n2;

        xt =          (pSrc[2 * i] >> 2U) - (pSrc[2 * l] >> 2U);
        pSrc[2 * i] = (pSrc[2 * i] >> 2U) + (pSrc[2 * l] >> 2U);

        yt =              (pSrc[2 * i + 1] >> 2U) - (pSrc[2 * l + 1] >> 2U);
        pSrc[2 * i + 1] = (pSrc[2 * l + 1] >> 2U) + (pSrc[2 * i + 1] >> 2U);

        mult_32x32_keep32_R(p0, xt, cosVal);
        mult_32x32_keep32_R(p1, yt, cosVal);
        multSub_32x32_keep32_R(p0, yt, sinVal);
        multAcc_32x32_keep32_R(p1, xt, sinVal);

        pSrc[2 * l]     = p0 << 1U;
        pSrc[2 * l + 1] = p1 << 1U;
    }

    /* first col */
    arm_radix4_butterfly_inverse_q31( pSrc, n2, (q31_t*)pCoef, 2U);

    /* second col */
    arm_radix4_butterfly_inverse_q31( pSrc + fftLen, n2, (q31_t*)pCoef, 2U);

    n2 = fftLen >> 1U;
    for (i = 0; i < n2; i++)
    {
        p0 = pSrc[4 * i + 0];
        p1 = pSrc[4 * i + 1];
        xt = pSrc[4 * i + 2];
        yt = pSrc[4 * i + 3];

        p0 <<= 1U;
        p1 <<= 1U;
        xt <<= 1U;
        yt <<= 1U;

        pSrc[4 * i + 0] = p0;
        pSrc[4 * i + 1] = p1;
        pSrc[4 * i + 2] = xt;
        pSrc[4 * i + 3] = yt;
    }
}
#endif /* defined(ARM_MATH_MVEI) && !defined(ARM_MATH_AUTOVECTORIZE) */