arm_cfft_f16.c 27 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842
  1. /* ----------------------------------------------------------------------
  2. * Project: CMSIS DSP Library
* Title: arm_cfft_f16.c
  4. * Description: Combined Radix Decimation in Frequency CFFT Floating point processing function
  5. *
  6. * $Date: 23 April 2021
  7. * $Revision: V1.9.0
  8. *
  9. * Target Processor: Cortex-M and Cortex-A cores
  10. * -------------------------------------------------------------------- */
  11. /*
  12. * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved.
  13. *
  14. * SPDX-License-Identifier: Apache-2.0
  15. *
  16. * Licensed under the Apache License, Version 2.0 (the License); you may
  17. * not use this file except in compliance with the License.
  18. * You may obtain a copy of the License at
  19. *
  20. * www.apache.org/licenses/LICENSE-2.0
  21. *
  22. * Unless required by applicable law or agreed to in writing, software
  23. * distributed under the License is distributed on an AS IS BASIS, WITHOUT
  24. * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  25. * See the License for the specific language governing permissions and
  26. * limitations under the License.
  27. */
  28. #include "dsp/transform_functions_f16.h"
  29. #include "arm_common_tables_f16.h"
  30. #if defined(ARM_MATH_MVE_FLOAT16) && !defined(ARM_MATH_AUTOVECTORIZE)
  31. #include "arm_helium_utils.h"
  32. #include "arm_vec_fft.h"
  33. #include "arm_mve_tables_f16.h"
  34. static float16_t arm_inverse_fft_length_f16(uint16_t fftLen)
  35. {
  36. float16_t retValue=1.0;
  37. switch (fftLen)
  38. {
  39. case 4096U:
  40. retValue = (float16_t)0.000244140625f;
  41. break;
  42. case 2048U:
  43. retValue = (float16_t)0.00048828125f;
  44. break;
  45. case 1024U:
  46. retValue = (float16_t)0.0009765625f;
  47. break;
  48. case 512U:
  49. retValue = (float16_t)0.001953125f;
  50. break;
  51. case 256U:
  52. retValue = (float16_t)0.00390625f;
  53. break;
  54. case 128U:
  55. retValue = (float16_t)0.0078125f;
  56. break;
  57. case 64U:
  58. retValue = (float16_t)0.015625f;
  59. break;
  60. case 32U:
  61. retValue = (float16_t)0.03125f;
  62. break;
  63. case 16U:
  64. retValue = (float16_t)0.0625f;
  65. break;
  66. default:
  67. break;
  68. }
  69. return(retValue);
  70. }
/**
  Radix-4 decimation-in-frequency butterfly stages for the forward f16 CFFT,
  vectorized with Helium/MVE intrinsics. All stages except the last read
  per-stage twiddle factors from the rearranged tables in the instance
  structure; the trivial last stage (all twiddles equal 1) is performed with
  gather/scatter accesses so outputs land back in place.

  @param[in]     S       points to the f16 CFFT instance (rearranged twiddle tables)
  @param[in,out] pSrc    interleaved complex data {re, im, ...}, processed in place
  @param[in]     fftLen  transform length handled by this call (power of 4)
 */
static void _arm_radix4_butterfly_f16_mve(const arm_cfft_instance_f16 * S,float16_t * pSrc, uint32_t fftLen)
{
    f16x8_t vecTmp0, vecTmp1;
    f16x8_t vecSum0, vecDiff0, vecSum1, vecDiff1;
    f16x8_t vecA, vecB, vecC, vecD;
    uint32_t blkCnt;
    uint32_t n1, n2;
    uint32_t stage = 0;
    int32_t iter = 1;
    /* Byte offsets for the last-stage gather/scatter, relative to a base that is
       advanced by 64 bytes (16 words) per writeback gather.
       NOTE(review): sizeof(float16_t *) is the pointer size (4 on Cortex-M), used
       here as the size of the 32-bit words moved by vldrwq/vstrwq — assumes
       32-bit pointers; confirm for any other target. */
    static const uint32_t strides[4] =
        {(0 - 16) * sizeof(float16_t *)
        , (4 - 16) * sizeof(float16_t *)
        , (8 - 16) * sizeof(float16_t *)
        , (12 - 16) * sizeof(float16_t *)};
    n2 = fftLen;
    n1 = n2;
    n2 >>= 2u;   /* quarter-length leg spacing for the first stage */
    /* All radix-4 stages except the trivial last one. */
    for (int k = fftLen / 4u; k > 1; k >>= 2)
    {
        /* One pass per sub-FFT group of the current stage. */
        for (int i = 0; i < iter; i++)
        {
            /* Per-stage twiddle tables, rearranged for sequential vector loads. */
            float16_t const *p_rearranged_twiddle_tab_stride1 =
                &S->rearranged_twiddle_stride1[
                S->rearranged_twiddle_tab_stride1_arr[stage]];
            float16_t const *p_rearranged_twiddle_tab_stride2 =
                &S->rearranged_twiddle_stride2[
                S->rearranged_twiddle_tab_stride2_arr[stage]];
            float16_t const *p_rearranged_twiddle_tab_stride3 =
                &S->rearranged_twiddle_stride3[
                S->rearranged_twiddle_tab_stride3_arr[stage]];
            float16_t const *pW1, *pW2, *pW3;
            /* The four quarter-spaced legs of the radix-4 butterfly. */
            float16_t *inA = pSrc + CMPLX_DIM * i * n1;
            float16_t *inB = inA + n2 * CMPLX_DIM;
            float16_t *inC = inB + n2 * CMPLX_DIM;
            float16_t *inD = inC + n2 * CMPLX_DIM;
            f16x8_t vecW;
            pW1 = p_rearranged_twiddle_tab_stride1;
            pW2 = p_rearranged_twiddle_tab_stride2;
            pW3 = p_rearranged_twiddle_tab_stride3;
            blkCnt = n2 / 4;   /* 4 complex f16 pairs per 128-bit vector */
            /*
             * load 4 f16 complex pairs (A/C loads hoisted for software pipelining)
             */
            vecA = vldrhq_f16(inA);
            vecC = vldrhq_f16(inC);
            while (blkCnt > 0U)
            {
                vecB = vldrhq_f16(inB);
                vecD = vldrhq_f16(inD);
                vecSum0 = vecA + vecC;   /* vecSum0 = vaddq(vecA, vecC) */
                vecDiff0 = vecA - vecC;  /* vecDiff0 = vsubq(vecA, vecC) */
                vecSum1 = vecB + vecD;
                vecDiff1 = vecB - vecD;
                /*
                 * [ 1 1 1 1 ] * [ A B C D ]' .* 1
                 */
                vecTmp0 = vecSum0 + vecSum1;
                vst1q(inA, vecTmp0);
                inA += 8;
                /*
                 * [ 1 -1 1 -1 ] * [ A B C D ]'
                 */
                vecTmp0 = vecSum0 - vecSum1;
                /*
                 * [ 1 -1 1 -1 ] * [ A B C D ]' .* W2
                 * (conjugate complex multiply: forward transform)
                 */
                vecW = vld1q(pW2);
                pW2 += 8;
                vecTmp1 = MVE_CMPLX_MULT_FLT_Conj_AxB(vecW, vecTmp0);
                vst1q(inB, vecTmp1);
                inB += 8;
                /*
                 * [ 1 -i -1 +i ] * [ A B C D ]'
                 */
                vecTmp0 = MVE_CMPLX_SUB_A_ixB(vecDiff0, vecDiff1);
                /*
                 * [ 1 -i -1 +i ] * [ A B C D ]' .* W1
                 */
                vecW = vld1q(pW1);
                pW1 +=8;
                vecTmp1 = MVE_CMPLX_MULT_FLT_Conj_AxB(vecW, vecTmp0);
                vst1q(inC, vecTmp1);
                inC += 8;
                /*
                 * [ 1 +i -1 -i ] * [ A B C D ]'
                 */
                vecTmp0 = MVE_CMPLX_ADD_A_ixB(vecDiff0, vecDiff1);
                /*
                 * [ 1 +i -1 -i ] * [ A B C D ]' .* W3
                 */
                vecW = vld1q(pW3);
                pW3 += 8;
                vecTmp1 = MVE_CMPLX_MULT_FLT_Conj_AxB(vecW, vecTmp0);
                vst1q(inD, vecTmp1);
                inD += 8;
                /* Pre-load operands for the next loop iteration. */
                vecA = vldrhq_f16(inA);
                vecC = vldrhq_f16(inC);
                blkCnt--;
            }
        }
        /* Next stage: leg spacing shrinks by 4, group count grows by 4. */
        n1 = n2;
        n2 >>= 2u;
        iter = iter << 2;
        stage++;
    }
    /*
     * start of Last stage process: twiddles are all 1, so this is a plain
     * radix-4 combine addressed via gather/scatter (in-place scatter back).
     */
    uint32x4_t vecScGathAddr = vld1q_u32(strides);
    vecScGathAddr = vecScGathAddr + (uint32_t) pSrc;
    /* load scheduling */
    vecA = (f16x8_t)vldrwq_gather_base_wb_f32(&vecScGathAddr, 64);
    vecC = (f16x8_t)vldrwq_gather_base_f32(vecScGathAddr, 8);
    blkCnt = (fftLen >> 4);   /* 16 complex values consumed per iteration */
    while (blkCnt > 0U)
    {
        vecSum0 = vecA + vecC;   /* vecSum0 = vaddq(vecA, vecC) */
        vecDiff0 = vecA - vecC;  /* vecDiff0 = vsubq(vecA, vecC) */
        vecB = (f16x8_t)vldrwq_gather_base_f32(vecScGathAddr, 4);
        vecD = (f16x8_t)vldrwq_gather_base_f32(vecScGathAddr, 12);
        vecSum1 = vecB + vecD;
        vecDiff1 = vecB - vecD;
        /* pre-load for next iteration */
        vecA = (f16x8_t)vldrwq_gather_base_wb_f32(&vecScGathAddr, 64);
        vecC = (f16x8_t)vldrwq_gather_base_f32(vecScGathAddr, 8);
        vecTmp0 = vecSum0 + vecSum1;
        vstrwq_scatter_base_f32(vecScGathAddr, -64, (f32x4_t)vecTmp0);
        vecTmp0 = vecSum0 - vecSum1;
        vstrwq_scatter_base_f32(vecScGathAddr, -64 + 4, (f32x4_t)vecTmp0);
        vecTmp0 = MVE_CMPLX_SUB_A_ixB(vecDiff0, vecDiff1);
        vstrwq_scatter_base_f32(vecScGathAddr, -64 + 8, (f32x4_t)vecTmp0);
        vecTmp0 = MVE_CMPLX_ADD_A_ixB(vecDiff0, vecDiff1);
        vstrwq_scatter_base_f32(vecScGathAddr, -64 + 12, (f32x4_t)vecTmp0);
        blkCnt--;
    }
    /*
     * End of last stage process
     */
}
  210. static void arm_cfft_radix4by2_f16_mve(const arm_cfft_instance_f16 * S, float16_t *pSrc, uint32_t fftLen)
  211. {
  212. float16_t const *pCoefVec;
  213. float16_t const *pCoef = S->pTwiddle;
  214. float16_t *pIn0, *pIn1;
  215. uint32_t n2;
  216. uint32_t blkCnt;
  217. f16x8_t vecIn0, vecIn1, vecSum, vecDiff;
  218. f16x8_t vecCmplxTmp, vecTw;
  219. n2 = fftLen >> 1;
  220. pIn0 = pSrc;
  221. pIn1 = pSrc + fftLen;
  222. pCoefVec = pCoef;
  223. blkCnt = n2 / 4;
  224. while (blkCnt > 0U)
  225. {
  226. vecIn0 = *(f16x8_t *) pIn0;
  227. vecIn1 = *(f16x8_t *) pIn1;
  228. vecTw = vld1q(pCoefVec);
  229. pCoefVec += 8;
  230. vecSum = vaddq(vecIn0, vecIn1);
  231. vecDiff = vsubq(vecIn0, vecIn1);
  232. vecCmplxTmp = MVE_CMPLX_MULT_FLT_Conj_AxB(vecTw, vecDiff);
  233. vst1q(pIn0, vecSum);
  234. pIn0 += 8;
  235. vst1q(pIn1, vecCmplxTmp);
  236. pIn1 += 8;
  237. blkCnt--;
  238. }
  239. _arm_radix4_butterfly_f16_mve(S, pSrc, n2);
  240. _arm_radix4_butterfly_f16_mve(S, pSrc + fftLen, n2);
  241. pIn0 = pSrc;
  242. }
/**
  Radix-4 decimation-in-frequency butterfly stages for the INVERSE f16 CFFT,
  vectorized with Helium/MVE intrinsics. Mirrors _arm_radix4_butterfly_f16_mve
  but uses the non-conjugated complex multiply, swaps the +/-i legs, and folds
  the 1/fftLen scaling into the last (gather/scatter) stage.

  @param[in]     S            points to the f16 CFFT instance (rearranged twiddle tables)
  @param[in,out] pSrc         interleaved complex data {re, im, ...}, processed in place
  @param[in]     fftLen       transform length handled by this call (power of 4)
  @param[in]     onebyfftLen  1/fftLen scale factor applied in the last stage
 */
static void _arm_radix4_butterfly_inverse_f16_mve(const arm_cfft_instance_f16 * S,float16_t * pSrc, uint32_t fftLen, float16_t onebyfftLen)
{
    f16x8_t vecTmp0, vecTmp1;
    f16x8_t vecSum0, vecDiff0, vecSum1, vecDiff1;
    f16x8_t vecA, vecB, vecC, vecD;
    f16x8_t vecW;
    uint32_t blkCnt;
    uint32_t n1, n2;
    uint32_t stage = 0;
    int32_t iter = 1;
    /* Byte offsets for the last-stage gather/scatter.
       NOTE(review): sizeof(q31_t *) is the pointer size (4 on Cortex-M), used as
       the size of the 32-bit words moved by vldrwq/vstrwq — assumes 32-bit
       pointers; confirm for any other target. */
    static const uint32_t strides[4] = {
        (0 - 16) * sizeof(q31_t *),
        (4 - 16) * sizeof(q31_t *),
        (8 - 16) * sizeof(q31_t *),
        (12 - 16) * sizeof(q31_t *)
    };
    n2 = fftLen;
    n1 = n2;
    n2 >>= 2u;   /* quarter-length leg spacing for the first stage */
    /* All radix-4 stages except the trivial last one. */
    for (int k = fftLen / 4; k > 1; k >>= 2)
    {
        /* One pass per sub-FFT group of the current stage. */
        for (int i = 0; i < iter; i++)
        {
            /* Per-stage twiddle tables, rearranged for sequential vector loads. */
            float16_t const *p_rearranged_twiddle_tab_stride1 =
                &S->rearranged_twiddle_stride1[
                S->rearranged_twiddle_tab_stride1_arr[stage]];
            float16_t const *p_rearranged_twiddle_tab_stride2 =
                &S->rearranged_twiddle_stride2[
                S->rearranged_twiddle_tab_stride2_arr[stage]];
            float16_t const *p_rearranged_twiddle_tab_stride3 =
                &S->rearranged_twiddle_stride3[
                S->rearranged_twiddle_tab_stride3_arr[stage]];
            float16_t const *pW1, *pW2, *pW3;
            /* The four quarter-spaced legs of the radix-4 butterfly. */
            float16_t *inA = pSrc + CMPLX_DIM * i * n1;
            float16_t *inB = inA + n2 * CMPLX_DIM;
            float16_t *inC = inB + n2 * CMPLX_DIM;
            float16_t *inD = inC + n2 * CMPLX_DIM;
            pW1 = p_rearranged_twiddle_tab_stride1;
            pW2 = p_rearranged_twiddle_tab_stride2;
            pW3 = p_rearranged_twiddle_tab_stride3;
            blkCnt = n2 / 4;   /* 4 complex f16 pairs per 128-bit vector */
            /*
             * load 4 f16 complex pairs (A/C loads hoisted for software pipelining)
             */
            vecA = vldrhq_f16(inA);
            vecC = vldrhq_f16(inC);
            while (blkCnt > 0U)
            {
                vecB = vldrhq_f16(inB);
                vecD = vldrhq_f16(inD);
                vecSum0 = vecA + vecC;   /* vecSum0 = vaddq(vecA, vecC) */
                vecDiff0 = vecA - vecC;  /* vecDiff0 = vsubq(vecA, vecC) */
                vecSum1 = vecB + vecD;
                vecDiff1 = vecB - vecD;
                /*
                 * [ 1 1 1 1 ] * [ A B C D ]' .* 1
                 */
                vecTmp0 = vecSum0 + vecSum1;
                vst1q(inA, vecTmp0);
                inA += 8;
                /*
                 * [ 1 -1 1 -1 ] * [ A B C D ]'
                 */
                vecTmp0 = vecSum0 - vecSum1;
                /*
                 * [ 1 -1 1 -1 ] * [ A B C D ]' .* W2
                 * (non-conjugated complex multiply: inverse transform)
                 */
                vecW = vld1q(pW2);
                pW2 += 8;
                vecTmp1 = MVE_CMPLX_MULT_FLT_AxB(vecW, vecTmp0);
                vst1q(inB, vecTmp1);
                inB += 8;
                /*
                 * [ 1 +i -1 -i ] * [ A B C D ]'
                 */
                vecTmp0 = MVE_CMPLX_ADD_A_ixB(vecDiff0, vecDiff1);
                /*
                 * [ 1 +i -1 -i ] * [ A B C D ]' .* W1
                 */
                vecW = vld1q(pW1);
                pW1 += 8;
                vecTmp1 = MVE_CMPLX_MULT_FLT_AxB(vecW, vecTmp0);
                vst1q(inC, vecTmp1);
                inC += 8;
                /*
                 * [ 1 -i -1 +i ] * [ A B C D ]'
                 */
                vecTmp0 = MVE_CMPLX_SUB_A_ixB(vecDiff0, vecDiff1);
                /*
                 * [ 1 -i -1 +i ] * [ A B C D ]' .* W3
                 */
                vecW = vld1q(pW3);
                pW3 += 8;
                vecTmp1 = MVE_CMPLX_MULT_FLT_AxB(vecW, vecTmp0);
                vst1q(inD, vecTmp1);
                inD += 8;
                /* Pre-load operands for the next loop iteration. */
                vecA = vldrhq_f16(inA);
                vecC = vldrhq_f16(inC);
                blkCnt--;
            }
        }
        /* Next stage: leg spacing shrinks by 4, group count grows by 4. */
        n1 = n2;
        n2 >>= 2u;
        iter = iter << 2;
        stage++;
    }
    /*
     * start of Last stage process: twiddles are all 1; plain radix-4 combine
     * via gather/scatter, with the 1/fftLen scaling applied to every output.
     */
    uint32x4_t vecScGathAddr = vld1q_u32(strides);
    vecScGathAddr = vecScGathAddr + (uint32_t) pSrc;
    /*
     * load scheduling
     */
    vecA = (f16x8_t)vldrwq_gather_base_wb_f32(&vecScGathAddr, 64);
    vecC = (f16x8_t)vldrwq_gather_base_f32(vecScGathAddr, 8);
    blkCnt = (fftLen >> 4);   /* 16 complex values consumed per iteration */
    while (blkCnt > 0U)
    {
        vecSum0 = vecA + vecC;   /* vecSum0 = vaddq(vecA, vecC) */
        vecDiff0 = vecA - vecC;  /* vecDiff0 = vsubq(vecA, vecC) */
        vecB = (f16x8_t)vldrwq_gather_base_f32(vecScGathAddr, 4);
        vecD = (f16x8_t)vldrwq_gather_base_f32(vecScGathAddr, 12);
        vecSum1 = vecB + vecD;
        vecDiff1 = vecB - vecD;
        /* Pre-load for the next iteration. */
        vecA = (f16x8_t)vldrwq_gather_base_wb_f32(&vecScGathAddr, 64);
        vecC = (f16x8_t)vldrwq_gather_base_f32(vecScGathAddr, 8);
        vecTmp0 = vecSum0 + vecSum1;
        vecTmp0 = vecTmp0 * onebyfftLen;
        vstrwq_scatter_base_f32(vecScGathAddr, -64, (f32x4_t)vecTmp0);
        vecTmp0 = vecSum0 - vecSum1;
        vecTmp0 = vecTmp0 * onebyfftLen;
        vstrwq_scatter_base_f32(vecScGathAddr, -64 + 4, (f32x4_t)vecTmp0);
        vecTmp0 = MVE_CMPLX_ADD_A_ixB(vecDiff0, vecDiff1);
        vecTmp0 = vecTmp0 * onebyfftLen;
        vstrwq_scatter_base_f32(vecScGathAddr, -64 + 8, (f32x4_t)vecTmp0);
        vecTmp0 = MVE_CMPLX_SUB_A_ixB(vecDiff0, vecDiff1);
        vecTmp0 = vecTmp0 * onebyfftLen;
        vstrwq_scatter_base_f32(vecScGathAddr, -64 + 12, (f32x4_t)vecTmp0);
        blkCnt--;
    }
    /*
     * End of last stage process
     */
}
/**
  Inverse f16 CFFT for lengths 2 * 4^n (32, 128, 512, 2048): one vectorized
  radix-2 stage, then two independent radix-4 inverse sub-transforms of half
  the length. The 1/fftLen scaling is passed down to the radix-4 butterflies,
  which apply it in their last stage.

  @param[in]     S       points to the f16 CFFT instance (twiddle table)
  @param[in,out] pSrc    interleaved complex data {re, im, ...}, processed in place
  @param[in]     fftLen  transform length (2 * power of 4)
 */
static void arm_cfft_radix4by2_inverse_f16_mve(const arm_cfft_instance_f16 * S,float16_t *pSrc, uint32_t fftLen)
{
    float16_t const *pCoefVec;
    float16_t const *pCoef = S->pTwiddle;
    float16_t *pIn0, *pIn1;
    uint32_t n2;
    float16_t onebyfftLen = arm_inverse_fft_length_f16(fftLen);
    uint32_t blkCnt;
    f16x8_t vecIn0, vecIn1, vecSum, vecDiff;
    f16x8_t vecCmplxTmp, vecTw;
    n2 = fftLen >> 1;
    pIn0 = pSrc;
    pIn1 = pSrc + fftLen;   /* second half: fftLen interleaved values past pSrc */
    pCoefVec = pCoef;
    blkCnt = n2 / 4;        /* 4 complex f16 pairs per 128-bit vector */
    /* Radix-2 stage: top half = a+b, bottom half = W * (a-b)
       (non-conjugated multiply: inverse transform). */
    while (blkCnt > 0U)
    {
        vecIn0 = *(f16x8_t *) pIn0;
        vecIn1 = *(f16x8_t *) pIn1;
        vecTw = vld1q(pCoefVec);
        pCoefVec += 8;
        vecSum = vaddq(vecIn0, vecIn1);
        vecDiff = vsubq(vecIn0, vecIn1);
        vecCmplxTmp = MVE_CMPLX_MULT_FLT_AxB(vecTw, vecDiff);
        vst1q(pIn0, vecSum);
        pIn0 += 8;
        vst1q(pIn1, vecCmplxTmp);
        pIn1 += 8;
        blkCnt--;
    }
    /* Two independent half-length radix-4 inverse transforms, each scaled by 1/fftLen. */
    _arm_radix4_butterfly_inverse_f16_mve(S, pSrc, n2, onebyfftLen);
    _arm_radix4_butterfly_inverse_f16_mve(S, pSrc + fftLen, n2, onebyfftLen);
}
  421. /**
  422. @addtogroup ComplexFFT
  423. @{
  424. */
  425. /**
  426. @brief Processing function for the floating-point complex FFT.
  427. @param[in] S points to an instance of the floating-point CFFT structure
@param[in,out] pSrc points to the complex data buffer of size <code>2*fftLen</code>. Processing occurs in-place
  429. @param[in] ifftFlag flag that selects transform direction
  430. - value = 0: forward transform
  431. - value = 1: inverse transform
  432. @param[in] bitReverseFlag flag that enables / disables bit reversal of output
  433. - value = 0: disables bit reversal of output
  434. - value = 1: enables bit reversal of output
  435. @return none
  436. */
  437. void arm_cfft_f16(
  438. const arm_cfft_instance_f16 * S,
  439. float16_t * pSrc,
  440. uint8_t ifftFlag,
  441. uint8_t bitReverseFlag)
  442. {
  443. uint32_t fftLen = S->fftLen;
  444. if (ifftFlag == 1U) {
  445. switch (fftLen) {
  446. case 16:
  447. case 64:
  448. case 256:
  449. case 1024:
  450. case 4096:
  451. _arm_radix4_butterfly_inverse_f16_mve(S, pSrc, fftLen, arm_inverse_fft_length_f16(S->fftLen));
  452. break;
  453. case 32:
  454. case 128:
  455. case 512:
  456. case 2048:
  457. arm_cfft_radix4by2_inverse_f16_mve(S, pSrc, fftLen);
  458. break;
  459. }
  460. } else {
  461. switch (fftLen) {
  462. case 16:
  463. case 64:
  464. case 256:
  465. case 1024:
  466. case 4096:
  467. _arm_radix4_butterfly_f16_mve(S, pSrc, fftLen);
  468. break;
  469. case 32:
  470. case 128:
  471. case 512:
  472. case 2048:
  473. arm_cfft_radix4by2_f16_mve(S, pSrc, fftLen);
  474. break;
  475. }
  476. }
  477. if (bitReverseFlag)
  478. {
  479. arm_bitreversal_16_inpl_mve((uint16_t*)pSrc, S->bitRevLength, S->pBitRevTable);
  480. }
  481. }
  482. #else
  483. #if defined(ARM_FLOAT16_SUPPORTED)
  484. extern void arm_bitreversal_16(
  485. uint16_t * pSrc,
  486. const uint16_t bitRevLen,
  487. const uint16_t * pBitRevTable);
  488. extern void arm_cfft_radix4by2_f16(
  489. float16_t * pSrc,
  490. uint32_t fftLen,
  491. const float16_t * pCoef);
  492. extern void arm_radix4_butterfly_f16(
  493. float16_t * pSrc,
  494. uint16_t fftLen,
  495. const float16_t * pCoef,
  496. uint16_t twidCoefModifier);
  497. /**
  498. @ingroup groupTransforms
  499. */
  500. /**
  501. @defgroup ComplexFFT Complex FFT Functions
  502. @par
  503. The Fast Fourier Transform (FFT) is an efficient algorithm for computing the
  504. Discrete Fourier Transform (DFT). The FFT can be orders of magnitude faster
  505. than the DFT, especially for long lengths.
  506. The algorithms described in this section
  507. operate on complex data. A separate set of functions is devoted to handling
  508. of real sequences.
  509. @par
  510. There are separate algorithms for handling floating-point, Q15, and Q31 data
  511. types. The algorithms available for each data type are described next.
  512. @par
  513. The FFT functions operate in-place. That is, the array holding the input data
  514. will also be used to hold the corresponding result. The input data is complex
  515. and contains <code>2*fftLen</code> interleaved values as shown below.
  516. <pre>{real[0], imag[0], real[1], imag[1], ...} </pre>
  517. The FFT result will be contained in the same array and the frequency domain
  518. values will have the same interleaving.
  519. @par Floating-point
  520. The floating-point complex FFT uses a mixed-radix algorithm. Multiple radix-8
  521. stages are performed along with a single radix-2 or radix-4 stage, as needed.
  522. The algorithm supports lengths of [16, 32, 64, ..., 4096] and each length uses
  523. a different twiddle factor table.
  524. @par
  525. The function uses the standard FFT definition and output values may grow by a
  526. factor of <code>fftLen</code> when computing the forward transform. The
  527. inverse transform includes a scale of <code>1/fftLen</code> as part of the
  528. calculation and this matches the textbook definition of the inverse FFT.
  529. @par
  530. For the MVE version, the new arm_cfft_init_f32 initialization function is
  531. <b>mandatory</b>. <b>Compilation flags are available to include only the required tables for the
  532. needed FFTs.</b> Other FFT versions can continue to be initialized as
  533. explained below.
  534. @par
  535. For not MVE versions, pre-initialized data structures containing twiddle factors
  536. and bit reversal tables are provided and defined in <code>arm_const_structs.h</code>. Include
  537. this header in your function and then pass one of the constant structures as
  538. an argument to arm_cfft_f32. For example:
  539. @par
  540. <code>arm_cfft_f32(arm_cfft_sR_f32_len64, pSrc, 1, 1)</code>
  541. @par
  542. computes a 64-point inverse complex FFT including bit reversal.
  543. The data structures are treated as constant data and not modified during the
  544. calculation. The same data structure can be reused for multiple transforms
  545. including mixing forward and inverse transforms.
  546. @par
  547. Earlier releases of the library provided separate radix-2 and radix-4
  548. algorithms that operated on floating-point data. These functions are still
  549. provided but are deprecated. The older functions are slower and less general
  550. than the new functions.
  551. @par
  552. An example of initialization of the constants for the arm_cfft_f32 function follows:
  553. @code
  554. const static arm_cfft_instance_f32 *S;
  555. ...
  556. switch (length) {
  557. case 16:
  558. S = &arm_cfft_sR_f32_len16;
  559. break;
  560. case 32:
  561. S = &arm_cfft_sR_f32_len32;
  562. break;
  563. case 64:
  564. S = &arm_cfft_sR_f32_len64;
  565. break;
  566. case 128:
  567. S = &arm_cfft_sR_f32_len128;
  568. break;
  569. case 256:
  570. S = &arm_cfft_sR_f32_len256;
  571. break;
  572. case 512:
  573. S = &arm_cfft_sR_f32_len512;
  574. break;
  575. case 1024:
  576. S = &arm_cfft_sR_f32_len1024;
  577. break;
  578. case 2048:
  579. S = &arm_cfft_sR_f32_len2048;
  580. break;
  581. case 4096:
  582. S = &arm_cfft_sR_f32_len4096;
  583. break;
  584. }
  585. @endcode
  586. @par
  587. The new arm_cfft_init_f32 can also be used.
  588. @par Q15 and Q31
  589. The floating-point complex FFT uses a mixed-radix algorithm. Multiple radix-4
  590. stages are performed along with a single radix-2 stage, as needed.
  591. The algorithm supports lengths of [16, 32, 64, ..., 4096] and each length uses
  592. a different twiddle factor table.
  593. @par
  594. The function uses the standard FFT definition and output values may grow by a
  595. factor of <code>fftLen</code> when computing the forward transform. The
  596. inverse transform includes a scale of <code>1/fftLen</code> as part of the
  597. calculation and this matches the textbook definition of the inverse FFT.
  598. @par
  599. Pre-initialized data structures containing twiddle factors and bit reversal
  600. tables are provided and defined in <code>arm_const_structs.h</code>. Include
  601. this header in your function and then pass one of the constant structures as
  602. an argument to arm_cfft_q31. For example:
  603. @par
  604. <code>arm_cfft_q31(arm_cfft_sR_q31_len64, pSrc, 1, 1)</code>
  605. @par
  606. computes a 64-point inverse complex FFT including bit reversal.
  607. The data structures are treated as constant data and not modified during the
  608. calculation. The same data structure can be reused for multiple transforms
  609. including mixing forward and inverse transforms.
  610. @par
  611. Earlier releases of the library provided separate radix-2 and radix-4
algorithms that operated on fixed-point data. These functions are still
  613. provided but are deprecated. The older functions are slower and less general
  614. than the new functions.
  615. @par
  616. An example of initialization of the constants for the arm_cfft_q31 function follows:
  617. @code
  618. const static arm_cfft_instance_q31 *S;
  619. ...
  620. switch (length) {
  621. case 16:
  622. S = &arm_cfft_sR_q31_len16;
  623. break;
  624. case 32:
  625. S = &arm_cfft_sR_q31_len32;
  626. break;
  627. case 64:
  628. S = &arm_cfft_sR_q31_len64;
  629. break;
  630. case 128:
  631. S = &arm_cfft_sR_q31_len128;
  632. break;
  633. case 256:
  634. S = &arm_cfft_sR_q31_len256;
  635. break;
  636. case 512:
  637. S = &arm_cfft_sR_q31_len512;
  638. break;
  639. case 1024:
  640. S = &arm_cfft_sR_q31_len1024;
  641. break;
  642. case 2048:
  643. S = &arm_cfft_sR_q31_len2048;
  644. break;
  645. case 4096:
  646. S = &arm_cfft_sR_q31_len4096;
  647. break;
  648. }
  649. @endcode
  650. */
  651. /**
  652. @addtogroup ComplexFFT
  653. @{
  654. */
  655. /**
  656. @brief Processing function for the floating-point complex FFT.
  657. @param[in] S points to an instance of the floating-point CFFT structure
  658. @param[in,out] p1 points to the complex data buffer of size <code>2*fftLen</code>. Processing occurs in-place
  659. @param[in] ifftFlag flag that selects transform direction
  660. - value = 0: forward transform
  661. - value = 1: inverse transform
  662. @param[in] bitReverseFlag flag that enables / disables bit reversal of output
  663. - value = 0: disables bit reversal of output
  664. - value = 1: enables bit reversal of output
  665. @return none
  666. */
  667. void arm_cfft_f16(
  668. const arm_cfft_instance_f16 * S,
  669. float16_t * p1,
  670. uint8_t ifftFlag,
  671. uint8_t bitReverseFlag)
  672. {
  673. uint32_t L = S->fftLen, l;
  674. float16_t invL, * pSrc;
  675. if (ifftFlag == 1U)
  676. {
  677. /* Conjugate input data */
  678. pSrc = p1 + 1;
  679. for(l=0; l<L; l++)
  680. {
  681. *pSrc = -*pSrc;
  682. pSrc += 2;
  683. }
  684. }
  685. switch (L)
  686. {
  687. case 16:
  688. case 64:
  689. case 256:
  690. case 1024:
  691. case 4096:
  692. arm_radix4_butterfly_f16 (p1, L, (float16_t*)S->pTwiddle, 1U);
  693. break;
  694. case 32:
  695. case 128:
  696. case 512:
  697. case 2048:
  698. arm_cfft_radix4by2_f16 ( p1, L, (float16_t*)S->pTwiddle);
  699. break;
  700. }
  701. if ( bitReverseFlag )
  702. arm_bitreversal_16((uint16_t*)p1, S->bitRevLength,(uint16_t*)S->pBitRevTable);
  703. if (ifftFlag == 1U)
  704. {
  705. invL = 1.0f/(float16_t)L;
  706. /* Conjugate and scale output data */
  707. pSrc = p1;
  708. for(l=0; l<L; l++)
  709. {
  710. *pSrc++ *= invL ;
  711. *pSrc = -(*pSrc) * invL;
  712. pSrc++;
  713. }
  714. }
  715. }
  716. #endif /* if defined(ARM_FLOAT16_SUPPORTED) */
  717. #endif /* defined(ARM_MATH_MVEF) && !defined(ARM_MATH_AUTOVECTORIZE) */
  718. /**
  719. @} end of ComplexFFT group
  720. */