
/* ----------------------------------------------------------------------
 * Project:      CMSIS DSP Library
 * Title:        arm_cfft_q15.c
 * Description:  Combined Radix Decimation in Frequency CFFT fixed-point processing function for Q15 data
 *
 * $Date:        23 April 2021
 * $Revision:    V1.9.0
 *
 * Target Processor: Cortex-M and Cortex-A cores
 * -------------------------------------------------------------------- */
/*
 * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved.
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the License); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an AS IS BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "dsp/transform_functions.h"

#if defined(ARM_MATH_MVEI) && !defined(ARM_MATH_AUTOVECTORIZE)

#include "arm_vec_fft.h"

static void _arm_radix4_butterfly_q15_mve(
    const arm_cfft_instance_q15 * S,
    q15_t *pSrc,
    uint32_t fftLen)
{
    q15x8_t vecTmp0, vecTmp1;
    q15x8_t vecSum0, vecDiff0, vecSum1, vecDiff1;
    q15x8_t vecA, vecB, vecC, vecD;
    q15x8_t vecW;
    uint32_t blkCnt;
    uint32_t n1, n2;
    uint32_t stage = 0;
    int32_t iter = 1;
    static const uint32_t strides[4] = {
        (0 - 16) * sizeof(q15_t *), (4 - 16) * sizeof(q15_t *),
        (8 - 16) * sizeof(q15_t *), (12 - 16) * sizeof(q15_t *)
    };
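    /*
     * Editorial note on the stride table (inferred from the last-stage loop
     * below): the gathers there use a +64-byte writeback per iteration, so
     * the per-lane word indices {0, 4, 8, 12} are biased by -16 units of
     * sizeof(q15_t *) (4 bytes), i.e. -64 bytes, letting the matching
     * scatter offsets (-64, -64 + 4, ...) address the block that was loaded
     * on the same iteration.
     */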
    /*
     * Process the first stages.
     * Each middle stage downscales its input twice (via halving adds),
     * i.e. by a factor of 4 per radix-4 stage.
     */
    n2 = fftLen;
    n1 = n2;
    n2 >>= 2u;

    for (int k = fftLen / 4u; k > 1; k >>= 2u)
    {
        for (int i = 0; i < iter; i++)
        {
            q15_t const *p_rearranged_twiddle_tab_stride2 =
                &S->rearranged_twiddle_stride2[
                    S->rearranged_twiddle_tab_stride2_arr[stage]];
            q15_t const *p_rearranged_twiddle_tab_stride3 =
                &S->rearranged_twiddle_stride3[
                    S->rearranged_twiddle_tab_stride3_arr[stage]];
            q15_t const *p_rearranged_twiddle_tab_stride1 =
                &S->rearranged_twiddle_stride1[
                    S->rearranged_twiddle_tab_stride1_arr[stage]];
            q15_t const *pW1, *pW2, *pW3;
            q15_t *inA = pSrc + CMPLX_DIM * i * n1;
            q15_t *inB = inA + n2 * CMPLX_DIM;
            q15_t *inC = inB + n2 * CMPLX_DIM;
            q15_t *inD = inC + n2 * CMPLX_DIM;

            pW1 = p_rearranged_twiddle_tab_stride1;
            pW2 = p_rearranged_twiddle_tab_stride2;
            pW3 = p_rearranged_twiddle_tab_stride3;

            blkCnt = n2 / 4;
            /*
             * load 4 x q15 complex pair
             */
            vecA = vldrhq_s16(inA);
            vecC = vldrhq_s16(inC);
            while (blkCnt > 0U)
            {
                vecB = vldrhq_s16(inB);
                vecD = vldrhq_s16(inD);

                vecSum0  = vhaddq(vecA, vecC);
                vecDiff0 = vhsubq(vecA, vecC);
                vecSum1  = vhaddq(vecB, vecD);
                vecDiff1 = vhsubq(vecB, vecD);
                /*
                 * [ 1 1 1 1 ] * [ A B C D ]' .* 1
                 */
                vecTmp0 = vhaddq(vecSum0, vecSum1);
                vst1q(inA, vecTmp0);
                inA += 8;
                /*
                 * [ 1 -1 1 -1 ] * [ A B C D ]'
                 */
                vecTmp0 = vhsubq(vecSum0, vecSum1);
                /*
                 * [ 1 -1 1 -1 ] * [ A B C D ]' .* W2
                 */
                vecW = vld1q(pW2);
                pW2 += 8;
                vecTmp1 = MVE_CMPLX_MULT_FX_AxB(vecW, vecTmp0);
                vst1q(inB, vecTmp1);
                inB += 8;
                /*
                 * [ 1 -i -1 +i ] * [ A B C D ]'
                 */
                vecTmp0 = MVE_CMPLX_SUB_FX_A_ixB(vecDiff0, vecDiff1);
                /*
                 * [ 1 -i -1 +i ] * [ A B C D ]' .* W1
                 */
                vecW = vld1q(pW1);
                pW1 += 8;
                vecTmp1 = MVE_CMPLX_MULT_FX_AxB(vecW, vecTmp0);
                vst1q(inC, vecTmp1);
                inC += 8;
                /*
                 * [ 1 +i -1 -i ] * [ A B C D ]'
                 */
                vecTmp0 = MVE_CMPLX_ADD_FX_A_ixB(vecDiff0, vecDiff1);
                /*
                 * [ 1 +i -1 -i ] * [ A B C D ]' .* W3
                 */
                vecW = vld1q(pW3);
                pW3 += 8;
                vecTmp1 = MVE_CMPLX_MULT_FX_AxB(vecW, vecTmp0);
                vst1q(inD, vecTmp1);
                inD += 8;

                vecA = vldrhq_s16(inA);
                vecC = vldrhq_s16(inC);

                blkCnt--;
            }
        }
        n1 = n2;
        n2 >>= 2u;
        iter = iter << 2;
        stage++;
    }
    /*
     * Start of the last stage processing
     */
    uint32x4_t vecScGathAddr = vld1q_u32(strides);
    vecScGathAddr = vecScGathAddr + (uint32_t) pSrc;
    /*
     * load scheduling
     */
    vecA = (q15x8_t) vldrwq_gather_base_wb_s32(&vecScGathAddr, 64);
    vecC = (q15x8_t) vldrwq_gather_base_s32(vecScGathAddr, 8);

    blkCnt = (fftLen >> 4);
    while (blkCnt > 0U)
    {
        vecSum0  = vhaddq(vecA, vecC);
        vecDiff0 = vhsubq(vecA, vecC);

        vecB = (q15x8_t) vldrwq_gather_base_s32(vecScGathAddr, 4);
        vecD = (q15x8_t) vldrwq_gather_base_s32(vecScGathAddr, 12);

        vecSum1  = vhaddq(vecB, vecD);
        vecDiff1 = vhsubq(vecB, vecD);
        /*
         * pre-load for next iteration
         */
        vecA = (q15x8_t) vldrwq_gather_base_wb_s32(&vecScGathAddr, 64);
        vecC = (q15x8_t) vldrwq_gather_base_s32(vecScGathAddr, 8);

        vecTmp0 = vhaddq(vecSum0, vecSum1);
        vstrwq_scatter_base_s32(vecScGathAddr, -64, (int32x4_t) vecTmp0);

        vecTmp0 = vhsubq(vecSum0, vecSum1);
        vstrwq_scatter_base_s32(vecScGathAddr, -64 + 4, (int32x4_t) vecTmp0);

        vecTmp0 = MVE_CMPLX_SUB_FX_A_ixB(vecDiff0, vecDiff1);
        vstrwq_scatter_base_s32(vecScGathAddr, -64 + 8, (int32x4_t) vecTmp0);

        vecTmp0 = MVE_CMPLX_ADD_FX_A_ixB(vecDiff0, vecDiff1);
        vstrwq_scatter_base_s32(vecScGathAddr, -64 + 12, (int32x4_t) vecTmp0);

        blkCnt--;
    }
}
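
/*
 * Radix-4-by-2 driver for lengths that are not powers of 4 (32, 128, 512,
 * 2048): one radix-2 stage splits the N-point transform into two N/2-point
 * transforms, each of which is then a power of 4 and handled by the radix-4
 * kernel above. The pre-shifts and halving adds downscale the data to avoid
 * saturation; the final loop restores one bit.
 */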
static void arm_cfft_radix4by2_q15_mve(const arm_cfft_instance_q15 *S, q15_t *pSrc, uint32_t fftLen)
{
    uint32_t n2;
    q15_t *pIn0;
    q15_t *pIn1;
    const q15_t *pCoef = S->pTwiddle;
    uint32_t blkCnt;
    q15x8_t vecIn0, vecIn1, vecSum, vecDiff;
    q15x8_t vecCmplxTmp, vecTw;
    q15_t const *pCoefVec;

    n2 = fftLen >> 1;

    pIn0 = pSrc;
    pIn1 = pSrc + fftLen;
    pCoefVec = pCoef;

    blkCnt = n2 / 4;
    while (blkCnt > 0U)
    {
        vecIn0 = *(q15x8_t *) pIn0;
        vecIn1 = *(q15x8_t *) pIn1;

        vecIn0 = vecIn0 >> 1;
        vecIn1 = vecIn1 >> 1;
        vecSum = vhaddq(vecIn0, vecIn1);
        vst1q(pIn0, vecSum);
        pIn0 += 8;

        vecTw = vld1q(pCoefVec);
        pCoefVec += 8;

        vecDiff = vhsubq(vecIn0, vecIn1);
        vecCmplxTmp = MVE_CMPLX_MULT_FX_AxConjB(vecDiff, vecTw);
        vst1q(pIn1, vecCmplxTmp);
        pIn1 += 8;

        blkCnt--;
    }

    _arm_radix4_butterfly_q15_mve(S, pSrc, n2);

    _arm_radix4_butterfly_q15_mve(S, pSrc + fftLen, n2);

    pIn0 = pSrc;
    blkCnt = (fftLen << 1) >> 3;
    while (blkCnt > 0U)
    {
        vecIn0 = *(q15x8_t *) pIn0;
        vecIn0 = vecIn0 << 1;
        vst1q(pIn0, vecIn0);
        pIn0 += 8;
        blkCnt--;
    }
    /*
     * tail
     * (will be merged through tail predication)
     */
    blkCnt = (fftLen << 1) & 7;
    if (blkCnt > 0U)
    {
        mve_pred16_t p0 = vctp16q(blkCnt);
        vecIn0 = *(q15x8_t *) pIn0;
        vecIn0 = vecIn0 << 1;
        vstrhq_p(pIn0, vecIn0, p0);
    }
}
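
/*
 * Inverse kernel: same butterfly structure as the forward version above,
 * but the twiddle products use the conjugated coefficients
 * (MVE_CMPLX_MULT_FX_AxConjB) and the +/- i*B combinations are swapped,
 * which together realize the inverse-direction twiddle factors.
 */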
static void _arm_radix4_butterfly_inverse_q15_mve(const arm_cfft_instance_q15 *S, q15_t *pSrc, uint32_t fftLen)
{
    q15x8_t vecTmp0, vecTmp1;
    q15x8_t vecSum0, vecDiff0, vecSum1, vecDiff1;
    q15x8_t vecA, vecB, vecC, vecD;
    q15x8_t vecW;
    uint32_t blkCnt;
    uint32_t n1, n2;
    uint32_t stage = 0;
    int32_t iter = 1;
    static const uint32_t strides[4] = {
        (0 - 16) * sizeof(q15_t *), (4 - 16) * sizeof(q15_t *),
        (8 - 16) * sizeof(q15_t *), (12 - 16) * sizeof(q15_t *)
    };
    /*
     * Process the first stages.
     * Each middle stage downscales its input twice (via halving adds),
     * i.e. by a factor of 4 per radix-4 stage.
     */
    n2 = fftLen;
    n1 = n2;
    n2 >>= 2u;

    for (int k = fftLen / 4u; k > 1; k >>= 2u)
    {
        for (int i = 0; i < iter; i++)
        {
            q15_t const *p_rearranged_twiddle_tab_stride2 =
                &S->rearranged_twiddle_stride2[
                    S->rearranged_twiddle_tab_stride2_arr[stage]];
            q15_t const *p_rearranged_twiddle_tab_stride3 =
                &S->rearranged_twiddle_stride3[
                    S->rearranged_twiddle_tab_stride3_arr[stage]];
            q15_t const *p_rearranged_twiddle_tab_stride1 =
                &S->rearranged_twiddle_stride1[
                    S->rearranged_twiddle_tab_stride1_arr[stage]];
            q15_t const *pW1, *pW2, *pW3;
            q15_t *inA = pSrc + CMPLX_DIM * i * n1;
            q15_t *inB = inA + n2 * CMPLX_DIM;
            q15_t *inC = inB + n2 * CMPLX_DIM;
            q15_t *inD = inC + n2 * CMPLX_DIM;

            pW1 = p_rearranged_twiddle_tab_stride1;
            pW2 = p_rearranged_twiddle_tab_stride2;
            pW3 = p_rearranged_twiddle_tab_stride3;

            blkCnt = n2 / 4;
            /*
             * load 4 x q15 complex pair
             */
            vecA = vldrhq_s16(inA);
            vecC = vldrhq_s16(inC);
            while (blkCnt > 0U)
            {
                vecB = vldrhq_s16(inB);
                vecD = vldrhq_s16(inD);

                vecSum0  = vhaddq(vecA, vecC);
                vecDiff0 = vhsubq(vecA, vecC);
                vecSum1  = vhaddq(vecB, vecD);
                vecDiff1 = vhsubq(vecB, vecD);
                /*
                 * [ 1 1 1 1 ] * [ A B C D ]' .* 1
                 */
                vecTmp0 = vhaddq(vecSum0, vecSum1);
                vst1q(inA, vecTmp0);
                inA += 8;
                /*
                 * [ 1 -1 1 -1 ] * [ A B C D ]'
                 */
                vecTmp0 = vhsubq(vecSum0, vecSum1);
                /*
                 * [ 1 -1 1 -1 ] * [ A B C D ]' .* conj(W2)
                 */
                vecW = vld1q(pW2);
                pW2 += 8;
                vecTmp1 = MVE_CMPLX_MULT_FX_AxConjB(vecTmp0, vecW);
                vst1q(inB, vecTmp1);
                inB += 8;
                /*
                 * [ 1 +i -1 -i ] * [ A B C D ]'
                 */
                vecTmp0 = MVE_CMPLX_ADD_FX_A_ixB(vecDiff0, vecDiff1);
                /*
                 * [ 1 +i -1 -i ] * [ A B C D ]' .* conj(W1)
                 */
                vecW = vld1q(pW1);
                pW1 += 8;
                vecTmp1 = MVE_CMPLX_MULT_FX_AxConjB(vecTmp0, vecW);
                vst1q(inC, vecTmp1);
                inC += 8;
                /*
                 * [ 1 -i -1 +i ] * [ A B C D ]'
                 */
                vecTmp0 = MVE_CMPLX_SUB_FX_A_ixB(vecDiff0, vecDiff1);
                /*
                 * [ 1 -i -1 +i ] * [ A B C D ]' .* conj(W3)
                 */
                vecW = vld1q(pW3);
                pW3 += 8;
                vecTmp1 = MVE_CMPLX_MULT_FX_AxConjB(vecTmp0, vecW);
                vst1q(inD, vecTmp1);
                inD += 8;

                vecA = vldrhq_s16(inA);
                vecC = vldrhq_s16(inC);

                blkCnt--;
            }
        }
        n1 = n2;
        n2 >>= 2u;
        iter = iter << 2;
        stage++;
    }
    /*
     * Start of the last stage processing
     */
    uint32x4_t vecScGathAddr = vld1q_u32(strides);
    vecScGathAddr = vecScGathAddr + (uint32_t) pSrc;
    /*
     * load scheduling
     */
    vecA = (q15x8_t) vldrwq_gather_base_wb_s32(&vecScGathAddr, 64);
    vecC = (q15x8_t) vldrwq_gather_base_s32(vecScGathAddr, 8);

    blkCnt = (fftLen >> 4);
    while (blkCnt > 0U)
    {
        vecSum0  = vhaddq(vecA, vecC);
        vecDiff0 = vhsubq(vecA, vecC);

        vecB = (q15x8_t) vldrwq_gather_base_s32(vecScGathAddr, 4);
        vecD = (q15x8_t) vldrwq_gather_base_s32(vecScGathAddr, 12);

        vecSum1  = vhaddq(vecB, vecD);
        vecDiff1 = vhsubq(vecB, vecD);
        /*
         * pre-load for next iteration
         */
        vecA = (q15x8_t) vldrwq_gather_base_wb_s32(&vecScGathAddr, 64);
        vecC = (q15x8_t) vldrwq_gather_base_s32(vecScGathAddr, 8);

        vecTmp0 = vhaddq(vecSum0, vecSum1);
        vstrwq_scatter_base_s32(vecScGathAddr, -64, (int32x4_t) vecTmp0);

        vecTmp0 = vhsubq(vecSum0, vecSum1);
        vstrwq_scatter_base_s32(vecScGathAddr, -64 + 4, (int32x4_t) vecTmp0);

        vecTmp0 = MVE_CMPLX_ADD_FX_A_ixB(vecDiff0, vecDiff1);
        vstrwq_scatter_base_s32(vecScGathAddr, -64 + 8, (int32x4_t) vecTmp0);

        vecTmp0 = MVE_CMPLX_SUB_FX_A_ixB(vecDiff0, vecDiff1);
        vstrwq_scatter_base_s32(vecScGathAddr, -64 + 12, (int32x4_t) vecTmp0);

        blkCnt--;
    }
}
static void arm_cfft_radix4by2_inverse_q15_mve(const arm_cfft_instance_q15 *S, q15_t *pSrc, uint32_t fftLen)
{
    uint32_t n2;
    q15_t *pIn0;
    q15_t *pIn1;
    const q15_t *pCoef = S->pTwiddle;
    uint32_t blkCnt;
    q15x8_t vecIn0, vecIn1, vecSum, vecDiff;
    q15x8_t vecCmplxTmp, vecTw;
    q15_t const *pCoefVec;

    n2 = fftLen >> 1;

    pIn0 = pSrc;
    pIn1 = pSrc + fftLen;
    pCoefVec = pCoef;

    blkCnt = n2 / 4;
    while (blkCnt > 0U)
    {
        vecIn0 = *(q15x8_t *) pIn0;
        vecIn1 = *(q15x8_t *) pIn1;

        vecIn0 = vecIn0 >> 1;
        vecIn1 = vecIn1 >> 1;
        vecSum = vhaddq(vecIn0, vecIn1);
        vst1q(pIn0, vecSum);
        pIn0 += 8;

        vecTw = vld1q(pCoefVec);
        pCoefVec += 8;

        vecDiff = vhsubq(vecIn0, vecIn1);
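        /* Full complex product vecDiff * vecTw, built from the
           vqrdmlsdhq / vqrdmladhxq pair (real parts land in even lanes,
           imaginary parts in odd lanes); this is the non-conjugated
           twiddle, matching the inverse transform direction. */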
        vecCmplxTmp = vqrdmlsdhq(vuninitializedq_s16(), vecDiff, vecTw);
        vecCmplxTmp = vqrdmladhxq(vecCmplxTmp, vecDiff, vecTw);
        vst1q(pIn1, vecCmplxTmp);
        pIn1 += 8;

        blkCnt--;
    }

    _arm_radix4_butterfly_inverse_q15_mve(S, pSrc, n2);
    _arm_radix4_butterfly_inverse_q15_mve(S, pSrc + fftLen, n2);

    pIn0 = pSrc;
    blkCnt = (fftLen << 1) >> 3;
    while (blkCnt > 0U)
    {
        vecIn0 = *(q15x8_t *) pIn0;
        vecIn0 = vecIn0 << 1;
        vst1q(pIn0, vecIn0);
        pIn0 += 8;
        blkCnt--;
    }
    /*
     * tail
     * (will be merged through tail predication)
     */
    blkCnt = (fftLen << 1) & 7;
    if (blkCnt > 0U)
    {
        mve_pred16_t p0 = vctp16q(blkCnt);
        vecIn0 = *(q15x8_t *) pIn0;
        vecIn0 = vecIn0 << 1;
        vstrhq_p(pIn0, vecIn0, p0);
    }
}
/**
  @ingroup groupTransforms
 */

/**
  @addtogroup ComplexFFT
  @{
 */

/**
  @brief         Processing function for the Q15 complex FFT.
  @param[in]     S               points to an instance of the Q15 CFFT structure
  @param[in,out] pSrc            points to the complex data buffer of size <code>2*fftLen</code>. Processing occurs in-place
  @param[in]     ifftFlag        flag that selects the transform direction
                   - value = 0: forward transform
                   - value = 1: inverse transform
  @param[in]     bitReverseFlag  flag that enables / disables bit reversal of output
                   - value = 0: disables bit reversal of output
                   - value = 1: enables bit reversal of output
  @return        none
 */
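/*
 * Illustrative usage sketch (editorial addition, not part of the original
 * file). It assumes the const instance arm_cfft_sR_q15_len256 provided by
 * arm_const_structs.h; buffer contents are placeholders.
 *
 *   #include "dsp/transform_functions.h"
 *   #include "arm_const_structs.h"
 *
 *   q15_t buf[2 * 256];   // interleaved { re, im } samples, filled by caller
 *
 *   // forward transform (ifftFlag = 0) with bit-reversed output
 *   arm_cfft_q15(&arm_cfft_sR_q15_len256, buf, 0, 1);
 *
 *   // buf now holds the spectrum; note that the implementation downscales
 *   // internally to avoid saturation.
 */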
void arm_cfft_q15(
    const arm_cfft_instance_q15 * S,
    q15_t * pSrc,
    uint8_t ifftFlag,
    uint8_t bitReverseFlag)
{
    uint32_t fftLen = S->fftLen;

    if (ifftFlag == 1U) {
        switch (fftLen) {
        case 16:
        case 64:
        case 256:
        case 1024:
        case 4096:
            _arm_radix4_butterfly_inverse_q15_mve(S, pSrc, fftLen);
            break;

        case 32:
        case 128:
        case 512:
        case 2048:
            arm_cfft_radix4by2_inverse_q15_mve(S, pSrc, fftLen);
            break;
        }
    } else {
        switch (fftLen) {
        case 16:
        case 64:
        case 256:
        case 1024:
        case 4096:
            _arm_radix4_butterfly_q15_mve(S, pSrc, fftLen);
            break;

        case 32:
        case 128:
        case 512:
        case 2048:
            arm_cfft_radix4by2_q15_mve(S, pSrc, fftLen);
            break;
        }
    }
    if (bitReverseFlag)
    {
        arm_bitreversal_16_inpl_mve((uint16_t*)pSrc, S->bitRevLength, S->pBitRevTable);
    }
}
/**
  @} end of ComplexFFT group
 */

#else
extern void arm_radix4_butterfly_q15(
        q15_t * pSrc,
        uint32_t fftLen,
        const q15_t * pCoef,
        uint32_t twidCoefModifier);

extern void arm_radix4_butterfly_inverse_q15(
        q15_t * pSrc,
        uint32_t fftLen,
        const q15_t * pCoef,
        uint32_t twidCoefModifier);

extern void arm_bitreversal_16(
        uint16_t * pSrc,
        const uint16_t bitRevLen,
        const uint16_t * pBitRevTable);

void arm_cfft_radix4by2_q15(
        q15_t * pSrc,
        uint32_t fftLen,
        const q15_t * pCoef);

void arm_cfft_radix4by2_inverse_q15(
        q15_t * pSrc,
        uint32_t fftLen,
        const q15_t * pCoef);
/**
  @ingroup groupTransforms
 */

/**
  @addtogroup ComplexFFT
  @{
 */

/**
  @brief         Processing function for the Q15 complex FFT.
  @param[in]     S               points to an instance of the Q15 CFFT structure
  @param[in,out] p1              points to the complex data buffer of size <code>2*fftLen</code>. Processing occurs in-place
  @param[in]     ifftFlag        flag that selects the transform direction
                   - value = 0: forward transform
                   - value = 1: inverse transform
  @param[in]     bitReverseFlag  flag that enables / disables bit reversal of output
                   - value = 0: disables bit reversal of output
                   - value = 1: enables bit reversal of output
  @return        none
 */
void arm_cfft_q15(
    const arm_cfft_instance_q15 * S,
    q15_t * p1,
    uint8_t ifftFlag,
    uint8_t bitReverseFlag)
{
    uint32_t L = S->fftLen;

    if (ifftFlag == 1U)
    {
        switch (L)
        {
        case 16:
        case 64:
        case 256:
        case 1024:
        case 4096:
            arm_radix4_butterfly_inverse_q15 ( p1, L, (q15_t*)S->pTwiddle, 1 );
            break;

        case 32:
        case 128:
        case 512:
        case 2048:
            arm_cfft_radix4by2_inverse_q15 ( p1, L, S->pTwiddle );
            break;
        }
    }
    else
    {
        switch (L)
        {
        case 16:
        case 64:
        case 256:
        case 1024:
        case 4096:
            arm_radix4_butterfly_q15 ( p1, L, (q15_t*)S->pTwiddle, 1 );
            break;

        case 32:
        case 128:
        case 512:
        case 2048:
            arm_cfft_radix4by2_q15 ( p1, L, S->pTwiddle );
            break;
        }
    }

    if ( bitReverseFlag )
        arm_bitreversal_16 ((uint16_t*) p1, S->bitRevLength, S->pBitRevTable);
}
/**
  @} end of ComplexFFT group
 */
void arm_cfft_radix4by2_q15(
        q15_t * pSrc,
        uint32_t fftLen,
        const q15_t * pCoef)
{
    uint32_t i;
    uint32_t n2;
    q15_t p0, p1, p2, p3;
#if defined (ARM_MATH_DSP)
    q31_t T, S, R;
    q31_t coeff, out1, out2;
    const q15_t *pC = pCoef;
    q15_t *pSi = pSrc;
    q15_t *pSl = pSrc + fftLen;
#else
    uint32_t l;
    q15_t xt, yt, cosVal, sinVal;
#endif

    n2 = fftLen >> 1U;
#if defined (ARM_MATH_DSP)

    for (i = n2; i > 0; i--)
    {
        coeff = read_q15x2_ia ((q15_t **) &pC);

        T = read_q15x2 (pSi);
        T = __SHADD16(T, 0); /* this is just a SIMD arithmetic shift right by 1 */

        S = read_q15x2 (pSl);
        S = __SHADD16(S, 0); /* this is just a SIMD arithmetic shift right by 1 */

        R = __QSUB16(T, S);

        write_q15x2_ia (&pSi, __SHADD16(T, S));
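        /* Little-endian path shown first: R = (T - S) is multiplied by the
           conjugated twiddle packed as { cos, sin }. SMUAD forms the real
           part (R.re*cos + R.im*sin) and SMUSDX the imaginary part
           (R.im*cos - R.re*sin); PKHBT repacks the two Q15 halves. */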
#ifndef ARM_MATH_BIG_ENDIAN
        out1 = __SMUAD(coeff, R) >> 16U;
        out2 = __SMUSDX(coeff, R);
#else
        out1 = __SMUSDX(R, coeff) >> 16U;
        out2 = __SMUAD(coeff, R);
#endif /* #ifndef ARM_MATH_BIG_ENDIAN */

        write_q15x2_ia (&pSl, (q31_t)__PKHBT( out1, out2, 0 ));
    }
#else /* #if defined (ARM_MATH_DSP) */

    for (i = 0; i < n2; i++)
    {
        cosVal = pCoef[2 * i];
        sinVal = pCoef[2 * i + 1];

        l = i + n2;

        xt =           (pSrc[2 * i] >> 1U) - (pSrc[2 * l] >> 1U);
        pSrc[2 * i] = ((pSrc[2 * i] >> 1U) + (pSrc[2 * l] >> 1U)) >> 1U;

        yt =               (pSrc[2 * i + 1] >> 1U) - (pSrc[2 * l + 1] >> 1U);
        pSrc[2 * i + 1] = ((pSrc[2 * l + 1] >> 1U) + (pSrc[2 * i + 1] >> 1U)) >> 1U;

        pSrc[2 * l]     = (((int16_t) (((q31_t) xt * cosVal) >> 16U)) +
                           ((int16_t) (((q31_t) yt * sinVal) >> 16U)) );

        pSrc[2 * l + 1] = (((int16_t) (((q31_t) yt * cosVal) >> 16U)) -
                           ((int16_t) (((q31_t) xt * sinVal) >> 16U)) );
    }

#endif /* #if defined (ARM_MATH_DSP) */
    /* first col */
    arm_radix4_butterfly_q15( pSrc, n2, (q15_t*)pCoef, 2U);

    /* second col */
    arm_radix4_butterfly_q15( pSrc + fftLen, n2, (q15_t*)pCoef, 2U);
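    /* Editorial note: the two half-length radix-4 transforms above apply
       their own internal downscaling; the loop below scales the result back
       up by one bit to compensate for part of the scaling introduced by the
       initial radix-2 stage. */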
    n2 = fftLen >> 1U;
    for (i = 0; i < n2; i++)
    {
        p0 = pSrc[4 * i + 0];
        p1 = pSrc[4 * i + 1];
        p2 = pSrc[4 * i + 2];
        p3 = pSrc[4 * i + 3];

        p0 <<= 1U;
        p1 <<= 1U;
        p2 <<= 1U;
        p3 <<= 1U;

        pSrc[4 * i + 0] = p0;
        pSrc[4 * i + 1] = p1;
        pSrc[4 * i + 2] = p2;
        pSrc[4 * i + 3] = p3;
    }
}
void arm_cfft_radix4by2_inverse_q15(
        q15_t * pSrc,
        uint32_t fftLen,
        const q15_t * pCoef)
{
    uint32_t i;
    uint32_t n2;
    q15_t p0, p1, p2, p3;
#if defined (ARM_MATH_DSP)
    q31_t T, S, R;
    q31_t coeff, out1, out2;
    const q15_t *pC = pCoef;
    q15_t *pSi = pSrc;
    q15_t *pSl = pSrc + fftLen;
#else
    uint32_t l;
    q15_t xt, yt, cosVal, sinVal;
#endif

    n2 = fftLen >> 1U;

#if defined (ARM_MATH_DSP)

    for (i = n2; i > 0; i--)
    {
        coeff = read_q15x2_ia ((q15_t **) &pC);

        T = read_q15x2 (pSi);
        T = __SHADD16(T, 0); /* this is just a SIMD arithmetic shift right by 1 */

        S = read_q15x2 (pSl);
        S = __SHADD16(S, 0); /* this is just a SIMD arithmetic shift right by 1 */

        R = __QSUB16(T, S);

        write_q15x2_ia (&pSi, __SHADD16(T, S));
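        /* Inverse case: R = (T - S) is multiplied by the non-conjugated
           twiddle. SMUSD forms the real part (R.re*cos - R.im*sin) and
           SMUADX the imaginary part (R.im*cos + R.re*sin)
           (little-endian path shown first). */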
#ifndef ARM_MATH_BIG_ENDIAN
        out1 = __SMUSD(coeff, R) >> 16U;
        out2 = __SMUADX(coeff, R);
#else
        out1 = __SMUADX(R, coeff) >> 16U;
        out2 = __SMUSD(__QSUB(0, coeff), R);
#endif /* #ifndef ARM_MATH_BIG_ENDIAN */

        write_q15x2_ia (&pSl, (q31_t)__PKHBT( out1, out2, 0 ));
    }
#else /* #if defined (ARM_MATH_DSP) */

    for (i = 0; i < n2; i++)
    {
        cosVal = pCoef[2 * i];
        sinVal = pCoef[2 * i + 1];

        l = i + n2;

        xt =           (pSrc[2 * i] >> 1U) - (pSrc[2 * l] >> 1U);
        pSrc[2 * i] = ((pSrc[2 * i] >> 1U) + (pSrc[2 * l] >> 1U)) >> 1U;

        yt =               (pSrc[2 * i + 1] >> 1U) - (pSrc[2 * l + 1] >> 1U);
        pSrc[2 * i + 1] = ((pSrc[2 * l + 1] >> 1U) + (pSrc[2 * i + 1] >> 1U)) >> 1U;

        pSrc[2 * l]     = (((int16_t) (((q31_t) xt * cosVal) >> 16U)) -
                           ((int16_t) (((q31_t) yt * sinVal) >> 16U)) );

        pSrc[2 * l + 1] = (((int16_t) (((q31_t) yt * cosVal) >> 16U)) +
                           ((int16_t) (((q31_t) xt * sinVal) >> 16U)) );
    }

#endif /* #if defined (ARM_MATH_DSP) */

    /* first col */
    arm_radix4_butterfly_inverse_q15( pSrc, n2, (q15_t*)pCoef, 2U);

    /* second col */
    arm_radix4_butterfly_inverse_q15( pSrc + fftLen, n2, (q15_t*)pCoef, 2U);

    n2 = fftLen >> 1U;
    for (i = 0; i < n2; i++)
    {
        p0 = pSrc[4 * i + 0];
        p1 = pSrc[4 * i + 1];
        p2 = pSrc[4 * i + 2];
        p3 = pSrc[4 * i + 3];

        p0 <<= 1U;
        p1 <<= 1U;
        p2 <<= 1U;
        p3 <<= 1U;

        pSrc[4 * i + 0] = p0;
        pSrc[4 * i + 1] = p1;
        pSrc[4 * i + 2] = p2;
        pSrc[4 * i + 3] = p3;
    }
}

#endif /* defined(ARM_MATH_MVEI) && !defined(ARM_MATH_AUTOVECTORIZE) */