/* ----------------------------------------------------------------------
 * Project:      CMSIS DSP Library
 * Title:        arm_cfft_q15.c
 * Description:  Combined radix decimation-in-frequency CFFT processing function, Q15 version
 *
 * $Date:        23 April 2021
 * $Revision:    V1.9.0
 *
 * Target Processor: Cortex-M and Cortex-A cores
 * -------------------------------------------------------------------- */
/*
 * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved.
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the License); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an AS IS BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "dsp/transform_functions.h"

#if defined(ARM_MATH_MVEI) && !defined(ARM_MATH_AUTOVECTORIZE)

#include "arm_vec_fft.h"

static void _arm_radix4_butterfly_q15_mve(
    const arm_cfft_instance_q15 * S,
    q15_t *pSrc,
    uint32_t fftLen)
{
    q15x8_t vecTmp0, vecTmp1;
    q15x8_t vecSum0, vecDiff0, vecSum1, vecDiff1;
    q15x8_t vecA, vecB, vecC, vecD;
    uint32_t blkCnt;
    uint32_t n1, n2;
    uint32_t stage = 0;
    int32_t iter = 1;
    static const int32_t strides[4] = {
        (0 - 16) * (int32_t)sizeof(q15_t *), (4 - 16) * (int32_t)sizeof(q15_t *),
        (8 - 16) * (int32_t)sizeof(q15_t *), (12 - 16) * (int32_t)sizeof(q15_t *)
    };
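    /*
     * Note: the stride table is in bytes. One interleaved q15 complex pair
     * occupies 4 bytes, which equals sizeof(q15_t *) on the 32-bit MVE
     * targets, hence the pointer-size scaling. The -16 (pairs) bias
     * pre-compensates the 64-byte write-back performed by the first
     * vldrwq_gather_base_wb_s32() in the last stage below.
     */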
    /*
     * Process the first stages.
     * Each of these middle stages scales the input down by 4 in total
     * (the halving add/sub intrinsics divide by 2 at each of the two
     * butterfly levels).
     */
    n2 = fftLen;
    n1 = n2;
    n2 >>= 2u;

    for (int k = fftLen / 4u; k > 1; k >>= 2u)
    {
        q15_t const *p_rearranged_twiddle_tab_stride2 =
            &S->rearranged_twiddle_stride2[
            S->rearranged_twiddle_tab_stride2_arr[stage]];
        q15_t const *p_rearranged_twiddle_tab_stride3 =
            &S->rearranged_twiddle_stride3[
            S->rearranged_twiddle_tab_stride3_arr[stage]];
        q15_t const *p_rearranged_twiddle_tab_stride1 =
            &S->rearranged_twiddle_stride1[
            S->rearranged_twiddle_tab_stride1_arr[stage]];

        q15_t *pBase = pSrc;
        for (int i = 0; i < iter; i++)
        {
            q15_t *inA = pBase;
            q15_t *inB = inA + n2 * CMPLX_DIM;
            q15_t *inC = inB + n2 * CMPLX_DIM;
            q15_t *inD = inC + n2 * CMPLX_DIM;
            q15_t const *pW1 = p_rearranged_twiddle_tab_stride1;
            q15_t const *pW2 = p_rearranged_twiddle_tab_stride2;
            q15_t const *pW3 = p_rearranged_twiddle_tab_stride3;
            q15x8_t vecW;

            blkCnt = n2 / 4;
            /*
             * load 4 x q15 complex pairs
             */
            vecA = vldrhq_s16(inA);
            vecC = vldrhq_s16(inC);

            while (blkCnt > 0U)
            {
                vecB = vldrhq_s16(inB);
                vecD = vldrhq_s16(inD);

                vecSum0 = vhaddq(vecA, vecC);
                vecDiff0 = vhsubq(vecA, vecC);

                vecSum1 = vhaddq(vecB, vecD);
                vecDiff1 = vhsubq(vecB, vecD);

                /*
                 * [ 1 1 1 1 ] * [ A B C D ]' .* 1
                 */
                vecTmp0 = vhaddq(vecSum0, vecSum1);
                vst1q(inA, vecTmp0);
                inA += 8;

                /*
                 * [ 1 -1 1 -1 ] * [ A B C D ]'
                 */
                vecTmp0 = vhsubq(vecSum0, vecSum1);
                /*
                 * [ 1 -1 1 -1 ] * [ A B C D ]' .* W2
                 */
                vecW = vld1q(pW2);
                pW2 += 8;
                vecTmp1 = MVE_CMPLX_MULT_FX_AxB(vecW, vecTmp0, q15x8_t);
                vst1q(inB, vecTmp1);
                inB += 8;

                /*
                 * [ 1 -i -1 +i ] * [ A B C D ]'
                 */
                vecTmp0 = MVE_CMPLX_SUB_FX_A_ixB(vecDiff0, vecDiff1);
                /*
                 * [ 1 -i -1 +i ] * [ A B C D ]' .* W1
                 */
                vecW = vld1q(pW1);
                pW1 += 8;
                vecTmp1 = MVE_CMPLX_MULT_FX_AxB(vecW, vecTmp0, q15x8_t);
                vst1q(inC, vecTmp1);
                inC += 8;

                /*
                 * [ 1 +i -1 -i ] * [ A B C D ]'
                 */
                vecTmp0 = MVE_CMPLX_ADD_FX_A_ixB(vecDiff0, vecDiff1);
                /*
                 * [ 1 +i -1 -i ] * [ A B C D ]' .* W3
                 */
                vecW = vld1q(pW3);
                pW3 += 8;
                vecTmp1 = MVE_CMPLX_MULT_FX_AxB(vecW, vecTmp0, q15x8_t);
                vst1q(inD, vecTmp1);
                inD += 8;

                vecA = vldrhq_s16(inA);
                vecC = vldrhq_s16(inC);

                blkCnt--;
            }
            pBase += CMPLX_DIM * n1;
        }
        n1 = n2;
        n2 >>= 2u;
        iter = iter << 2;
        stage++;
    }
    /*
     * Start of the last stage.
     */
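    /*
     * The last radix-4 stage needs no twiddle multiplies (all factors are
     * W^0 = 1). Gathers/scatters driven by the stride table process four
     * butterflies (16 complex pairs) per iteration, one butterfly per
     * 32-bit lane.
     */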
    uint32x4_t vecScGathAddr = vld1q_u32((uint32_t*)strides);
    vecScGathAddr = vecScGathAddr + (uint32_t) pSrc;

    /*
     * load scheduling
     */
    vecA = (q15x8_t) vldrwq_gather_base_wb_s32(&vecScGathAddr, 64);
    vecC = (q15x8_t) vldrwq_gather_base_s32(vecScGathAddr, 8);

    blkCnt = (fftLen >> 4);
    while (blkCnt > 0U)
    {
        vecSum0 = vhaddq(vecA, vecC);
        vecDiff0 = vhsubq(vecA, vecC);

        vecB = (q15x8_t) vldrwq_gather_base_s32(vecScGathAddr, 4);
        vecD = (q15x8_t) vldrwq_gather_base_s32(vecScGathAddr, 12);

        vecSum1 = vhaddq(vecB, vecD);
        vecDiff1 = vhsubq(vecB, vecD);

        /*
         * pre-load for next iteration
         */
        vecA = (q15x8_t) vldrwq_gather_base_wb_s32(&vecScGathAddr, 64);
        vecC = (q15x8_t) vldrwq_gather_base_s32(vecScGathAddr, 8);

        vecTmp0 = vhaddq(vecSum0, vecSum1);
        vstrwq_scatter_base_s32(vecScGathAddr, -64, (int32x4_t) vecTmp0);

        vecTmp0 = vhsubq(vecSum0, vecSum1);
        vstrwq_scatter_base_s32(vecScGathAddr, -64 + 4, (int32x4_t) vecTmp0);

        vecTmp0 = MVE_CMPLX_SUB_FX_A_ixB(vecDiff0, vecDiff1);
        vstrwq_scatter_base_s32(vecScGathAddr, -64 + 8, (int32x4_t) vecTmp0);

        vecTmp0 = MVE_CMPLX_ADD_FX_A_ixB(vecDiff0, vecDiff1);
        vstrwq_scatter_base_s32(vecScGathAddr, -64 + 12, (int32x4_t) vecTmp0);

        blkCnt--;
    }
}
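/*
 * For lengths that are not powers of 4 (32, 128, 512, 2048), one radix-2
 * stage splits the transform into two half-length sub-transforms, each of
 * which is then a power of 4 and is handled by the radix-4 kernel above.
 */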
static void arm_cfft_radix4by2_q15_mve(const arm_cfft_instance_q15 *S, q15_t *pSrc, uint32_t fftLen)
{
    uint32_t n2;
    q15_t *pIn0;
    q15_t *pIn1;
    const q15_t *pCoef = S->pTwiddle;
    uint32_t blkCnt;
    q15x8_t vecIn0, vecIn1, vecSum, vecDiff;
    q15x8_t vecCmplxTmp, vecTw;
    q15_t const *pCoefVec;

    n2 = fftLen >> 1;

    pIn0 = pSrc;
    pIn1 = pSrc + fftLen;
    pCoefVec = pCoef;

    blkCnt = n2 / 4;
    while (blkCnt > 0U)
    {
        vecIn0 = *(q15x8_t *) pIn0;
        vecIn1 = *(q15x8_t *) pIn1;

        vecIn0 = vecIn0 >> 1;
        vecIn1 = vecIn1 >> 1;
        vecSum = vhaddq(vecIn0, vecIn1);
        vst1q(pIn0, vecSum);
        pIn0 += 8;

        vecTw = vld1q(pCoefVec);
        pCoefVec += 8;

        vecDiff = vhsubq(vecIn0, vecIn1);
        vecCmplxTmp = MVE_CMPLX_MULT_FX_AxConjB(vecDiff, vecTw, q15x8_t);
        vst1q(pIn1, vecCmplxTmp);
        pIn1 += 8;

        blkCnt--;
    }

    _arm_radix4_butterfly_q15_mve(S, pSrc, n2);
    _arm_radix4_butterfly_q15_mve(S, pSrc + fftLen, n2);

    pIn0 = pSrc;
    blkCnt = (fftLen << 1) >> 3;
    while (blkCnt > 0U)
    {
        vecIn0 = *(q15x8_t *) pIn0;
        vecIn0 = vecIn0 << 1;
        vst1q(pIn0, vecIn0);
        pIn0 += 8;
        blkCnt--;
    }

    /*
     * tail
     * (will be merged through tail predication)
     */
    blkCnt = (fftLen << 1) & 7;
    if (blkCnt > 0U)
    {
        mve_pred16_t p0 = vctp16q(blkCnt);
        vecIn0 = *(q15x8_t *) pIn0;
        vecIn0 = vecIn0 << 1;
        vstrhq_p(pIn0, vecIn0, p0);
    }
}
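/*
 * Inverse counterpart of _arm_radix4_butterfly_q15_mve: the same flow, but
 * with the -i/+i butterfly legs swapped and the twiddle multiplies
 * conjugated (A .* conj(W)) for the inverse transform direction.
 */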
static void _arm_radix4_butterfly_inverse_q15_mve(const arm_cfft_instance_q15 *S, q15_t *pSrc, uint32_t fftLen)
{
    q15x8_t vecTmp0, vecTmp1;
    q15x8_t vecSum0, vecDiff0, vecSum1, vecDiff1;
    q15x8_t vecA, vecB, vecC, vecD;
    uint32_t blkCnt;
    uint32_t n1, n2;
    uint32_t stage = 0;
    int32_t iter = 1;
    static const int32_t strides[4] = {
        (0 - 16) * (int32_t)sizeof(q15_t *), (4 - 16) * (int32_t)sizeof(q15_t *),
        (8 - 16) * (int32_t)sizeof(q15_t *), (12 - 16) * (int32_t)sizeof(q15_t *)
    };

    /*
     * Process the first stages.
     * Each of these middle stages scales the input down by 4 in total
     * (the halving add/sub intrinsics divide by 2 at each of the two
     * butterfly levels).
     */
    n2 = fftLen;
    n1 = n2;
    n2 >>= 2u;

    for (int k = fftLen / 4u; k > 1; k >>= 2u)
    {
        q15_t const *p_rearranged_twiddle_tab_stride2 =
            &S->rearranged_twiddle_stride2[
            S->rearranged_twiddle_tab_stride2_arr[stage]];
        q15_t const *p_rearranged_twiddle_tab_stride3 =
            &S->rearranged_twiddle_stride3[
            S->rearranged_twiddle_tab_stride3_arr[stage]];
        q15_t const *p_rearranged_twiddle_tab_stride1 =
            &S->rearranged_twiddle_stride1[
            S->rearranged_twiddle_tab_stride1_arr[stage]];

        q15_t *pBase = pSrc;
        for (int i = 0; i < iter; i++)
        {
            q15_t *inA = pBase;
            q15_t *inB = inA + n2 * CMPLX_DIM;
            q15_t *inC = inB + n2 * CMPLX_DIM;
            q15_t *inD = inC + n2 * CMPLX_DIM;
            q15_t const *pW1 = p_rearranged_twiddle_tab_stride1;
            q15_t const *pW2 = p_rearranged_twiddle_tab_stride2;
            q15_t const *pW3 = p_rearranged_twiddle_tab_stride3;
            q15x8_t vecW;

            blkCnt = n2 / 4;
            /*
             * load 4 x q15 complex pairs
             */
            vecA = vldrhq_s16(inA);
            vecC = vldrhq_s16(inC);

            while (blkCnt > 0U)
            {
                vecB = vldrhq_s16(inB);
                vecD = vldrhq_s16(inD);

                vecSum0 = vhaddq(vecA, vecC);
                vecDiff0 = vhsubq(vecA, vecC);

                vecSum1 = vhaddq(vecB, vecD);
                vecDiff1 = vhsubq(vecB, vecD);

                /*
                 * [ 1 1 1 1 ] * [ A B C D ]' .* 1
                 */
                vecTmp0 = vhaddq(vecSum0, vecSum1);
                vst1q(inA, vecTmp0);
                inA += 8;

                /*
                 * [ 1 -1 1 -1 ] * [ A B C D ]'
                 */
                vecTmp0 = vhsubq(vecSum0, vecSum1);
                /*
                 * [ 1 -1 1 -1 ] * [ A B C D ]' .* W2
                 */
                vecW = vld1q(pW2);
                pW2 += 8;
                vecTmp1 = MVE_CMPLX_MULT_FX_AxConjB(vecTmp0, vecW, q15x8_t);
                vst1q(inB, vecTmp1);
                inB += 8;

                /*
                 * [ 1 -i -1 +i ] * [ A B C D ]'
                 */
                vecTmp0 = MVE_CMPLX_ADD_FX_A_ixB(vecDiff0, vecDiff1);
                /*
                 * [ 1 -i -1 +i ] * [ A B C D ]' .* W1
                 */
                vecW = vld1q(pW1);
                pW1 += 8;
                vecTmp1 = MVE_CMPLX_MULT_FX_AxConjB(vecTmp0, vecW, q15x8_t);
                vst1q(inC, vecTmp1);
                inC += 8;

                /*
                 * [ 1 +i -1 -i ] * [ A B C D ]'
                 */
                vecTmp0 = MVE_CMPLX_SUB_FX_A_ixB(vecDiff0, vecDiff1);
                /*
                 * [ 1 +i -1 -i ] * [ A B C D ]' .* W3
                 */
                vecW = vld1q(pW3);
                pW3 += 8;
                vecTmp1 = MVE_CMPLX_MULT_FX_AxConjB(vecTmp0, vecW, q15x8_t);
                vst1q(inD, vecTmp1);
                inD += 8;

                vecA = vldrhq_s16(inA);
                vecC = vldrhq_s16(inC);

                blkCnt--;
            }
            pBase += CMPLX_DIM * n1;
        }
        n1 = n2;
        n2 >>= 2u;
        iter = iter << 2;
        stage++;
    }

    /*
     * Start of the last stage.
     */
    uint32x4_t vecScGathAddr = vld1q_u32((uint32_t*)strides);
    vecScGathAddr = vecScGathAddr + (uint32_t) pSrc;

    /*
     * load scheduling
     */
    vecA = (q15x8_t) vldrwq_gather_base_wb_s32(&vecScGathAddr, 64);
    vecC = (q15x8_t) vldrwq_gather_base_s32(vecScGathAddr, 8);

    blkCnt = (fftLen >> 4);
    while (blkCnt > 0U)
    {
        vecSum0 = vhaddq(vecA, vecC);
        vecDiff0 = vhsubq(vecA, vecC);

        vecB = (q15x8_t) vldrwq_gather_base_s32(vecScGathAddr, 4);
        vecD = (q15x8_t) vldrwq_gather_base_s32(vecScGathAddr, 12);

        vecSum1 = vhaddq(vecB, vecD);
        vecDiff1 = vhsubq(vecB, vecD);

        /*
         * pre-load for next iteration
         */
        vecA = (q15x8_t) vldrwq_gather_base_wb_s32(&vecScGathAddr, 64);
        vecC = (q15x8_t) vldrwq_gather_base_s32(vecScGathAddr, 8);

        vecTmp0 = vhaddq(vecSum0, vecSum1);
        vstrwq_scatter_base_s32(vecScGathAddr, -64, (int32x4_t) vecTmp0);

        vecTmp0 = vhsubq(vecSum0, vecSum1);
        vstrwq_scatter_base_s32(vecScGathAddr, -64 + 4, (int32x4_t) vecTmp0);

        vecTmp0 = MVE_CMPLX_ADD_FX_A_ixB(vecDiff0, vecDiff1);
        vstrwq_scatter_base_s32(vecScGathAddr, -64 + 8, (int32x4_t) vecTmp0);

        vecTmp0 = MVE_CMPLX_SUB_FX_A_ixB(vecDiff0, vecDiff1);
        vstrwq_scatter_base_s32(vecScGathAddr, -64 + 12, (int32x4_t) vecTmp0);

        blkCnt--;
    }
}
static void arm_cfft_radix4by2_inverse_q15_mve(const arm_cfft_instance_q15 *S, q15_t *pSrc, uint32_t fftLen)
{
    uint32_t n2;
    q15_t *pIn0;
    q15_t *pIn1;
    const q15_t *pCoef = S->pTwiddle;
    uint32_t blkCnt;
    q15x8_t vecIn0, vecIn1, vecSum, vecDiff;
    q15x8_t vecCmplxTmp, vecTw;
    q15_t const *pCoefVec;

    n2 = fftLen >> 1;

    pIn0 = pSrc;
    pIn1 = pSrc + fftLen;
    pCoefVec = pCoef;

    blkCnt = n2 / 4;
    while (blkCnt > 0U)
    {
        vecIn0 = *(q15x8_t *) pIn0;
        vecIn1 = *(q15x8_t *) pIn1;

        vecIn0 = vecIn0 >> 1;
        vecIn1 = vecIn1 >> 1;
        vecSum = vhaddq(vecIn0, vecIn1);
        vst1q(pIn0, vecSum);
        pIn0 += 8;

        vecTw = vld1q(pCoefVec);
        pCoefVec += 8;

        vecDiff = vhsubq(vecIn0, vecIn1);
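        /*
         * Complex multiply vecDiff by the (non-conjugated) twiddle, spelled
         * out with the saturating rounding-doubling complex MAC intrinsics:
         * vqrdmlsdhq forms the real parts (re*re - im*im) and vqrdmladhxq
         * the imaginary parts (re*im + im*re) of each product.
         */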
        vecCmplxTmp = vqrdmlsdhq(vuninitializedq_s16(), vecDiff, vecTw);
        vecCmplxTmp = vqrdmladhxq(vecCmplxTmp, vecDiff, vecTw);
        vst1q(pIn1, vecCmplxTmp);
        pIn1 += 8;

        blkCnt--;
    }

    _arm_radix4_butterfly_inverse_q15_mve(S, pSrc, n2);
    _arm_radix4_butterfly_inverse_q15_mve(S, pSrc + fftLen, n2);

    pIn0 = pSrc;
    blkCnt = (fftLen << 1) >> 3;
    while (blkCnt > 0U)
    {
        vecIn0 = *(q15x8_t *) pIn0;
        vecIn0 = vecIn0 << 1;
        vst1q(pIn0, vecIn0);
        pIn0 += 8;
        blkCnt--;
    }

    /*
     * tail
     * (will be merged through tail predication)
     */
    blkCnt = (fftLen << 1) & 7;
    if (blkCnt > 0U)
    {
        mve_pred16_t p0 = vctp16q(blkCnt);
        vecIn0 = *(q15x8_t *) pIn0;
        vecIn0 = vecIn0 << 1;
        vstrhq_p(pIn0, vecIn0, p0);
    }
}
/**
  @ingroup groupTransforms
 */

/**
  @addtogroup ComplexFFT
  @{
 */

/**
  @brief         Processing function for the Q15 complex FFT.
  @param[in]     S              points to an instance of the Q15 CFFT structure
  @param[in,out] pSrc           points to the complex data buffer of size <code>2*fftLen</code>. Processing occurs in-place
  @param[in]     ifftFlag       flag that selects transform direction
                   - value = 0: forward transform
                   - value = 1: inverse transform
  @param[in]     bitReverseFlag flag that enables / disables bit reversal of output
                   - value = 0: disables bit reversal of output
                   - value = 1: enables bit reversal of output
  @return        none
 */
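/*
 * Usage sketch (illustrative; the buffer name and length are chosen for the
 * example): a 256-point forward transform with bit-reversed output, using
 * one of the constant instances from arm_const_structs.h.
 *
 *     q15_t buf[2 * 256];                                // interleaved real/imag input, overwritten with the result
 *     arm_cfft_q15(&arm_cfft_sR_q15_len256, buf, 0, 1);  // ifftFlag = 0 (forward), bitReverseFlag = 1
 */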
void arm_cfft_q15(
    const arm_cfft_instance_q15 * S,
    q15_t * pSrc,
    uint8_t ifftFlag,
    uint8_t bitReverseFlag)
{
    uint32_t fftLen = S->fftLen;

    if (ifftFlag == 1U) {
        switch (fftLen) {
        case 16:
        case 64:
        case 256:
        case 1024:
        case 4096:
            _arm_radix4_butterfly_inverse_q15_mve(S, pSrc, fftLen);
            break;

        case 32:
        case 128:
        case 512:
        case 2048:
            arm_cfft_radix4by2_inverse_q15_mve(S, pSrc, fftLen);
            break;
        }
    } else {
        switch (fftLen) {
        case 16:
        case 64:
        case 256:
        case 1024:
        case 4096:
            _arm_radix4_butterfly_q15_mve(S, pSrc, fftLen);
            break;

        case 32:
        case 128:
        case 512:
        case 2048:
            arm_cfft_radix4by2_q15_mve(S, pSrc, fftLen);
            break;
        }
    }

    if (bitReverseFlag)
    {
        arm_bitreversal_16_inpl_mve((uint16_t*)pSrc, S->bitRevLength, S->pBitRevTable);
    }
}

/**
  @} end of ComplexFFT group
 */
#else

extern void arm_radix4_butterfly_q15(
        q15_t * pSrc,
        uint32_t fftLen,
        const q15_t * pCoef,
        uint32_t twidCoefModifier);

extern void arm_radix4_butterfly_inverse_q15(
        q15_t * pSrc,
        uint32_t fftLen,
        const q15_t * pCoef,
        uint32_t twidCoefModifier);

extern void arm_bitreversal_16(
        uint16_t * pSrc,
        const uint16_t bitRevLen,
        const uint16_t * pBitRevTable);

void arm_cfft_radix4by2_q15(
        q15_t * pSrc,
        uint32_t fftLen,
        const q15_t * pCoef);

void arm_cfft_radix4by2_inverse_q15(
        q15_t * pSrc,
        uint32_t fftLen,
        const q15_t * pCoef);
/**
  @ingroup groupTransforms
 */

/**
  @addtogroup ComplexFFT
  @{
 */

/**
  @brief         Processing function for the Q15 complex FFT.
  @param[in]     S              points to an instance of the Q15 CFFT structure
  @param[in,out] p1             points to the complex data buffer of size <code>2*fftLen</code>. Processing occurs in-place
  @param[in]     ifftFlag       flag that selects transform direction
                   - value = 0: forward transform
                   - value = 1: inverse transform
  @param[in]     bitReverseFlag flag that enables / disables bit reversal of output
                   - value = 0: disables bit reversal of output
                   - value = 1: enables bit reversal of output
  @return        none
 */
void arm_cfft_q15(
    const arm_cfft_instance_q15 * S,
    q15_t * p1,
    uint8_t ifftFlag,
    uint8_t bitReverseFlag)
{
    uint32_t L = S->fftLen;

    if (ifftFlag == 1U)
    {
        switch (L)
        {
        case 16:
        case 64:
        case 256:
        case 1024:
        case 4096:
            arm_radix4_butterfly_inverse_q15 ( p1, L, (q15_t*)S->pTwiddle, 1 );
            break;

        case 32:
        case 128:
        case 512:
        case 2048:
            arm_cfft_radix4by2_inverse_q15 ( p1, L, S->pTwiddle );
            break;
        }
    }
    else
    {
        switch (L)
        {
        case 16:
        case 64:
        case 256:
        case 1024:
        case 4096:
            arm_radix4_butterfly_q15 ( p1, L, (q15_t*)S->pTwiddle, 1 );
            break;

        case 32:
        case 128:
        case 512:
        case 2048:
            arm_cfft_radix4by2_q15 ( p1, L, S->pTwiddle );
            break;
        }
    }

    if ( bitReverseFlag )
        arm_bitreversal_16 ((uint16_t*) p1, S->bitRevLength, S->pBitRevTable);
}

/**
  @} end of ComplexFFT group
 */
void arm_cfft_radix4by2_q15(
        q15_t * pSrc,
        uint32_t fftLen,
        const q15_t * pCoef)
{
    uint32_t i;
    uint32_t n2;
    q15_t p0, p1, p2, p3;
#if defined (ARM_MATH_DSP)
    q31_t T, S, R;
    q31_t coeff, out1, out2;
    const q15_t *pC = pCoef;
    q15_t *pSi = pSrc;
    q15_t *pSl = pSrc + fftLen;
#else
    uint32_t l;
    q15_t xt, yt, cosVal, sinVal;
#endif

    n2 = fftLen >> 1U;

#if defined (ARM_MATH_DSP)
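    /*
     * Packed SIMD radix-2 stage: coeff holds one (cos, sin) twiddle pair
     * and R the halved difference (xt, yt). On little-endian targets,
     * __SMUAD forms the real part xt*cos + yt*sin and __SMUSDX the
     * imaginary part yt*cos - xt*sin, matching the scalar path below;
     * __PKHBT repacks the two q15 halves.
     */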
    for (i = n2; i > 0; i--)
    {
        coeff = read_q15x2_ia (&pC);

        T = read_q15x2 (pSi);
        T = __SHADD16(T, 0); /* this is just a SIMD arithmetic shift right by 1 */

        S = read_q15x2 (pSl);
        S = __SHADD16(S, 0); /* this is just a SIMD arithmetic shift right by 1 */

        R = __QSUB16(T, S);

        write_q15x2_ia (&pSi, __SHADD16(T, S));

#ifndef ARM_MATH_BIG_ENDIAN
        out1 = __SMUAD(coeff, R) >> 16U;
        out2 = __SMUSDX(coeff, R);
#else
        out1 = __SMUSDX(R, coeff) >> 16U;
        out2 = __SMUAD(coeff, R);
#endif /* #ifndef ARM_MATH_BIG_ENDIAN */

        write_q15x2_ia (&pSl, (q31_t)__PKHBT( out1, out2, 0 ));
    }
#else /* #if defined (ARM_MATH_DSP) */

    for (i = 0; i < n2; i++)
    {
        cosVal = pCoef[2 * i];
        sinVal = pCoef[2 * i + 1];
        l = i + n2;

        xt = (pSrc[2 * i] >> 1U) - (pSrc[2 * l] >> 1U);
        pSrc[2 * i] = ((pSrc[2 * i] >> 1U) + (pSrc[2 * l] >> 1U)) >> 1U;

        yt = (pSrc[2 * i + 1] >> 1U) - (pSrc[2 * l + 1] >> 1U);
        pSrc[2 * i + 1] = ((pSrc[2 * l + 1] >> 1U) + (pSrc[2 * i + 1] >> 1U)) >> 1U;

        pSrc[2 * l]     = (((int16_t) (((q31_t) xt * cosVal) >> 16U)) +
                           ((int16_t) (((q31_t) yt * sinVal) >> 16U)));

        pSrc[2 * l + 1] = (((int16_t) (((q31_t) yt * cosVal) >> 16U)) -
                           ((int16_t) (((q31_t) xt * sinVal) >> 16U)));
    }

#endif /* #if defined (ARM_MATH_DSP) */

    /* first col */
    arm_radix4_butterfly_q15( pSrc, n2, (q15_t*)pCoef, 2U);

    /* second col */
    arm_radix4_butterfly_q15( pSrc + fftLen, n2, (q15_t*)pCoef, 2U);

    n2 = fftLen >> 1U;
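    /*
     * Scale the interleaved results back up by one bit to compensate part
     * of the downscaling applied in the stages above.
     */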
    for (i = 0; i < n2; i++)
    {
        p0 = pSrc[4 * i + 0];
        p1 = pSrc[4 * i + 1];
        p2 = pSrc[4 * i + 2];
        p3 = pSrc[4 * i + 3];

        p0 <<= 1U;
        p1 <<= 1U;
        p2 <<= 1U;
        p3 <<= 1U;

        pSrc[4 * i + 0] = p0;
        pSrc[4 * i + 1] = p1;
        pSrc[4 * i + 2] = p2;
        pSrc[4 * i + 3] = p3;
    }
}
void arm_cfft_radix4by2_inverse_q15(
        q15_t * pSrc,
        uint32_t fftLen,
        const q15_t * pCoef)
{
    uint32_t i;
    uint32_t n2;
    q15_t p0, p1, p2, p3;
#if defined (ARM_MATH_DSP)
    q31_t T, S, R;
    q31_t coeff, out1, out2;
    const q15_t *pC = pCoef;
    q15_t *pSi = pSrc;
    q15_t *pSl = pSrc + fftLen;
#else
    uint32_t l;
    q15_t xt, yt, cosVal, sinVal;
#endif

    n2 = fftLen >> 1U;

#if defined (ARM_MATH_DSP)
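    /*
     * Same packed radix-2 stage as the forward version, but multiplying by
     * the non-conjugated twiddle for the inverse direction: __SMUSD forms
     * the real part xt*cos - yt*sin and __SMUADX the imaginary part
     * yt*cos + xt*sin (little-endian path).
     */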
    for (i = n2; i > 0; i--)
    {
        coeff = read_q15x2_ia (&pC);

        T = read_q15x2 (pSi);
        T = __SHADD16(T, 0); /* this is just a SIMD arithmetic shift right by 1 */

        S = read_q15x2 (pSl);
        S = __SHADD16(S, 0); /* this is just a SIMD arithmetic shift right by 1 */

        R = __QSUB16(T, S);

        write_q15x2_ia (&pSi, __SHADD16(T, S));

#ifndef ARM_MATH_BIG_ENDIAN
        out1 = __SMUSD(coeff, R) >> 16U;
        out2 = __SMUADX(coeff, R);
#else
        out1 = __SMUADX(R, coeff) >> 16U;
        out2 = __SMUSD(__QSUB(0, coeff), R);
#endif /* #ifndef ARM_MATH_BIG_ENDIAN */

        write_q15x2_ia (&pSl, (q31_t)__PKHBT( out1, out2, 0 ));
    }
#else /* #if defined (ARM_MATH_DSP) */

    for (i = 0; i < n2; i++)
    {
        cosVal = pCoef[2 * i];
        sinVal = pCoef[2 * i + 1];
        l = i + n2;

        xt = (pSrc[2 * i] >> 1U) - (pSrc[2 * l] >> 1U);
        pSrc[2 * i] = ((pSrc[2 * i] >> 1U) + (pSrc[2 * l] >> 1U)) >> 1U;

        yt = (pSrc[2 * i + 1] >> 1U) - (pSrc[2 * l + 1] >> 1U);
        pSrc[2 * i + 1] = ((pSrc[2 * l + 1] >> 1U) + (pSrc[2 * i + 1] >> 1U)) >> 1U;

        pSrc[2 * l]     = (((int16_t) (((q31_t) xt * cosVal) >> 16U)) -
                           ((int16_t) (((q31_t) yt * sinVal) >> 16U)));

        pSrc[2 * l + 1] = (((int16_t) (((q31_t) yt * cosVal) >> 16U)) +
                           ((int16_t) (((q31_t) xt * sinVal) >> 16U)));
    }

#endif /* #if defined (ARM_MATH_DSP) */

    /* first col */
    arm_radix4_butterfly_inverse_q15( pSrc, n2, (q15_t*)pCoef, 2U);

    /* second col */
    arm_radix4_butterfly_inverse_q15( pSrc + fftLen, n2, (q15_t*)pCoef, 2U);

    n2 = fftLen >> 1U;
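    /*
     * Scale the interleaved results back up by one bit, as in the forward
     * radix-4-by-2 path.
     */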
    for (i = 0; i < n2; i++)
    {
        p0 = pSrc[4 * i + 0];
        p1 = pSrc[4 * i + 1];
        p2 = pSrc[4 * i + 2];
        p3 = pSrc[4 * i + 3];

        p0 <<= 1U;
        p1 <<= 1U;
        p2 <<= 1U;
        p3 <<= 1U;

        pSrc[4 * i + 0] = p0;
        pSrc[4 * i + 1] = p1;
        pSrc[4 * i + 2] = p2;
        pSrc[4 * i + 3] = p3;
    }
}

#endif /* defined(ARM_MATH_MVEI) && !defined(ARM_MATH_AUTOVECTORIZE) */