/* ----------------------------------------------------------------------
 * Project:      CMSIS DSP Library
 * Title:        arm_cfft_q15.c
 * Description:  Combined Radix Decimation in Frequency CFFT processing function, Q15 fixed-point
 *
 * $Date:        18. March 2019
 * $Revision:    V1.6.0
 *
 * Target Processor: Cortex-M cores
 * -------------------------------------------------------------------- */
/*
 * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved.
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the License); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an AS IS BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "arm_math.h"
#if defined(ARM_MATH_MVEI)

#include "arm_vec_fft.h"

static void arm_bitreversal_16_inpl_mve(
        uint16_t *pSrc,
  const uint16_t bitRevLen,
  const uint16_t *pBitRevTab)
{
    uint32_t *src = (uint32_t *)pSrc;
    uint32_t blkCnt;     /* loop counters */
    uint16x8_t bitRevTabOff;
    uint16x8_t one = vdupq_n_u16(1);

    blkCnt = (bitRevLen / 2) / 4;
    while (blkCnt > 0U) {
        bitRevTabOff = vldrhq_u16(pBitRevTab);
        pBitRevTab += 8;

        uint32x4_t bitRevOff1 = vmullbq_int_u16(bitRevTabOff, one);
        uint32x4_t bitRevOff2 = vmulltq_int_u16(bitRevTabOff, one);
        bitRevOff1 = bitRevOff1 >> 3;
        bitRevOff2 = bitRevOff2 >> 3;

        uint32x4_t in1 = vldrwq_gather_shifted_offset_u32(src, bitRevOff1);
        uint32x4_t in2 = vldrwq_gather_shifted_offset_u32(src, bitRevOff2);

        vstrwq_scatter_shifted_offset_u32(src, bitRevOff1, in2);
        vstrwq_scatter_shifted_offset_u32(src, bitRevOff2, in1);

        /*
         * Decrement the blockSize loop counter
         */
        blkCnt--;
    }

    /*
     * tail
     * (will be merged thru tail predication)
     */
    blkCnt = bitRevLen & 7;
    if (blkCnt > 0U) {
        mve_pred16_t p0 = vctp16q(blkCnt);

        bitRevTabOff = vldrhq_z_u16(pBitRevTab, p0);

        uint32x4_t bitRevOff1 = vmullbq_int_u16(bitRevTabOff, one);
        uint32x4_t bitRevOff2 = vmulltq_int_u16(bitRevTabOff, one);
        bitRevOff1 = bitRevOff1 >> 3;
        bitRevOff2 = bitRevOff2 >> 3;

        uint32x4_t in1 = vldrwq_gather_shifted_offset_z_u32(src, bitRevOff1, p0);
        uint32x4_t in2 = vldrwq_gather_shifted_offset_z_u32(src, bitRevOff2, p0);

        vstrwq_scatter_shifted_offset_p_u32(src, bitRevOff1, in2, p0);
        vstrwq_scatter_shifted_offset_p_u32(src, bitRevOff2, in1, p0);
    }
}
static void _arm_radix4_butterfly_q15_mve(
    const arm_cfft_instance_q15 * S,
    q15_t *pSrc,
    uint32_t fftLen)
{
    q15x8_t vecTmp0, vecTmp1;
    q15x8_t vecSum0, vecDiff0, vecSum1, vecDiff1;
    q15x8_t vecA, vecB, vecC, vecD;
    q15x8_t vecW;
    uint32_t blkCnt;
    uint32_t n1, n2;
    uint32_t stage = 0;
    int32_t iter = 1;
    static const uint32_t strides[4] = {
        (0 - 16) * sizeof(q15_t *), (4 - 16) * sizeof(q15_t *),
        (8 - 16) * sizeof(q15_t *), (12 - 16) * sizeof(q15_t *)
    };

    /*
     * Process first stages
     * Each of these middle stages scales the input down by two bits (divide by 4)
     */
    n2 = fftLen;
    n1 = n2;
    n2 >>= 2u;

    for (int k = fftLen / 4u; k > 1; k >>= 2u)
    {
        for (int i = 0; i < iter; i++)
        {
            q15_t const *p_rearranged_twiddle_tab_stride2 =
                &S->rearranged_twiddle_stride2[
                S->rearranged_twiddle_tab_stride2_arr[stage]];
            q15_t const *p_rearranged_twiddle_tab_stride3 =
                &S->rearranged_twiddle_stride3[
                S->rearranged_twiddle_tab_stride3_arr[stage]];
            q15_t const *p_rearranged_twiddle_tab_stride1 =
                &S->rearranged_twiddle_stride1[
                S->rearranged_twiddle_tab_stride1_arr[stage]];
            q15_t const *pW1, *pW2, *pW3;
            q15_t *inA = pSrc + CMPLX_DIM * i * n1;
            q15_t *inB = inA + n2 * CMPLX_DIM;
            q15_t *inC = inB + n2 * CMPLX_DIM;
            q15_t *inD = inC + n2 * CMPLX_DIM;

            pW1 = p_rearranged_twiddle_tab_stride1;
            pW2 = p_rearranged_twiddle_tab_stride2;
            pW3 = p_rearranged_twiddle_tab_stride3;

            blkCnt = n2 / 4;
            /*
             * load 4 x q15 complex pair
             */
            vecA = vldrhq_s16(inA);
            vecC = vldrhq_s16(inC);
            while (blkCnt > 0U)
            {
                vecB = vldrhq_s16(inB);
                vecD = vldrhq_s16(inD);

                vecSum0  = vhaddq(vecA, vecC);
                vecDiff0 = vhsubq(vecA, vecC);

                vecSum1  = vhaddq(vecB, vecD);
                vecDiff1 = vhsubq(vecB, vecD);

                /*
                 * [ 1 1 1 1 ] * [ A B C D ]' .* 1
                 */
                vecTmp0 = vhaddq(vecSum0, vecSum1);
                vst1q(inA, vecTmp0);
                inA += 8;
                /*
                 * [ 1 -1 1 -1 ] * [ A B C D ]'
                 */
                vecTmp0 = vhsubq(vecSum0, vecSum1);
                /*
                 * [ 1 -1 1 -1 ] * [ A B C D ]' .* W2
                 */
                vecW = vld1q(pW2);
                pW2 += 8;
                vecTmp1 = MVE_CMPLX_MULT_FX_AxB(vecW, vecTmp0);
                vst1q(inB, vecTmp1);
                inB += 8;
                /*
                 * [ 1 -i -1 +i ] * [ A B C D ]'
                 */
                vecTmp0 = MVE_CMPLX_SUB_FX_A_ixB(vecDiff0, vecDiff1);
                /*
                 * [ 1 -i -1 +i ] * [ A B C D ]' .* W1
                 */
                vecW = vld1q(pW1);
                pW1 += 8;
                vecTmp1 = MVE_CMPLX_MULT_FX_AxB(vecW, vecTmp0);
                vst1q(inC, vecTmp1);
                inC += 8;
                /*
                 * [ 1 +i -1 -i ] * [ A B C D ]'
                 */
                vecTmp0 = MVE_CMPLX_ADD_FX_A_ixB(vecDiff0, vecDiff1);
                /*
                 * [ 1 +i -1 -i ] * [ A B C D ]' .* W3
                 */
                vecW = vld1q(pW3);
                pW3 += 8;
                vecTmp1 = MVE_CMPLX_MULT_FX_AxB(vecW, vecTmp0);
                vst1q(inD, vecTmp1);
                inD += 8;

                vecA = vldrhq_s16(inA);
                vecC = vldrhq_s16(inC);

                blkCnt--;
            }
        }
        n1 = n2;
        n2 >>= 2u;
        iter = iter << 2;
        stage++;
    }

    /*
     * start of Last stage process
     */
    uint32x4_t vecScGathAddr = *(uint32x4_t *) strides;
    vecScGathAddr = vecScGathAddr + (uint32_t) pSrc;

    /*
     * load scheduling
     */
    vecA = (q15x8_t) vldrwq_gather_base_wb_s32(&vecScGathAddr, 64);
    vecC = (q15x8_t) vldrwq_gather_base_s32(vecScGathAddr, 8);

    blkCnt = (fftLen >> 4);
    while (blkCnt > 0U)
    {
        vecSum0  = vhaddq(vecA, vecC);
        vecDiff0 = vhsubq(vecA, vecC);

        vecB = (q15x8_t) vldrwq_gather_base_s32(vecScGathAddr, 4);
        vecD = (q15x8_t) vldrwq_gather_base_s32(vecScGathAddr, 12);

        vecSum1  = vhaddq(vecB, vecD);
        vecDiff1 = vhsubq(vecB, vecD);

        /*
         * pre-load for next iteration
         */
        vecA = (q15x8_t) vldrwq_gather_base_wb_s32(&vecScGathAddr, 64);
        vecC = (q15x8_t) vldrwq_gather_base_s32(vecScGathAddr, 8);

        vecTmp0 = vhaddq(vecSum0, vecSum1);
        vstrwq_scatter_base_s32(vecScGathAddr, -64, (int32x4_t) vecTmp0);

        vecTmp0 = vhsubq(vecSum0, vecSum1);
        vstrwq_scatter_base_s32(vecScGathAddr, -64 + 4, (int32x4_t) vecTmp0);

        vecTmp0 = MVE_CMPLX_SUB_FX_A_ixB(vecDiff0, vecDiff1);
        vstrwq_scatter_base_s32(vecScGathAddr, -64 + 8, (int32x4_t) vecTmp0);

        vecTmp0 = MVE_CMPLX_ADD_FX_A_ixB(vecDiff0, vecDiff1);
        vstrwq_scatter_base_s32(vecScGathAddr, -64 + 12, (int32x4_t) vecTmp0);

        blkCnt--;
    }
}
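
/*
 * Reference note (added for readability): the row comments above follow the
 * standard radix-4 decimation-in-frequency butterfly. For complex inputs
 * A, B, C, D and twiddle factors W1, W2, W3, each iteration stores:
 *
 *   outA =        (A + C) + (B + D)
 *   outB = W2 * [ (A + C) - (B + D) ]
 *   outC = W1 * [ (A - C) - j(B - D) ]
 *   outD = W3 * [ (A - C) + j(B - D) ]
 *
 * The halving vhaddq/vhsubq operations provide the per-stage fixed-point
 * down-scaling mentioned at the top of the stage loop.
 */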
static void arm_cfft_radix4by2_q15_mve(const arm_cfft_instance_q15 *S, q15_t *pSrc, uint32_t fftLen)
{
    uint32_t n2;
    q15_t *pIn0;
    q15_t *pIn1;
    const q15_t *pCoef = S->pTwiddle;
    uint32_t blkCnt;
    q15x8_t vecIn0, vecIn1, vecSum, vecDiff;
    q15x8_t vecCmplxTmp, vecTw;
    q15_t const *pCoefVec;

    n2 = fftLen >> 1;

    pIn0 = pSrc;
    pIn1 = pSrc + fftLen;
    pCoefVec = pCoef;

    blkCnt = n2 / 4;
    while (blkCnt > 0U)
    {
        vecIn0 = *(q15x8_t *) pIn0;
        vecIn1 = *(q15x8_t *) pIn1;

        vecIn0 = vecIn0 >> 1;
        vecIn1 = vecIn1 >> 1;
        vecSum = vhaddq(vecIn0, vecIn1);
        vst1q(pIn0, vecSum);
        pIn0 += 8;

        vecTw = vld1q(pCoefVec);
        pCoefVec += 8;

        vecDiff = vhsubq(vecIn0, vecIn1);
        vecCmplxTmp = MVE_CMPLX_MULT_FX_AxConjB(vecDiff, vecTw);
        vst1q(pIn1, vecCmplxTmp);
        pIn1 += 8;

        blkCnt--;
    }

    _arm_radix4_butterfly_q15_mve(S, pSrc, n2);

    _arm_radix4_butterfly_q15_mve(S, pSrc + fftLen, n2);

    pIn0 = pSrc;
    blkCnt = (fftLen << 1) >> 3;
    while (blkCnt > 0U)
    {
        vecIn0 = *(q15x8_t *) pIn0;
        vecIn0 = vecIn0 << 1;
        vst1q(pIn0, vecIn0);
        pIn0 += 8;
        blkCnt--;
    }
    /*
     * tail
     * (will be merged thru tail predication)
     */
    blkCnt = (fftLen << 1) & 7;
    if (blkCnt > 0U)
    {
        mve_pred16_t p0 = vctp16q(blkCnt);
        vecIn0 = *(q15x8_t *) pIn0;
        vecIn0 = vecIn0 << 1;
        vstrhq_p(pIn0, vecIn0, p0);
    }
}
static void _arm_radix4_butterfly_inverse_q15_mve(const arm_cfft_instance_q15 *S, q15_t *pSrc, uint32_t fftLen)
{
    q15x8_t vecTmp0, vecTmp1;
    q15x8_t vecSum0, vecDiff0, vecSum1, vecDiff1;
    q15x8_t vecA, vecB, vecC, vecD;
    q15x8_t vecW;
    uint32_t blkCnt;
    uint32_t n1, n2;
    uint32_t stage = 0;
    int32_t iter = 1;
    static const uint32_t strides[4] = {
        (0 - 16) * sizeof(q15_t *), (4 - 16) * sizeof(q15_t *),
        (8 - 16) * sizeof(q15_t *), (12 - 16) * sizeof(q15_t *)
    };

    /*
     * Process first stages
     * Each of these middle stages scales the input down by two bits (divide by 4)
     */
    n2 = fftLen;
    n1 = n2;
    n2 >>= 2u;

    for (int k = fftLen / 4u; k > 1; k >>= 2u)
    {
        for (int i = 0; i < iter; i++)
        {
            q15_t const *p_rearranged_twiddle_tab_stride2 =
                &S->rearranged_twiddle_stride2[
                S->rearranged_twiddle_tab_stride2_arr[stage]];
            q15_t const *p_rearranged_twiddle_tab_stride3 =
                &S->rearranged_twiddle_stride3[
                S->rearranged_twiddle_tab_stride3_arr[stage]];
            q15_t const *p_rearranged_twiddle_tab_stride1 =
                &S->rearranged_twiddle_stride1[
                S->rearranged_twiddle_tab_stride1_arr[stage]];
            q15_t const *pW1, *pW2, *pW3;
            q15_t *inA = pSrc + CMPLX_DIM * i * n1;
            q15_t *inB = inA + n2 * CMPLX_DIM;
            q15_t *inC = inB + n2 * CMPLX_DIM;
            q15_t *inD = inC + n2 * CMPLX_DIM;

            pW1 = p_rearranged_twiddle_tab_stride1;
            pW2 = p_rearranged_twiddle_tab_stride2;
            pW3 = p_rearranged_twiddle_tab_stride3;

            blkCnt = n2 / 4;
            /*
             * load 4 x q15 complex pair
             */
            vecA = vldrhq_s16(inA);
            vecC = vldrhq_s16(inC);
            while (blkCnt > 0U)
            {
                vecB = vldrhq_s16(inB);
                vecD = vldrhq_s16(inD);

                vecSum0  = vhaddq(vecA, vecC);
                vecDiff0 = vhsubq(vecA, vecC);

                vecSum1  = vhaddq(vecB, vecD);
                vecDiff1 = vhsubq(vecB, vecD);

                /*
                 * [ 1 1 1 1 ] * [ A B C D ]' .* 1
                 */
                vecTmp0 = vhaddq(vecSum0, vecSum1);
                vst1q(inA, vecTmp0);
                inA += 8;
                /*
                 * [ 1 -1 1 -1 ] * [ A B C D ]'
                 */
                vecTmp0 = vhsubq(vecSum0, vecSum1);
                /*
                 * [ 1 -1 1 -1 ] * [ A B C D ]' .* conj(W2)
                 */
                vecW = vld1q(pW2);
                pW2 += 8;
                vecTmp1 = MVE_CMPLX_MULT_FX_AxConjB(vecTmp0, vecW);
                vst1q(inB, vecTmp1);
                inB += 8;
                /*
                 * [ 1 +i -1 -i ] * [ A B C D ]'
                 */
                vecTmp0 = MVE_CMPLX_ADD_FX_A_ixB(vecDiff0, vecDiff1);
                /*
                 * [ 1 +i -1 -i ] * [ A B C D ]' .* conj(W1)
                 */
                vecW = vld1q(pW1);
                pW1 += 8;
                vecTmp1 = MVE_CMPLX_MULT_FX_AxConjB(vecTmp0, vecW);
                vst1q(inC, vecTmp1);
                inC += 8;
                /*
                 * [ 1 -i -1 +i ] * [ A B C D ]'
                 */
                vecTmp0 = MVE_CMPLX_SUB_FX_A_ixB(vecDiff0, vecDiff1);
                /*
                 * [ 1 -i -1 +i ] * [ A B C D ]' .* conj(W3)
                 */
                vecW = vld1q(pW3);
                pW3 += 8;
                vecTmp1 = MVE_CMPLX_MULT_FX_AxConjB(vecTmp0, vecW);
                vst1q(inD, vecTmp1);
                inD += 8;

                vecA = vldrhq_s16(inA);
                vecC = vldrhq_s16(inC);

                blkCnt--;
            }
        }
        n1 = n2;
        n2 >>= 2u;
        iter = iter << 2;
        stage++;
    }

    /*
     * start of Last stage process
     */
    uint32x4_t vecScGathAddr = *(uint32x4_t *) strides;
    vecScGathAddr = vecScGathAddr + (uint32_t) pSrc;

    /*
     * load scheduling
     */
    vecA = (q15x8_t) vldrwq_gather_base_wb_s32(&vecScGathAddr, 64);
    vecC = (q15x8_t) vldrwq_gather_base_s32(vecScGathAddr, 8);

    blkCnt = (fftLen >> 4);
    while (blkCnt > 0U)
    {
        vecSum0  = vhaddq(vecA, vecC);
        vecDiff0 = vhsubq(vecA, vecC);

        vecB = (q15x8_t) vldrwq_gather_base_s32(vecScGathAddr, 4);
        vecD = (q15x8_t) vldrwq_gather_base_s32(vecScGathAddr, 12);

        vecSum1  = vhaddq(vecB, vecD);
        vecDiff1 = vhsubq(vecB, vecD);

        /*
         * pre-load for next iteration
         */
        vecA = (q15x8_t) vldrwq_gather_base_wb_s32(&vecScGathAddr, 64);
        vecC = (q15x8_t) vldrwq_gather_base_s32(vecScGathAddr, 8);

        vecTmp0 = vhaddq(vecSum0, vecSum1);
        vstrwq_scatter_base_s32(vecScGathAddr, -64, (int32x4_t) vecTmp0);

        vecTmp0 = vhsubq(vecSum0, vecSum1);
        vstrwq_scatter_base_s32(vecScGathAddr, -64 + 4, (int32x4_t) vecTmp0);

        vecTmp0 = MVE_CMPLX_ADD_FX_A_ixB(vecDiff0, vecDiff1);
        vstrwq_scatter_base_s32(vecScGathAddr, -64 + 8, (int32x4_t) vecTmp0);

        vecTmp0 = MVE_CMPLX_SUB_FX_A_ixB(vecDiff0, vecDiff1);
        vstrwq_scatter_base_s32(vecScGathAddr, -64 + 12, (int32x4_t) vecTmp0);

        blkCnt--;
    }
}
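
/*
 * Note (added for readability): this inverse butterfly mirrors
 * _arm_radix4_butterfly_q15_mve, but multiplies by the conjugated twiddles
 * (MVE_CMPLX_MULT_FX_AxConjB instead of MVE_CMPLX_MULT_FX_AxB) and swaps the
 * +j / -j rows, which is the sign change that turns the forward DIF butterfly
 * into its inverse.
 */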
static void arm_cfft_radix4by2_inverse_q15_mve(const arm_cfft_instance_q15 *S, q15_t *pSrc, uint32_t fftLen)
{
    uint32_t n2;
    q15_t *pIn0;
    q15_t *pIn1;
    const q15_t *pCoef = S->pTwiddle;
    uint32_t blkCnt;
    q15x8_t vecIn0, vecIn1, vecSum, vecDiff;
    q15x8_t vecCmplxTmp, vecTw;
    q15_t const *pCoefVec;

    n2 = fftLen >> 1;

    pIn0 = pSrc;
    pIn1 = pSrc + fftLen;
    pCoefVec = pCoef;

    blkCnt = n2 / 4;
    while (blkCnt > 0U)
    {
        vecIn0 = *(q15x8_t *) pIn0;
        vecIn1 = *(q15x8_t *) pIn1;

        vecIn0 = vecIn0 >> 1;
        vecIn1 = vecIn1 >> 1;
        vecSum = vhaddq(vecIn0, vecIn1);
        vst1q(pIn0, vecSum);
        pIn0 += 8;

        vecTw = vld1q(pCoefVec);
        pCoefVec += 8;

        vecDiff = vhsubq(vecIn0, vecIn1);
        vecCmplxTmp = vqrdmlsdhq(vuninitializedq_s16(), vecDiff, vecTw);
        vecCmplxTmp = vqrdmladhxq(vecCmplxTmp, vecDiff, vecTw);
        vst1q(pIn1, vecCmplxTmp);
        pIn1 += 8;

        blkCnt--;
    }

    _arm_radix4_butterfly_inverse_q15_mve(S, pSrc, n2);

    _arm_radix4_butterfly_inverse_q15_mve(S, pSrc + fftLen, n2);

    pIn0 = pSrc;
    blkCnt = (fftLen << 1) >> 3;
    while (blkCnt > 0U)
    {
        vecIn0 = *(q15x8_t *) pIn0;
        vecIn0 = vecIn0 << 1;
        vst1q(pIn0, vecIn0);
        pIn0 += 8;
        blkCnt--;
    }
    /*
     * tail
     * (will be merged thru tail predication)
     */
    blkCnt = (fftLen << 1) & 7;
    if (blkCnt > 0U)
    {
        mve_pred16_t p0 = vctp16q(blkCnt);
        vecIn0 = *(q15x8_t *) pIn0;
        vecIn0 = vecIn0 << 1;
        vstrhq_p(pIn0, vecIn0, p0);
    }
}
/**
  @ingroup groupTransforms
 */

/**
  @addtogroup ComplexFFT
  @{
 */

/**
  @brief         Processing function for Q15 complex FFT.
  @param[in]     S               points to an instance of Q15 CFFT structure
  @param[in,out] pSrc            points to the complex data buffer of size <code>2*fftLen</code>. Processing occurs in-place
  @param[in]     ifftFlag        flag that selects transform direction
                   - value = 0: forward transform
                   - value = 1: inverse transform
  @param[in]     bitReverseFlag  flag that enables / disables bit reversal of output
                   - value = 0: disables bit reversal of output
                   - value = 1: enables bit reversal of output
  @return        none
 */
void arm_cfft_q15(
  const arm_cfft_instance_q15 * S,
        q15_t * pSrc,
        uint8_t ifftFlag,
        uint8_t bitReverseFlag)
{
    uint32_t fftLen = S->fftLen;

    if (ifftFlag == 1U) {
        switch (fftLen) {
        case 16:
        case 64:
        case 256:
        case 1024:
        case 4096:
            _arm_radix4_butterfly_inverse_q15_mve(S, pSrc, fftLen);
            break;

        case 32:
        case 128:
        case 512:
        case 2048:
            arm_cfft_radix4by2_inverse_q15_mve(S, pSrc, fftLen);
            break;
        }
    } else {
        switch (fftLen) {
        case 16:
        case 64:
        case 256:
        case 1024:
        case 4096:
            _arm_radix4_butterfly_q15_mve(S, pSrc, fftLen);
            break;

        case 32:
        case 128:
        case 512:
        case 2048:
            arm_cfft_radix4by2_q15_mve(S, pSrc, fftLen);
            break;
        }
    }

    if (bitReverseFlag)
    {
        arm_bitreversal_16_inpl_mve((uint16_t*)pSrc, S->bitRevLength, S->pBitRevTable);
    }
}

/**
  @} end of ComplexFFT group
 */
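
/*
 * Usage sketch (illustrative only, not part of the library): the Helium and
 * generic builds are called the same way. The constant instance
 * arm_cfft_sR_q15_len256 comes from arm_const_structs.h; newer CMSIS-DSP
 * releases can instead populate an instance with arm_cfft_init_q15().
 *
 *   #include "arm_math.h"
 *   #include "arm_const_structs.h"
 *
 *   q15_t buffer[2 * 256];   // interleaved {real, imag} samples, modified in place
 *   // ... fill buffer ...
 *   // forward transform; bitReverseFlag = 1 returns the output in natural order
 *   arm_cfft_q15(&arm_cfft_sR_q15_len256, buffer, 0, 1);
 */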
#else

extern void arm_radix4_butterfly_q15(
        q15_t * pSrc,
        uint32_t fftLen,
  const q15_t * pCoef,
        uint32_t twidCoefModifier);

extern void arm_radix4_butterfly_inverse_q15(
        q15_t * pSrc,
        uint32_t fftLen,
  const q15_t * pCoef,
        uint32_t twidCoefModifier);

extern void arm_bitreversal_16(
        uint16_t * pSrc,
  const uint16_t bitRevLen,
  const uint16_t * pBitRevTable);

void arm_cfft_radix4by2_q15(
        q15_t * pSrc,
        uint32_t fftLen,
  const q15_t * pCoef);

void arm_cfft_radix4by2_inverse_q15(
        q15_t * pSrc,
        uint32_t fftLen,
  const q15_t * pCoef);
/**
  @ingroup groupTransforms
 */

/**
  @addtogroup ComplexFFT
  @{
 */

/**
  @brief         Processing function for Q15 complex FFT.
  @param[in]     S               points to an instance of Q15 CFFT structure
  @param[in,out] p1              points to the complex data buffer of size <code>2*fftLen</code>. Processing occurs in-place
  @param[in]     ifftFlag        flag that selects transform direction
                   - value = 0: forward transform
                   - value = 1: inverse transform
  @param[in]     bitReverseFlag  flag that enables / disables bit reversal of output
                   - value = 0: disables bit reversal of output
                   - value = 1: enables bit reversal of output
  @return        none
 */
void arm_cfft_q15(
  const arm_cfft_instance_q15 * S,
        q15_t * p1,
        uint8_t ifftFlag,
        uint8_t bitReverseFlag)
{
    uint32_t L = S->fftLen;

    if (ifftFlag == 1U)
    {
        switch (L)
        {
        case 16:
        case 64:
        case 256:
        case 1024:
        case 4096:
            arm_radix4_butterfly_inverse_q15 ( p1, L, (q15_t*)S->pTwiddle, 1 );
            break;

        case 32:
        case 128:
        case 512:
        case 2048:
            arm_cfft_radix4by2_inverse_q15 ( p1, L, S->pTwiddle );
            break;
        }
    }
    else
    {
        switch (L)
        {
        case 16:
        case 64:
        case 256:
        case 1024:
        case 4096:
            arm_radix4_butterfly_q15 ( p1, L, (q15_t*)S->pTwiddle, 1 );
            break;

        case 32:
        case 128:
        case 512:
        case 2048:
            arm_cfft_radix4by2_q15 ( p1, L, S->pTwiddle );
            break;
        }
    }

    if ( bitReverseFlag )
        arm_bitreversal_16 ((uint16_t*) p1, S->bitRevLength, S->pBitRevTable);
}

/**
  @} end of ComplexFFT group
 */
void arm_cfft_radix4by2_q15(
        q15_t * pSrc,
        uint32_t fftLen,
  const q15_t * pCoef)
{
    uint32_t i;
    uint32_t n2;
    q15_t p0, p1, p2, p3;
#if defined (ARM_MATH_DSP)
    q31_t T, S, R;
    q31_t coeff, out1, out2;
    const q15_t *pC = pCoef;
          q15_t *pSi = pSrc;
          q15_t *pSl = pSrc + fftLen;
#else
    uint32_t l;
    q15_t xt, yt, cosVal, sinVal;
#endif

    n2 = fftLen >> 1U;

#if defined (ARM_MATH_DSP)

    for (i = n2; i > 0; i--)
    {
        coeff = read_q15x2_ia ((q15_t **) &pC);

        T = read_q15x2 (pSi);
        T = __SHADD16(T, 0); /* this is just a SIMD arithmetic shift right by 1 */

        S = read_q15x2 (pSl);
        S = __SHADD16(S, 0); /* this is just a SIMD arithmetic shift right by 1 */

        R = __QSUB16(T, S);

        write_q15x2_ia (&pSi, __SHADD16(T, S));

#ifndef ARM_MATH_BIG_ENDIAN
        out1 = __SMUAD(coeff, R) >> 16U;
        out2 = __SMUSDX(coeff, R);
#else
        out1 = __SMUSDX(R, coeff) >> 16U;
        out2 = __SMUAD(coeff, R);
#endif /* #ifndef ARM_MATH_BIG_ENDIAN */

        write_q15x2_ia (&pSl, (q31_t) ((out2) & 0xFFFF0000) | (out1 & 0x0000FFFF));
    }

#else /* #if defined (ARM_MATH_DSP) */

    for (i = 0; i < n2; i++)
    {
        cosVal = pCoef[2 * i];
        sinVal = pCoef[2 * i + 1];

        l = i + n2;

        xt =           (pSrc[2 * i] >> 1U) - (pSrc[2 * l] >> 1U);
        pSrc[2 * i] = ((pSrc[2 * i] >> 1U) + (pSrc[2 * l] >> 1U)) >> 1U;

        yt =               (pSrc[2 * i + 1] >> 1U) - (pSrc[2 * l + 1] >> 1U);
        pSrc[2 * i + 1] = ((pSrc[2 * l + 1] >> 1U) + (pSrc[2 * i + 1] >> 1U)) >> 1U;

        pSrc[2 * l]     = (((int16_t) (((q31_t) xt * cosVal) >> 16U)) +
                           ((int16_t) (((q31_t) yt * sinVal) >> 16U))  );

        pSrc[2 * l + 1] = (((int16_t) (((q31_t) yt * cosVal) >> 16U)) -
                           ((int16_t) (((q31_t) xt * sinVal) >> 16U))  );
    }

#endif /* #if defined (ARM_MATH_DSP) */

    /* first col */
    arm_radix4_butterfly_q15 (pSrc, n2, (q15_t*)pCoef, 2U);

    /* second col */
    arm_radix4_butterfly_q15 (pSrc + fftLen, n2, (q15_t*)pCoef, 2U);

    n2 = fftLen >> 1U;
    for (i = 0; i < n2; i++)
    {
        p0 = pSrc[4 * i + 0];
        p1 = pSrc[4 * i + 1];
        p2 = pSrc[4 * i + 2];
        p3 = pSrc[4 * i + 3];

        p0 <<= 1U;
        p1 <<= 1U;
        p2 <<= 1U;
        p3 <<= 1U;

        pSrc[4 * i + 0] = p0;
        pSrc[4 * i + 1] = p1;
        pSrc[4 * i + 2] = p2;
        pSrc[4 * i + 3] = p3;
    }
}
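
/*
 * Note (added for readability): in the ARM_MATH_DSP branch above, each
 * __SMUAD / __SMUSDX pair evaluates the packed Q15 complex product
 * (xt + j*yt) * (cosVal - j*sinVal), i.e. the same conjugate-twiddle multiply
 * written out explicitly in the plain C branch. The final loop shifts every
 * sample left by one bit, undoing one bit of the down-scaling applied by the
 * preceding stages.
 */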
void arm_cfft_radix4by2_inverse_q15(
        q15_t * pSrc,
        uint32_t fftLen,
  const q15_t * pCoef)
{
    uint32_t i;
    uint32_t n2;
    q15_t p0, p1, p2, p3;
#if defined (ARM_MATH_DSP)
    q31_t T, S, R;
    q31_t coeff, out1, out2;
    const q15_t *pC = pCoef;
          q15_t *pSi = pSrc;
          q15_t *pSl = pSrc + fftLen;
#else
    uint32_t l;
    q15_t xt, yt, cosVal, sinVal;
#endif

    n2 = fftLen >> 1U;

#if defined (ARM_MATH_DSP)

    for (i = n2; i > 0; i--)
    {
        coeff = read_q15x2_ia ((q15_t **) &pC);

        T = read_q15x2 (pSi);
        T = __SHADD16(T, 0); /* this is just a SIMD arithmetic shift right by 1 */

        S = read_q15x2 (pSl);
        S = __SHADD16(S, 0); /* this is just a SIMD arithmetic shift right by 1 */

        R = __QSUB16(T, S);

        write_q15x2_ia (&pSi, __SHADD16(T, S));

#ifndef ARM_MATH_BIG_ENDIAN
        out1 = __SMUSD(coeff, R) >> 16U;
        out2 = __SMUADX(coeff, R);
#else
        out1 = __SMUADX(R, coeff) >> 16U;
        out2 = __SMUSD(__QSUB(0, coeff), R);
#endif /* #ifndef ARM_MATH_BIG_ENDIAN */

        write_q15x2_ia (&pSl, (q31_t) ((out2) & 0xFFFF0000) | (out1 & 0x0000FFFF));
    }

#else /* #if defined (ARM_MATH_DSP) */

    for (i = 0; i < n2; i++)
    {
        cosVal = pCoef[2 * i];
        sinVal = pCoef[2 * i + 1];

        l = i + n2;

        xt =           (pSrc[2 * i] >> 1U) - (pSrc[2 * l] >> 1U);
        pSrc[2 * i] = ((pSrc[2 * i] >> 1U) + (pSrc[2 * l] >> 1U)) >> 1U;

        yt =               (pSrc[2 * i + 1] >> 1U) - (pSrc[2 * l + 1] >> 1U);
        pSrc[2 * i + 1] = ((pSrc[2 * l + 1] >> 1U) + (pSrc[2 * i + 1] >> 1U)) >> 1U;

        pSrc[2 * l]     = (((int16_t) (((q31_t) xt * cosVal) >> 16U)) -
                           ((int16_t) (((q31_t) yt * sinVal) >> 16U))  );

        pSrc[2 * l + 1] = (((int16_t) (((q31_t) yt * cosVal) >> 16U)) +
                           ((int16_t) (((q31_t) xt * sinVal) >> 16U))  );
    }

#endif /* #if defined (ARM_MATH_DSP) */

    /* first col */
    arm_radix4_butterfly_inverse_q15 (pSrc, n2, (q15_t*)pCoef, 2U);

    /* second col */
    arm_radix4_butterfly_inverse_q15 (pSrc + fftLen, n2, (q15_t*)pCoef, 2U);

    n2 = fftLen >> 1U;
    for (i = 0; i < n2; i++)
    {
        p0 = pSrc[4 * i + 0];
        p1 = pSrc[4 * i + 1];
        p2 = pSrc[4 * i + 2];
        p3 = pSrc[4 * i + 3];

        p0 <<= 1U;
        p1 <<= 1U;
        p2 <<= 1U;
        p3 <<= 1U;

        pSrc[4 * i + 0] = p0;
        pSrc[4 * i + 1] = p1;
        pSrc[4 * i + 2] = p2;
        pSrc[4 * i + 3] = p3;
    }
}
#endif /* defined(ARM_MATH_MVEI) */