/* ----------------------------------------------------------------------
 * Project:      CMSIS DSP Library
 * Title:        arm_helium_utils.h
 * Description:  Utility functions for Helium development
 *
 * @version  V1.10.0
 * @date     08 July 2021
 *
 * Target Processor: Cortex-M and Cortex-A cores
 * -------------------------------------------------------------------- */
/*
 * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved.
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the License); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an AS IS BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef _ARM_UTILS_HELIUM_H_
#define _ARM_UTILS_HELIUM_H_

#ifdef __cplusplus
extern "C"
{
#endif

/***************************************
Definitions available for MVEF and MVEI
***************************************/
#if (defined (ARM_MATH_HELIUM) || defined(ARM_MATH_MVEF) || defined(ARM_MATH_MVEI)) && !defined(ARM_MATH_AUTOVECTORIZE)

#define INACTIVELANE            0 /* inactive lane content */

#endif /* defined (ARM_MATH_HELIUM) || defined(ARM_MATH_MVEF) || defined(ARM_MATH_MVEI) */

/***************************************
Definitions available for MVEF only
***************************************/
#if (defined (ARM_MATH_HELIUM) || defined(ARM_MATH_MVEF)) && !defined(ARM_MATH_AUTOVECTORIZE)

__STATIC_FORCEINLINE float32_t vecAddAcrossF32Mve(float32x4_t in)
{
    float32_t acc;

    acc = vgetq_lane(in, 0) + vgetq_lane(in, 1) +
          vgetq_lane(in, 2) + vgetq_lane(in, 3);

    return acc;
}

/* newton initial guess */
#define INVSQRT_MAGIC_F32           0x5f3759df
#define INV_NEWTON_INIT_F32         0x7EF127EA

#define INVSQRT_NEWTON_MVE_F32(invSqrt, xHalf, xStart)     \
{                                                          \
    float32x4_t tmp;                                       \
                                                           \
    /* tmp = xhalf * x * x */                              \
    tmp = vmulq(xStart, xStart);                           \
    tmp = vmulq(tmp, xHalf);                               \
    /* (1.5f - xhalf * x * x) */                           \
    tmp = vsubq(vdupq_n_f32(1.5f), tmp);                   \
    /* x = x*(1.5f-xhalf*x*x); */                          \
    invSqrt = vmulq(tmp, xStart);                          \
}
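
/*
 * Example (illustrative sketch, not part of the original header): one possible
 * way to combine the INVSQRT_MAGIC_F32 bit-level guess with the
 * INVSQRT_NEWTON_MVE_F32 refinement step above to approximate 1/sqrt(x) per
 * lane. The helper name is hypothetical.
 */
__STATIC_FORCEINLINE float32x4_t vecInvSqrtApproxF32Mve_example(float32x4_t vecIn)
{
    /* initial guess: reinterpret the floats as integers and apply the magic constant */
    int32x4_t   iGuess = vreinterpretq_s32_f32(vecIn);
    float32x4_t xHalf  = vmulq(vecIn, 0.5f);
    float32x4_t xStart, invSqrt;

    iGuess = vsubq(vdupq_n_s32(INVSQRT_MAGIC_F32), vshrq(iGuess, 1));
    xStart = vreinterpretq_f32_s32(iGuess);

    /* one Newton-Raphson refinement step; further steps can be chained for accuracy */
    INVSQRT_NEWTON_MVE_F32(invSqrt, xHalf, xStart);

    return invSqrt;
}
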
#endif /* defined (ARM_MATH_HELIUM) || defined(ARM_MATH_MVEF) */

/***************************************
Definitions available for f16 datatype with HW acceleration only
***************************************/
#if defined(ARM_FLOAT16_SUPPORTED)
#if defined (ARM_MATH_MVE_FLOAT16) && !defined(ARM_MATH_AUTOVECTORIZE)

__STATIC_FORCEINLINE float16_t vecAddAcrossF16Mve(float16x8_t in)
{
    float16x8_t tmpVec;
    _Float16    acc;

    tmpVec = (float16x8_t) vrev32q_s16((int16x8_t) in);
    in     = vaddq_f16(tmpVec, in);
    tmpVec = (float16x8_t) vrev64q_s32((int32x4_t) in);
    in     = vaddq_f16(tmpVec, in);
    acc    = (_Float16)vgetq_lane_f16(in, 0) + (_Float16)vgetq_lane_f16(in, 4);

    return acc;
}

__STATIC_FORCEINLINE float16x8_t __mve_cmplx_sum_intra_vec_f16(
    float16x8_t vecIn)
{
    float16x8_t vecTmp, vecOut;
    uint32_t    tmp;

    vecTmp = (float16x8_t) vrev64q_s32((int32x4_t) vecIn);
    // TO TRACK : using canonical addition leads to inefficient code generation for f16
    // vecTmp = vecTmp + vecAccCpx0;
    /*
     * Compute
     *  re0+re1 | im0+im1 | re0+re1 | im0+im1
     *  re2+re3 | im2+im3 | re2+re3 | im2+im3
     */
    vecTmp = vaddq_f16(vecTmp, vecIn);
    vecOut = vecTmp;
    /*
     * shift left, random tmp insertion in bottom
     */
    vecOut = vreinterpretq_f16_s32(vshlcq_s32(vreinterpretq_s32_f16(vecOut), &tmp, 32));
    /*
     * Compute:
     *      DONTCARE      |     DONTCARE      | re0+re1+re0+re1 | im0+im1+im0+im1
     *  re0+re1+re2+re3   | im0+im1+im2+im3   | re2+re3+re2+re3 | im2+im3+im2+im3
     */
    vecOut = vaddq_f16(vecOut, vecTmp);
    /*
     * Complex sum is in f16 elements 4 & 5
     * return full vector
     */
    return vecOut;
}

#define mve_cmplx_sum_intra_r_i_f16(vec, Re, Im)                  \
{                                                                 \
    float16x8_t vecOut = __mve_cmplx_sum_intra_vec_f16(vec);      \
    Re = vgetq_lane(vecOut, 4);                                   \
    Im = vgetq_lane(vecOut, 5);                                   \
}

__STATIC_FORCEINLINE void mve_cmplx_sum_intra_vec_f16(
    float16x8_t   vecIn,
    float16_t    *pOut)
{
    float16x8_t vecOut = __mve_cmplx_sum_intra_vec_f16(vecIn);
    /*
     * Complex sum is in f16 elements 4 & 5
     * use 32-bit extraction
     */
    *(float32_t *) pOut = ((float32x4_t) vecOut)[2];
}
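
/*
 * Example (illustrative sketch, not part of the original header): reducing four
 * complex float16 values (interleaved re/im, 8 halfwords) to one complex sum
 * with the helper above. The function name and parameters are hypothetical.
 */
__STATIC_FORCEINLINE void cmplx_sum_4_f16_example(float16_t const *pSrc, float16_t *pDst)
{
    /* load re0,im0, ..., re3,im3 into a single vector */
    float16x8_t vec = vldrhq_f16(pSrc);

    /* pDst[0] receives the summed real part, pDst[1] the summed imaginary part
       (the helper stores the pair as one 32-bit word) */
    mve_cmplx_sum_intra_vec_f16(vec, pDst);
}
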

#define INVSQRT_MAGIC_F16           0x59ba      /* ( 0x1ba = 0x3759df >> 13) */

/* canonical version of INVSQRT_NEWTON_MVE_F16 leads to bad performance */
#define INVSQRT_NEWTON_MVE_F16(invSqrt, xHalf, xStart)     \
{                                                          \
    float16x8_t tmp;                                       \
                                                           \
    /* tmp = xhalf * x * x */                              \
    tmp = vmulq(xStart, xStart);                           \
    tmp = vmulq(tmp, xHalf);                               \
    /* (1.5f - xhalf * x * x) */                           \
    tmp = vsubq(vdupq_n_f16((float16_t)1.5), tmp);         \
    /* x = x*(1.5f-xhalf*x*x); */                          \
    invSqrt = vmulq(tmp, xStart);                          \
}

#endif
#endif

/***************************************
Definitions available for MVEI and MVEF only
***************************************/
#if (defined (ARM_MATH_HELIUM) || defined(ARM_MATH_MVEF) || defined(ARM_MATH_MVEI)) && !defined(ARM_MATH_AUTOVECTORIZE)

/* Following functions are used to transpose matrix in f32 and q31 cases */
__STATIC_INLINE arm_status arm_mat_trans_32bit_2x2_mve(
    uint32_t * pDataSrc,
    uint32_t * pDataDest)
{
    static const uint32x4_t vecOffs = { 0, 2, 1, 3 };
    /*
     *
     * | 0   1 |   =>   | 0   2 |
     * | 2   3 |        | 1   3 |
     *
     */
    uint32x4_t vecIn = vldrwq_u32((uint32_t const *)pDataSrc);

    vstrwq_scatter_shifted_offset_u32(pDataDest, vecOffs, vecIn);

    return (ARM_MATH_SUCCESS);
}

__STATIC_INLINE arm_status arm_mat_trans_32bit_3x3_mve(
    uint32_t * pDataSrc,
    uint32_t * pDataDest)
{
    const uint32x4_t vecOffs1 = { 0, 3, 6, 1 };
    const uint32x4_t vecOffs2 = { 4, 7, 2, 5 };
    /*
     *
     *  | 0   1   2 |       | 0   3   6 |  4 x 32 flattened version  | 0  3  6  1 |
     *  | 3   4   5 |  =>   | 1   4   7 |            =>              | 4  7  2  5 |
     *  | 6   7   8 |       | 2   5   8 |        (row major)         | 8  .  .  . |
     *
     */
    uint32x4_t vecIn1 = vldrwq_u32((uint32_t const *) pDataSrc);
    uint32x4_t vecIn2 = vldrwq_u32((uint32_t const *) &pDataSrc[4]);

    vstrwq_scatter_shifted_offset_u32(pDataDest, vecOffs1, vecIn1);
    vstrwq_scatter_shifted_offset_u32(pDataDest, vecOffs2, vecIn2);

    pDataDest[8] = pDataSrc[8];

    return (ARM_MATH_SUCCESS);
}

__STATIC_INLINE arm_status arm_mat_trans_32bit_4x4_mve(uint32_t * pDataSrc, uint32_t * pDataDest)
{
    /*
     * 4x4 Matrix transposition
     * is 4 x de-interleave operation
     *
     *  0   1   2   3         0   4   8   12
     *  4   5   6   7         1   5   9   13
     *  8   9   10  11        2   6   10  14
     * 12  13  14  15         3   7   11  15
     */
    uint32x4x4_t vecIn;

    vecIn = vld4q((uint32_t const *) pDataSrc);
    vstrwq(pDataDest, vecIn.val[0]);
    pDataDest += 4;
    vstrwq(pDataDest, vecIn.val[1]);
    pDataDest += 4;
    vstrwq(pDataDest, vecIn.val[2]);
    pDataDest += 4;
    vstrwq(pDataDest, vecIn.val[3]);

    return (ARM_MATH_SUCCESS);
}
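
/*
 * Example (illustrative sketch, not part of the original header): the 32-bit
 * transpose helpers operate on raw 32-bit words, so a 4x4 float32 (or q31)
 * matrix can be transposed by casting its data pointers. The function name
 * and parameters are hypothetical.
 */
__STATIC_INLINE void mat_trans_f32_4x4_example(float32_t * pSrc, float32_t * pDst)
{
    /* row-major 4x4 input, row-major transposed 4x4 output;
       elements are passed as raw 32-bit words */
    (void) arm_mat_trans_32bit_4x4_mve((uint32_t *) pSrc, (uint32_t *) pDst);
}
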

__STATIC_INLINE arm_status arm_mat_trans_32bit_generic_mve(
    uint16_t    srcRows,
    uint16_t    srcCols,
    uint32_t  * pDataSrc,
    uint32_t  * pDataDest)
{
    uint32x4_t      vecOffs;
    uint32_t        i;
    uint32_t        blkCnt;
    uint32_t const *pDataC;
    uint32_t       *pDataDestR;
    uint32x4_t      vecIn;

    vecOffs = vidupq_u32((uint32_t)0, 1);
    vecOffs = vecOffs * srcCols;

    i = srcCols;
    do
    {
        pDataC     = (uint32_t const *) pDataSrc;
        pDataDestR = pDataDest;

        blkCnt = srcRows >> 2;
        while (blkCnt > 0U)
        {
            vecIn = vldrwq_gather_shifted_offset_u32(pDataC, vecOffs);
            vstrwq(pDataDestR, vecIn);
            pDataDestR += 4;
            pDataC = pDataC + srcCols * 4;
            /*
             * Decrement the blockSize loop counter
             */
            blkCnt--;
        }
        /*
         * tail
         */
        blkCnt = srcRows & 3;
        if (blkCnt > 0U)
        {
            mve_pred16_t p0 = vctp32q(blkCnt);
            vecIn = vldrwq_gather_shifted_offset_u32(pDataC, vecOffs);
            vstrwq_p(pDataDestR, vecIn, p0);
        }

        pDataSrc  += 1;
        pDataDest += srcRows;
    }
    while (--i);

    return (ARM_MATH_SUCCESS);
}

__STATIC_INLINE arm_status arm_mat_cmplx_trans_32bit(
    uint16_t    srcRows,
    uint16_t    srcCols,
    uint32_t   *pDataSrc,
    uint16_t    dstRows,
    uint16_t    dstCols,
    uint32_t   *pDataDest)
{
    uint32_t        i;
    uint32_t const *pDataC;
    uint32_t       *pDataRow;
    uint32_t       *pDataDestR, *pDataDestRow;
    uint32x4_t      vecOffsRef, vecOffsCur;
    uint32_t        blkCnt;
    uint32x4_t      vecIn;

#ifdef ARM_MATH_MATRIX_CHECK
    /*
     * Check for matrix mismatch condition
     */
    if ((srcRows != dstCols) || (srcCols != dstRows))
    {
        /*
         * Set status as ARM_MATH_SIZE_MISMATCH
         */
        return ARM_MATH_SIZE_MISMATCH;
    }
#else
    (void)dstRows;
    (void)dstCols;
#endif

    /* 2x2, 3x3 and 4x4 specialization to be added */

    vecOffsRef[0] = 0;
    vecOffsRef[1] = 1;
    vecOffsRef[2] = srcCols << 1;
    vecOffsRef[3] = (srcCols << 1) + 1;

    pDataRow     = pDataSrc;
    pDataDestRow = pDataDest;
    i            = srcCols;
    do
    {
        pDataC     = (uint32_t const *) pDataRow;
        pDataDestR = pDataDestRow;
        vecOffsCur = vecOffsRef;

        blkCnt = (srcRows * CMPLX_DIM) >> 2;
        while (blkCnt > 0U)
        {
            vecIn = vldrwq_gather_shifted_offset(pDataC, vecOffsCur);
            vstrwq(pDataDestR, vecIn);
            pDataDestR += 4;
            vecOffsCur = vaddq(vecOffsCur, (srcCols << 2));
            /*
             * Decrement the blockSize loop counter
             */
            blkCnt--;
        }
        /*
         * tail
         * (will be merged thru tail predication)
         */
        blkCnt = (srcRows * CMPLX_DIM) & 3;
        if (blkCnt > 0U)
        {
            mve_pred16_t p0 = vctp32q(blkCnt);
            vecIn = vldrwq_gather_shifted_offset(pDataC, vecOffsCur);
            vstrwq_p(pDataDestR, vecIn, p0);
        }

        pDataRow     += CMPLX_DIM;
        pDataDestRow += (srcRows * CMPLX_DIM);
    }
    while (--i);

    return (ARM_MATH_SUCCESS);
}
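
/*
 * Example (illustrative sketch, not part of the original header): transposing a
 * rows x cols complex float32 (or q31) matrix stored with interleaved
 * real/imaginary parts (CMPLX_DIM 32-bit words per element). The function name
 * and parameters are hypothetical.
 */
__STATIC_INLINE arm_status mat_cmplx_trans_f32_example(uint16_t rows, uint16_t cols,
                                                       float32_t * pSrc, float32_t * pDst)
{
    /* destination is a cols x rows complex matrix; the last two arguments are
       the destination dimensions checked when ARM_MATH_MATRIX_CHECK is defined */
    return arm_mat_cmplx_trans_32bit(rows, cols, (uint32_t *) pSrc,
                                     cols, rows, (uint32_t *) pDst);
}
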

__STATIC_INLINE arm_status arm_mat_trans_16bit_2x2(uint16_t * pDataSrc, uint16_t * pDataDest)
{
    pDataDest[0] = pDataSrc[0];
    pDataDest[3] = pDataSrc[3];
    pDataDest[2] = pDataSrc[1];
    pDataDest[1] = pDataSrc[2];

    return (ARM_MATH_SUCCESS);
}

__STATIC_INLINE arm_status arm_mat_trans_16bit_3x3_mve(uint16_t * pDataSrc, uint16_t * pDataDest)
{
    static const uint16_t stridesTr33[8] = { 0, 3, 6, 1, 4, 7, 2, 5 };
    uint16x8_t vecOffs1;
    uint16x8_t vecIn1;
    /*
     *
     *  | 0   1   2 |       | 0   3   6 |  8 x 16 flattened version  | 0  3  6  1  4  7  2  5 |
     *  | 3   4   5 |  =>   | 1   4   7 |            =>              | 8  .  .  .  .  .  .  . |
     *  | 6   7   8 |       | 2   5   8 |        (row major)
     *
     */
    vecOffs1 = vldrhq_u16((uint16_t const *) stridesTr33);
    vecIn1   = vldrhq_u16((uint16_t const *) pDataSrc);

    vstrhq_scatter_shifted_offset_u16(pDataDest, vecOffs1, vecIn1);

    pDataDest[8] = pDataSrc[8];

    return (ARM_MATH_SUCCESS);
}

__STATIC_INLINE arm_status arm_mat_trans_16bit_4x4_mve(uint16_t * pDataSrc, uint16_t * pDataDest)
{
    static const uint16_t stridesTr44_1[8] = { 0, 4, 8, 12, 1, 5, 9, 13 };
    static const uint16_t stridesTr44_2[8] = { 2, 6, 10, 14, 3, 7, 11, 15 };
    uint16x8_t vecOffs1, vecOffs2;
    uint16x8_t vecIn1, vecIn2;
    uint16_t const * pDataSrcVec = (uint16_t const *) pDataSrc;
    /*
     * 4x4 Matrix transposition
     *
     * | 0   1   2   3  |       | 0   4   8   12 |   8 x 16 flattened version
     * | 4   5   6   7  |  =>   | 1   5   9   13 |   =>  [0  4  8  12  1  5  9  13]
     * | 8   9   10  11 |       | 2   6   10  14 |       [2  6  10 14  3  7  11 15]
     * | 12  13  14  15 |       | 3   7   11  15 |
     */
    vecOffs1 = vldrhq_u16((uint16_t const *) stridesTr44_1);
    vecOffs2 = vldrhq_u16((uint16_t const *) stridesTr44_2);
    vecIn1   = vldrhq_u16(pDataSrcVec);
    pDataSrcVec += 8;
    vecIn2   = vldrhq_u16(pDataSrcVec);

    vstrhq_scatter_shifted_offset_u16(pDataDest, vecOffs1, vecIn1);
    vstrhq_scatter_shifted_offset_u16(pDataDest, vecOffs2, vecIn2);

    return (ARM_MATH_SUCCESS);
}

__STATIC_INLINE arm_status arm_mat_trans_16bit_generic(
    uint16_t    srcRows,
    uint16_t    srcCols,
    uint16_t  * pDataSrc,
    uint16_t  * pDataDest)
{
    uint16x8_t      vecOffs;
    uint32_t        i;
    uint32_t        blkCnt;
    uint16_t const *pDataC;
    uint16_t       *pDataDestR;
    uint16x8_t      vecIn;

    vecOffs = vidupq_u16((uint32_t)0, 1);
    vecOffs = vecOffs * srcCols;

    i = srcCols;
    while (i > 0U)
    {
        pDataC     = (uint16_t const *) pDataSrc;
        pDataDestR = pDataDest;

        blkCnt = srcRows >> 3;
        while (blkCnt > 0U)
        {
            vecIn = vldrhq_gather_shifted_offset_u16(pDataC, vecOffs);
            vstrhq_u16(pDataDestR, vecIn);
            pDataDestR += 8;
            pDataC = pDataC + srcCols * 8;
            /*
             * Decrement the blockSize loop counter
             */
            blkCnt--;
        }
        /*
         * tail
         */
        blkCnt = srcRows & 7;
        if (blkCnt > 0U)
        {
            mve_pred16_t p0 = vctp16q(blkCnt);
            vecIn = vldrhq_gather_shifted_offset_u16(pDataC, vecOffs);
            vstrhq_p_u16(pDataDestR, vecIn, p0);
        }
        pDataSrc  += 1;
        pDataDest += srcRows;
        i--;
    }

    return (ARM_MATH_SUCCESS);
}

__STATIC_INLINE arm_status arm_mat_cmplx_trans_16bit(
    uint16_t    srcRows,
    uint16_t    srcCols,
    uint16_t   *pDataSrc,
    uint16_t    dstRows,
    uint16_t    dstCols,
    uint16_t   *pDataDest)
{
    static const uint16_t loadCmplxCol[8] = { 0, 0, 1, 1, 2, 2, 3, 3 };
    int             i;
    uint16x8_t      vecOffsRef, vecOffsCur;
    uint16_t const *pDataC;
    uint16_t       *pDataRow;
    uint16_t       *pDataDestR, *pDataDestRow;
    uint32_t        blkCnt;
    uint16x8_t      vecIn;

#ifdef ARM_MATH_MATRIX_CHECK
    /*
     * Check for matrix mismatch condition
     */
    if ((srcRows != dstCols) || (srcCols != dstRows))
    {
        /*
         * Set status as ARM_MATH_SIZE_MISMATCH
         */
        return ARM_MATH_SIZE_MISMATCH;
    }
#else
    (void)dstRows;
    (void)dstCols;
#endif

    /*
     * 2x2, 3x3 and 4x4 specialization to be added
     */

    /*
     * build [0, 1, 2xcol, 2xcol+1, 4xcol, 4xcol+1, 6xcol, 6xcol+1]
     */
    vecOffsRef = vldrhq_u16((uint16_t const *) loadCmplxCol);
    vecOffsRef = vmulq(vecOffsRef, (uint16_t) (srcCols * CMPLX_DIM))
                    + viwdupq_u16((uint32_t)0, (uint16_t) 2, 1);

    pDataRow     = pDataSrc;
    pDataDestRow = pDataDest;
    i            = srcCols;
    do
    {
        pDataC     = (uint16_t const *) pDataRow;
        pDataDestR = pDataDestRow;
        vecOffsCur = vecOffsRef;

        blkCnt = (srcRows * CMPLX_DIM) >> 3;
        while (blkCnt > 0U)
        {
            vecIn = vldrhq_gather_shifted_offset(pDataC, vecOffsCur);
            vstrhq(pDataDestR, vecIn);
            pDataDestR += 8;    // VEC_LANES_U16
            vecOffsCur = vaddq(vecOffsCur, (srcCols << 3));
            /*
             * Decrement the blockSize loop counter
             */
            blkCnt--;
        }
        /*
         * tail
         * (will be merged thru tail predication)
         */
        blkCnt = (srcRows * CMPLX_DIM) & 0x7;
        if (blkCnt > 0U)
        {
            mve_pred16_t p0 = vctp16q(blkCnt);
            vecIn = vldrhq_gather_shifted_offset(pDataC, vecOffsCur);
            vstrhq_p(pDataDestR, vecIn, p0);
        }

        pDataRow     += CMPLX_DIM;
        pDataDestRow += (srcRows * CMPLX_DIM);
    }
    while (--i);

    return (ARM_MATH_SUCCESS);
}

#endif /* MVEF and MVEI */

/***************************************
Definitions available for MVEI only
***************************************/
#if (defined (ARM_MATH_HELIUM) || defined(ARM_MATH_MVEI)) && !defined(ARM_MATH_AUTOVECTORIZE)

#include "arm_common_tables.h"

#define MVE_ASRL_SAT16(acc, shift)          ((sqrshrl_sat48(acc, -(32-shift)) >> 32) & 0xffffffff)
#define MVE_ASRL_SAT32(acc, shift)          ((sqrshrl(acc, -(32-shift)) >> 32) & 0xffffffff)
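
/*
 * Example (illustrative sketch, not part of the original header): narrowing a
 * 64-bit accumulator to q15/q31 with the saturating shift macros above. The
 * function names and parameters are hypothetical.
 */
__STATIC_FORCEINLINE q15_t narrow_acc_to_q15_example(int64_t acc)
{
    /* shift the 64-bit accumulator right by 15, saturated to the q15 range */
    return (q15_t) MVE_ASRL_SAT16(acc, 15);
}

__STATIC_FORCEINLINE q31_t narrow_acc_to_q31_example(int64_t acc)
{
    /* shift the 64-bit accumulator right by 31, saturated to the q31 range */
    return (q31_t) MVE_ASRL_SAT32(acc, 31);
}
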

#if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FAST_TABLES) || defined(ARM_TABLE_FAST_SQRT_Q31_MVE)
__STATIC_INLINE q31x4_t FAST_VSQRT_Q31(q31x4_t vecIn)
{
    q63x2_t vecTmpLL;
    q31x4_t vecTmp0, vecTmp1;
    q31_t   scale;
    q63_t   tmp64;
    q31x4_t vecNrm, vecDst, vecIdx, vecSignBits;

    vecSignBits = vclsq(vecIn);
    vecSignBits = vbicq_n_s32(vecSignBits, 1);
    /*
     * in = in << no_of_sign_bits;
     */
    vecNrm = vshlq(vecIn, vecSignBits);
    /*
     * index = in >> 24;
     */
    vecIdx = vecNrm >> 24;
    vecIdx = vecIdx << 1;

    vecTmp0 = vldrwq_gather_shifted_offset_s32(sqrtTable_Q31, (uint32x4_t)vecIdx);

    vecIdx = vecIdx + 1;

    vecTmp1 = vldrwq_gather_shifted_offset_s32(sqrtTable_Q31, (uint32x4_t)vecIdx);

    vecTmp1 = vqrdmulhq(vecTmp1, vecNrm);
    vecTmp0 = vecTmp0 - vecTmp1;
    vecTmp1 = vqrdmulhq(vecTmp0, vecTmp0);
    vecTmp1 = vqrdmulhq(vecNrm, vecTmp1);
    vecTmp1 = vdupq_n_s32(0x18000000) - vecTmp1;
    vecTmp0 = vqrdmulhq(vecTmp0, vecTmp1);
    vecTmpLL = vmullbq_int(vecNrm, vecTmp0);

    /*
     * scale elements 0, 2
     */
    scale = 26 + (vecSignBits[0] >> 1);
    tmp64 = asrl(vecTmpLL[0], scale);
    vecDst[0] = (q31_t) tmp64;

    scale = 26 + (vecSignBits[2] >> 1);
    tmp64 = asrl(vecTmpLL[1], scale);
    vecDst[2] = (q31_t) tmp64;

    vecTmpLL = vmulltq_int(vecNrm, vecTmp0);

    /*
     * scale elements 1, 3
     */
    scale = 26 + (vecSignBits[1] >> 1);
    tmp64 = asrl(vecTmpLL[0], scale);
    vecDst[1] = (q31_t) tmp64;

    scale = 26 + (vecSignBits[3] >> 1);
    tmp64 = asrl(vecTmpLL[1], scale);
    vecDst[3] = (q31_t) tmp64;

    /*
     * set negative values to 0
     */
    vecDst = vdupq_m(vecDst, 0, vcmpltq_n_s32(vecIn, 0));

    return vecDst;
}
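
/*
 * Example (illustrative sketch, not part of the original header): computing
 * four Q31 square roots at once. FAST_VSQRT_Q31 expects non-negative Q31
 * inputs and forces negative lanes to 0. The function name and parameters are
 * hypothetical.
 */
__STATIC_INLINE void vsqrt_q31_example(q31_t const * pSrc, q31_t * pDst)
{
    q31x4_t vecIn  = vld1q(pSrc);            /* load 4 Q31 values        */
    q31x4_t vecOut = FAST_VSQRT_Q31(vecIn);  /* per-lane Q31 square root */

    vst1q(pDst, vecOut);                     /* store the 4 results      */
}
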
#endif

#if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FAST_TABLES) || defined(ARM_TABLE_FAST_SQRT_Q15_MVE)
__STATIC_INLINE q15x8_t FAST_VSQRT_Q15(q15x8_t vecIn)
{
    q31x4_t vecTmpLev, vecTmpLodd, vecSignL;
    q15x8_t vecTmp0, vecTmp1;
    q15x8_t vecNrm, vecDst, vecIdx, vecSignBits;

    vecDst = vuninitializedq_s16();

    vecSignBits = vclsq(vecIn);
    vecSignBits = vbicq_n_s16(vecSignBits, 1);
    /*
     * in = in << no_of_sign_bits;
     */
    vecNrm = vshlq(vecIn, vecSignBits);

    vecIdx = vecNrm >> 8;
    vecIdx = vecIdx << 1;

    vecTmp0 = vldrhq_gather_shifted_offset_s16(sqrtTable_Q15, (uint16x8_t)vecIdx);

    vecIdx = vecIdx + 1;

    vecTmp1 = vldrhq_gather_shifted_offset_s16(sqrtTable_Q15, (uint16x8_t)vecIdx);

    vecTmp1 = vqrdmulhq(vecTmp1, vecNrm);
    vecTmp0 = vecTmp0 - vecTmp1;
    vecTmp1 = vqrdmulhq(vecTmp0, vecTmp0);
    vecTmp1 = vqrdmulhq(vecNrm, vecTmp1);
    vecTmp1 = vdupq_n_s16(0x1800) - vecTmp1;
    vecTmp0 = vqrdmulhq(vecTmp0, vecTmp1);

    vecSignBits = vecSignBits >> 1;

    vecTmpLev  = vmullbq_int(vecNrm, vecTmp0);
    vecTmpLodd = vmulltq_int(vecNrm, vecTmp0);

    vecTmp0 = vecSignBits + 10;
    /*
     * negate sign to apply register based vshl
     */
    vecTmp0 = -vecTmp0;

    /*
     * shift even elements
     */
    vecSignL  = vmovlbq(vecTmp0);
    vecTmpLev = vshlq(vecTmpLev, vecSignL);
    /*
     * shift odd elements
     */
    vecSignL   = vmovltq(vecTmp0);
    vecTmpLodd = vshlq(vecTmpLodd, vecSignL);
    /*
     * merge and narrow odd and even parts
     */
    vecDst = vmovnbq_s32(vecDst, vecTmpLev);
    vecDst = vmovntq_s32(vecDst, vecTmpLodd);
    /*
     * set negative values to 0
     */
    vecDst = vdupq_m(vecDst, 0, vcmpltq_n_s16(vecIn, 0));

    return vecDst;
}
#endif

#endif /* defined (ARM_MATH_HELIUM) || defined(ARM_MATH_MVEI) */

#ifdef __cplusplus
}
#endif

#endif