/* ----------------------------------------------------------------------
 * Project:      CMSIS DSP Library
 * Title:        arm_helium_utils.h
 * Description:  Utility functions for Helium development
 *
 * @version  V1.10.0
 * @date     08 July 2021
 *
 * Target Processor: Cortex-M and Cortex-A cores
 * -------------------------------------------------------------------- */
/*
 * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved.
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef _ARM_UTILS_HELIUM_H_
#define _ARM_UTILS_HELIUM_H_

#ifdef __cplusplus
extern "C"
{
#endif

/***************************************
Definitions available for MVEF and MVEI
***************************************/
#if (defined (ARM_MATH_HELIUM) || defined(ARM_MATH_MVEF) || defined(ARM_MATH_MVEI)) && !defined(ARM_MATH_AUTOVECTORIZE)

#define INACTIVELANE            0 /* inactive lane content */

#endif /* defined (ARM_MATH_HELIUM) || defined(ARM_MATH_MVEF) || defined(ARM_MATH_MVEI) */

/***************************************
Definitions available for MVEF only
***************************************/
#if (defined (ARM_MATH_HELIUM) || defined(ARM_MATH_MVEF)) && !defined(ARM_MATH_AUTOVECTORIZE)

__STATIC_FORCEINLINE float32_t vecAddAcrossF32Mve(float32x4_t in)
{
    float32_t acc;

    acc = vgetq_lane(in, 0) + vgetq_lane(in, 1) +
          vgetq_lane(in, 2) + vgetq_lane(in, 3);

    return acc;
}
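
/*
 * Illustrative sketch (not part of the original header): a typical use of
 * vecAddAcrossF32Mve is folding the per-lane partial sums of a vectorized
 * dot product into a scalar. The helper name is hypothetical, and blockSize
 * is assumed to be a multiple of 4.
 */
__STATIC_FORCEINLINE float32_t dot_f32_example(const float32_t *pA,
                                               const float32_t *pB,
                                               uint32_t blockSize)
{
    float32x4_t vecAcc = vdupq_n_f32(0.0f);

    for (uint32_t i = 0; i < blockSize; i += 4)
    {
        /* vecAcc += pA[i..i+3] * pB[i..i+3] */
        vecAcc = vfmaq(vecAcc, vld1q(&pA[i]), vld1q(&pB[i]));
    }
    /* reduce the four per-lane accumulators to one scalar */
    return vecAddAcrossF32Mve(vecAcc);
}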
/* Newton initial guess */
#define INVSQRT_MAGIC_F32           0x5f3759df
#define INV_NEWTON_INIT_F32         0x7EF127EA

#define INVSQRT_NEWTON_MVE_F32(invSqrt, xHalf, xStart)     \
{                                                          \
    float32x4_t tmp;                                       \
                                                           \
    /* tmp = xhalf * x * x */                              \
    tmp = vmulq(xStart, xStart);                           \
    tmp = vmulq(tmp, xHalf);                               \
    /* (1.5f - xhalf * x * x) */                           \
    tmp = vsubq(vdupq_n_f32(1.5f), tmp);                   \
    /* x = x*(1.5f-xhalf*x*x); */                          \
    invSqrt = vmulq(tmp, xStart);                          \
}
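
/*
 * Illustrative sketch (not part of the original header): the magic constant
 * and the Newton macro above combine into a fast vectorized reciprocal
 * square root. The bit-level trick seeds the estimate, and each
 * INVSQRT_NEWTON_MVE_F32 step refines it as y = y * (1.5 - 0.5*x*y*y).
 * The helper name is hypothetical; inputs are assumed positive.
 */
__STATIC_FORCEINLINE float32x4_t vrsqrt_example_f32(float32x4_t vecIn)
{
    float32x4_t vecHalf = vmulq(vecIn, 0.5f);
    /* initial guess: y0 = reinterpret(MAGIC - (reinterpret(x) >> 1)) */
    int32x4_t vecBits = vsubq(vdupq_n_s32(INVSQRT_MAGIC_F32),
                              vshrq(vreinterpretq_s32_f32(vecIn), 1));
    float32x4_t vecApprox = vreinterpretq_f32_s32(vecBits);

    /* two Newton-Raphson refinement steps */
    INVSQRT_NEWTON_MVE_F32(vecApprox, vecHalf, vecApprox);
    INVSQRT_NEWTON_MVE_F32(vecApprox, vecHalf, vecApprox);

    return vecApprox;
}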
#endif /* defined (ARM_MATH_HELIUM) || defined(ARM_MATH_MVEF) */

/***************************************
Definitions available for f16 datatype with HW acceleration only
***************************************/
#if defined(ARM_FLOAT16_SUPPORTED)
#if defined (ARM_MATH_MVE_FLOAT16) && !defined(ARM_MATH_AUTOVECTORIZE)

__STATIC_FORCEINLINE float16_t vecAddAcrossF16Mve(float16x8_t in)
{
    float16x8_t tmpVec;
    _Float16 acc;

    tmpVec = (float16x8_t) vrev32q_s16((int16x8_t) in);
    in = vaddq_f16(tmpVec, in);
    tmpVec = (float16x8_t) vrev64q_s32((int32x4_t) in);
    in = vaddq_f16(tmpVec, in);
    acc = (_Float16)vgetq_lane_f16(in, 0) + (_Float16)vgetq_lane_f16(in, 4);

    return acc;
}

__STATIC_FORCEINLINE float16x8_t __mve_cmplx_sum_intra_vec_f16(
    float16x8_t   vecIn)
{
    float16x8_t   vecTmp, vecOut;
    uint32_t      tmp;

    vecTmp = (float16x8_t) vrev64q_s32((int32x4_t) vecIn);
    // TO TRACK : using canonical addition leads to inefficient code generation for f16
    // vecTmp = vecTmp + vecAccCpx0;
    /*
     * Compute
     *  re0+re1 | im0+im1 | re0+re1 | im0+im1
     *  re2+re3 | im2+im3 | re2+re3 | im2+im3
     */
    vecTmp = vaddq_f16(vecTmp, vecIn);
    vecOut = vecTmp;
    /*
     * shift the vector left by 32 bits; the (don't-care) contents of tmp
     * are inserted at the bottom
     */
    vecOut = vreinterpretq_f16_s32(vshlcq_s32(vreinterpretq_s32_f16(vecOut), &tmp, 32));
    /*
     * Compute:
     *     DONTCARE     |     DONTCARE    | re0+re1+re0+re1 | im0+im1+im0+im1
     * re0+re1+re2+re3 | im0+im1+im2+im3 | re2+re3+re2+re3 | im2+im3+im2+im3
     */
    vecOut = vaddq_f16(vecOut, vecTmp);
    /*
     * Cmplx sum is in the 4th & 5th f16 elements;
     * return the full vector
     */
    return vecOut;
}

#define mve_cmplx_sum_intra_r_i_f16(vec, Re, Im)                 \
{                                                                \
    float16x8_t vecOut = __mve_cmplx_sum_intra_vec_f16(vec);     \
    Re = vgetq_lane(vecOut, 4);                                  \
    Im = vgetq_lane(vecOut, 5);                                  \
}

__STATIC_FORCEINLINE void mve_cmplx_sum_intra_vec_f16(
    float16x8_t   vecIn,
    float16_t    *pOut)
{
    float16x8_t vecOut = __mve_cmplx_sum_intra_vec_f16(vecIn);
    /*
     * Cmplx sum is in the 4th & 5th f16 elements;
     * use a single 32-bit extraction
     */
    *(float32_t *) pOut = ((float32x4_t) vecOut)[2];
}
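
/*
 * Illustrative sketch (not part of the original header): reducing a vector
 * of interleaved f16 complex products, e.g. the tail of a complex dot
 * product. The helper name is hypothetical.
 */
__STATIC_FORCEINLINE void cmplx_reduce_example_f16(float16x8_t vecAcc,
                                                   float16_t *pReal,
                                                   float16_t *pImag)
{
    float16_t re, im;
    /* lanes 4 and 5 of the intra-vector sum hold the complex result */
    mve_cmplx_sum_intra_r_i_f16(vecAcc, re, im);
    *pReal = re;
    *pImag = im;
}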
#define INVSQRT_MAGIC_F16           0x59ba      /*  ( 0x1ba = 0x3759df >> 13) */

/* the canonical version of INVSQRT_NEWTON_MVE_F16 leads to bad performance */
#define INVSQRT_NEWTON_MVE_F16(invSqrt, xHalf, xStart)     \
{                                                          \
    float16x8_t tmp;                                       \
                                                           \
    /* tmp = xhalf * x * x */                              \
    tmp = vmulq(xStart, xStart);                           \
    tmp = vmulq(tmp, xHalf);                               \
    /* (1.5f - xhalf * x * x) */                           \
    tmp = vsubq(vdupq_n_f16((float16_t)1.5), tmp);         \
    /* x = x*(1.5f-xhalf*x*x); */                          \
    invSqrt = vmulq(tmp, xStart);                          \
}

#endif
#endif

/***************************************
Definitions available for MVEI and MVEF only
***************************************/
#if (defined (ARM_MATH_HELIUM) || defined(ARM_MATH_MVEF) || defined(ARM_MATH_MVEI)) && !defined(ARM_MATH_AUTOVECTORIZE)

/* The following functions are used to transpose matrices in the f32 and q31 cases */
__STATIC_INLINE arm_status arm_mat_trans_32bit_2x2_mve(
    uint32_t * pDataSrc,
    uint32_t * pDataDest)
{
    static const uint32x4_t vecOffs = { 0, 2, 1, 3 };
    /*
     *
     * | 0   1 |   =>  | 0   2 |
     * | 2   3 |       | 1   3 |
     *
     */
    uint32x4_t vecIn = vldrwq_u32((uint32_t const *)pDataSrc);
    vstrwq_scatter_shifted_offset_u32(pDataDest, vecOffs, vecIn);

    return (ARM_MATH_SUCCESS);
}

__STATIC_INLINE arm_status arm_mat_trans_32bit_3x3_mve(
    uint32_t * pDataSrc,
    uint32_t * pDataDest)
{
    const uint32x4_t vecOffs1 = { 0, 3, 6, 1 };
    const uint32x4_t vecOffs2 = { 4, 7, 2, 5 };
    /*
     *
     *  | 0   1   2 |       | 0   3   6 |  4 x 32-bit flattened version | 0  3  6  1 |
     *  | 3   4   5 |  =>   | 1   4   7 |            =>                 | 4  7  2  5 |
     *  | 6   7   8 |       | 2   5   8 |         (row major)           | 8  .  .  . |
     *
     */
    uint32x4_t vecIn1 = vldrwq_u32((uint32_t const *) pDataSrc);
    uint32x4_t vecIn2 = vldrwq_u32((uint32_t const *) &pDataSrc[4]);

    vstrwq_scatter_shifted_offset_u32(pDataDest, vecOffs1, vecIn1);
    vstrwq_scatter_shifted_offset_u32(pDataDest, vecOffs2, vecIn2);

    pDataDest[8] = pDataSrc[8];

    return (ARM_MATH_SUCCESS);
}

__STATIC_INLINE arm_status arm_mat_trans_32bit_4x4_mve(uint32_t * pDataSrc, uint32_t * pDataDest)
{
    /*
     * 4x4 matrix transposition
     * is a 4 x de-interleave operation
     *
     * 0   1   2   3          0   4   8   12
     * 4   5   6   7    =>    1   5   9   13
     * 8   9   10  11         2   6   10  14
     * 12  13  14  15         3   7   11  15
     */
    uint32x4x4_t vecIn;

    vecIn = vld4q((uint32_t const *) pDataSrc);
    vstrwq(pDataDest, vecIn.val[0]);
    pDataDest += 4;
    vstrwq(pDataDest, vecIn.val[1]);
    pDataDest += 4;
    vstrwq(pDataDest, vecIn.val[2]);
    pDataDest += 4;
    vstrwq(pDataDest, vecIn.val[3]);

    return (ARM_MATH_SUCCESS);
}
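
/*
 * Illustrative sketch (not part of the original header): because the 4x4
 * kernel is a pure de-interleave, a row-major 4x4 matrix transposes in one
 * vld4q plus four stores. The helper name and sample data are hypothetical;
 * f32 matrices use the same kernel through pointer reinterpretation.
 */
__STATIC_INLINE void mat_trans_4x4_example(void)
{
    uint32_t src[16] = { 0, 1, 2,  3,  4,  5,  6,  7,
                         8, 9, 10, 11, 12, 13, 14, 15 };
    uint32_t dst[16];

    /* dst becomes { 0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, 3, 7, 11, 15 } */
    (void)arm_mat_trans_32bit_4x4_mve(src, dst);
}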
__STATIC_INLINE arm_status arm_mat_trans_32bit_generic_mve(
    uint16_t    srcRows,
    uint16_t    srcCols,
    uint32_t  * pDataSrc,
    uint32_t  * pDataDest)
{
    uint32x4_t      vecOffs;
    uint32_t        i;
    uint32_t        blkCnt;
    uint32_t const *pDataC;
    uint32_t       *pDataDestR;
    uint32x4_t      vecIn;

    vecOffs = vidupq_u32((uint32_t)0, 1);
    vecOffs = vecOffs * srcCols;

    i = srcCols;
    do
    {
        pDataC = (uint32_t const *) pDataSrc;
        pDataDestR = pDataDest;

        blkCnt = srcRows >> 2;
        while (blkCnt > 0U)
        {
            vecIn = vldrwq_gather_shifted_offset_u32(pDataC, vecOffs);
            vstrwq(pDataDestR, vecIn);
            pDataDestR += 4;
            pDataC = pDataC + srcCols * 4;
            /*
             * Decrement the blockSize loop counter
             */
            blkCnt--;
        }
        /*
         * tail
         */
        blkCnt = srcRows & 3;
        if (blkCnt > 0U)
        {
            mve_pred16_t p0 = vctp32q(blkCnt);
            vecIn = vldrwq_gather_shifted_offset_u32(pDataC, vecOffs);
            vstrwq_p(pDataDestR, vecIn, p0);
        }
        pDataSrc += 1;
        pDataDest += srcRows;
    }
    while (--i);

    return (ARM_MATH_SUCCESS);
}
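
/*
 * Illustrative sketch (not part of the original header): the generic kernel
 * gathers one source column per outer iteration (lane stride = srcCols) and
 * stores it contiguously as a destination row; tail predication covers
 * srcRows not being a multiple of 4. The helper name and the 3x5 shape are
 * hypothetical.
 */
__STATIC_INLINE void mat_trans_generic_example(void)
{
    uint32_t src[3 * 5];
    uint32_t dst[5 * 3];

    for (uint32_t k = 0; k < 3 * 5; k++)
    {
        src[k] = k;
    }
    /* after the call: dst[c * 3 + r] == src[r * 5 + c] */
    (void)arm_mat_trans_32bit_generic_mve(3, 5, src, dst);
}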
__STATIC_INLINE arm_status arm_mat_cmplx_trans_32bit(
    uint16_t    srcRows,
    uint16_t    srcCols,
    uint32_t   *pDataSrc,
    uint16_t    dstRows,
    uint16_t    dstCols,
    uint32_t   *pDataDest)
{
    uint32_t        i;
    uint32_t const *pDataC;
    uint32_t       *pDataRow;
    uint32_t       *pDataDestR, *pDataDestRow;
    uint32x4_t      vecOffsRef, vecOffsCur;
    uint32_t        blkCnt;
    uint32x4_t      vecIn;

#ifdef ARM_MATH_MATRIX_CHECK
    /*
     * Check for matrix mismatch condition
     */
    if ((srcRows != dstCols) || (srcCols != dstRows))
    {
        /*
         * Set status as ARM_MATH_SIZE_MISMATCH
         */
        return ARM_MATH_SIZE_MISMATCH;
    }
#else
    (void)dstRows;
    (void)dstCols;
#endif

    /* 2x2, 3x3 and 4x4 specializations to be added */

    vecOffsRef[0] = 0;
    vecOffsRef[1] = 1;
    vecOffsRef[2] = srcCols << 1;
    vecOffsRef[3] = (srcCols << 1) + 1;

    pDataRow = pDataSrc;
    pDataDestRow = pDataDest;
    i = srcCols;
    do
    {
        pDataC = (uint32_t const *) pDataRow;
        pDataDestR = pDataDestRow;
        vecOffsCur = vecOffsRef;

        blkCnt = (srcRows * CMPLX_DIM) >> 2;
        while (blkCnt > 0U)
        {
            vecIn = vldrwq_gather_shifted_offset(pDataC, vecOffsCur);
            vstrwq(pDataDestR, vecIn);
            pDataDestR += 4;
            vecOffsCur = vaddq(vecOffsCur, (srcCols << 2));
            /*
             * Decrement the blockSize loop counter
             */
            blkCnt--;
        }
        /*
         * tail
         * (will be merged through tail predication)
         */
        blkCnt = (srcRows * CMPLX_DIM) & 3;
        if (blkCnt > 0U)
        {
            mve_pred16_t p0 = vctp32q(blkCnt);
            vecIn = vldrwq_gather_shifted_offset(pDataC, vecOffsCur);
            vstrwq_p(pDataDestR, vecIn, p0);
        }

        pDataRow += CMPLX_DIM;
        pDataDestRow += (srcRows * CMPLX_DIM);
    }
    while (--i);

    return (ARM_MATH_SUCCESS);
}
__STATIC_INLINE arm_status arm_mat_trans_16bit_2x2(uint16_t * pDataSrc, uint16_t * pDataDest)
{
    pDataDest[0] = pDataSrc[0];
    pDataDest[3] = pDataSrc[3];
    pDataDest[2] = pDataSrc[1];
    pDataDest[1] = pDataSrc[2];

    return (ARM_MATH_SUCCESS);
}

__STATIC_INLINE arm_status arm_mat_trans_16bit_3x3_mve(uint16_t * pDataSrc, uint16_t * pDataDest)
{
    static const uint16_t stridesTr33[8] = { 0, 3, 6, 1, 4, 7, 2, 5 };
    uint16x8_t    vecOffs1;
    uint16x8_t    vecIn1;
    /*
     *
     *  | 0   1   2 |       | 0   3   6 |  8 x 16-bit flattened version | 0  3  6  1  4  7  2  5 |
     *  | 3   4   5 |  =>   | 1   4   7 |            =>                 | 8  .  .  .  .  .  .  . |
     *  | 6   7   8 |       | 2   5   8 |         (row major)
     *
     */
    vecOffs1 = vldrhq_u16((uint16_t const *) stridesTr33);
    vecIn1 = vldrhq_u16((uint16_t const *) pDataSrc);

    vstrhq_scatter_shifted_offset_u16(pDataDest, vecOffs1, vecIn1);

    pDataDest[8] = pDataSrc[8];

    return (ARM_MATH_SUCCESS);
}

__STATIC_INLINE arm_status arm_mat_trans_16bit_4x4_mve(uint16_t * pDataSrc, uint16_t * pDataDest)
{
    static const uint16_t stridesTr44_1[8] = { 0, 4, 8, 12, 1, 5, 9, 13 };
    static const uint16_t stridesTr44_2[8] = { 2, 6, 10, 14, 3, 7, 11, 15 };
    uint16x8_t    vecOffs1, vecOffs2;
    uint16x8_t    vecIn1, vecIn2;
    uint16_t const *pDataSrcVec = (uint16_t const *) pDataSrc;
    /*
     * 4x4 matrix transposition
     *
     * | 0   1   2   3  |       | 0   4   8   12 |   8 x 16-bit flattened version
     * | 4   5   6   7  |  =>   | 1   5   9   13 |   =>  [ 0  4  8  12  1  5  9  13 ]
     * | 8   9   10  11 |       | 2   6   10  14 |       [ 2  6  10  14  3  7  11  15 ]
     * | 12  13  14  15 |       | 3   7   11  15 |
     */
    vecOffs1 = vldrhq_u16((uint16_t const *) stridesTr44_1);
    vecOffs2 = vldrhq_u16((uint16_t const *) stridesTr44_2);
    vecIn1 = vldrhq_u16(pDataSrcVec);
    pDataSrcVec += 8;
    vecIn2 = vldrhq_u16(pDataSrcVec);

    vstrhq_scatter_shifted_offset_u16(pDataDest, vecOffs1, vecIn1);
    vstrhq_scatter_shifted_offset_u16(pDataDest, vecOffs2, vecIn2);

    return (ARM_MATH_SUCCESS);
}

__STATIC_INLINE arm_status arm_mat_trans_16bit_generic(
    uint16_t    srcRows,
    uint16_t    srcCols,
    uint16_t  * pDataSrc,
    uint16_t  * pDataDest)
{
    uint16x8_t      vecOffs;
    uint32_t        i;
    uint32_t        blkCnt;
    uint16_t const *pDataC;
    uint16_t       *pDataDestR;
    uint16x8_t      vecIn;

    vecOffs = vidupq_u16((uint32_t)0, 1);
    vecOffs = vecOffs * srcCols;

    i = srcCols;
    while (i > 0U)
    {
        pDataC = (uint16_t const *) pDataSrc;
        pDataDestR = pDataDest;

        blkCnt = srcRows >> 3;
        while (blkCnt > 0U)
        {
            vecIn = vldrhq_gather_shifted_offset_u16(pDataC, vecOffs);
            vstrhq_u16(pDataDestR, vecIn);
            pDataDestR += 8;
            pDataC = pDataC + srcCols * 8;
            /*
             * Decrement the blockSize loop counter
             */
            blkCnt--;
        }
        /*
         * tail
         */
        blkCnt = srcRows & 7;
        if (blkCnt > 0U)
        {
            mve_pred16_t p0 = vctp16q(blkCnt);
            vecIn = vldrhq_gather_shifted_offset_u16(pDataC, vecOffs);
            vstrhq_p_u16(pDataDestR, vecIn, p0);
        }
        pDataSrc += 1;
        pDataDest += srcRows;
        i--;
    }

    return (ARM_MATH_SUCCESS);
}
__STATIC_INLINE arm_status arm_mat_cmplx_trans_16bit(
    uint16_t    srcRows,
    uint16_t    srcCols,
    uint16_t   *pDataSrc,
    uint16_t    dstRows,
    uint16_t    dstCols,
    uint16_t   *pDataDest)
{
    static const uint16_t loadCmplxCol[8] = { 0, 0, 1, 1, 2, 2, 3, 3 };
    int             i;
    uint16x8_t      vecOffsRef, vecOffsCur;
    uint16_t const *pDataC;
    uint16_t       *pDataRow;
    uint16_t       *pDataDestR, *pDataDestRow;
    uint32_t        blkCnt;
    uint16x8_t      vecIn;

#ifdef ARM_MATH_MATRIX_CHECK
    /*
     * Check for matrix mismatch condition
     */
    if ((srcRows != dstCols) || (srcCols != dstRows))
    {
        /*
         * Set status as ARM_MATH_SIZE_MISMATCH
         */
        return ARM_MATH_SIZE_MISMATCH;
    }
#else
    (void)dstRows;
    (void)dstCols;
#endif

    /*
     * 2x2, 3x3 and 4x4 specializations to be added
     */

    /*
     * build [0, 1, 2xcol, 2xcol+1, 4xcol, 4xcol+1, 6xcol, 6xcol+1]
     */
    vecOffsRef = vldrhq_u16((uint16_t const *) loadCmplxCol);
    vecOffsRef = vmulq(vecOffsRef, (uint16_t) (srcCols * CMPLX_DIM))
                 + viwdupq_u16((uint32_t)0, (uint16_t) 2, 1);

    pDataRow = pDataSrc;
    pDataDestRow = pDataDest;
    i = srcCols;
    do
    {
        pDataC = (uint16_t const *) pDataRow;
        pDataDestR = pDataDestRow;
        vecOffsCur = vecOffsRef;

        blkCnt = (srcRows * CMPLX_DIM) >> 3;
        while (blkCnt > 0U)
        {
            vecIn = vldrhq_gather_shifted_offset(pDataC, vecOffsCur);
            vstrhq(pDataDestR, vecIn);
            pDataDestR += 8; /* VEC_LANES_U16 */
            vecOffsCur = vaddq(vecOffsCur, (srcCols << 3));
            /*
             * Decrement the blockSize loop counter
             */
            blkCnt--;
        }
        /*
         * tail
         * (will be merged through tail predication)
         */
        blkCnt = (srcRows * CMPLX_DIM) & 0x7;
        if (blkCnt > 0U)
        {
            mve_pred16_t p0 = vctp16q(blkCnt);
            vecIn = vldrhq_gather_shifted_offset(pDataC, vecOffsCur);
            vstrhq_p(pDataDestR, vecIn, p0);
        }

        pDataRow += CMPLX_DIM;
        pDataDestRow += (srcRows * CMPLX_DIM);
    }
    while (--i);

    return (ARM_MATH_SUCCESS);
}
#endif /* MVEF and MVEI */

/***************************************
Definitions available for MVEI only
***************************************/
#if (defined (ARM_MATH_HELIUM) || defined(ARM_MATH_MVEI)) && !defined(ARM_MATH_AUTOVECTORIZE)

#include "arm_common_tables.h"

#define MVE_ASRL_SAT16(acc, shift)          ((sqrshrl_sat48(acc, -(32-shift)) >> 32) & 0xffffffff)
#define MVE_ASRL_SAT32(acc, shift)          ((sqrshrl(acc, -(32-shift)) >> 32) & 0xffffffff)

#if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FAST_TABLES) || defined(ARM_TABLE_FAST_SQRT_Q31_MVE)
__STATIC_INLINE q31x4_t FAST_VSQRT_Q31(q31x4_t vecIn)
{
    q63x2_t         vecTmpLL;
    q31x4_t         vecTmp0, vecTmp1;
    q31_t           scale;
    q63_t           tmp64;
    q31x4_t         vecNrm, vecDst, vecIdx, vecSignBits;

    vecSignBits = vclsq(vecIn);
    vecSignBits = vbicq_n_s32(vecSignBits, 1);
    /*
     * in = in << no_of_sign_bits;
     */
    vecNrm = vshlq(vecIn, vecSignBits);
    /*
     * index = in >> 24;
     */
    vecIdx = vecNrm >> 24;
    vecIdx = vecIdx << 1;

    vecTmp0 = vldrwq_gather_shifted_offset_s32(sqrtTable_Q31, (uint32x4_t)vecIdx);

    vecIdx = vecIdx + 1;

    vecTmp1 = vldrwq_gather_shifted_offset_s32(sqrtTable_Q31, (uint32x4_t)vecIdx);

    vecTmp1 = vqrdmulhq(vecTmp1, vecNrm);
    vecTmp0 = vecTmp0 - vecTmp1;
    vecTmp1 = vqrdmulhq(vecTmp0, vecTmp0);
    vecTmp1 = vqrdmulhq(vecNrm, vecTmp1);
    vecTmp1 = vdupq_n_s32(0x18000000) - vecTmp1;
    vecTmp0 = vqrdmulhq(vecTmp0, vecTmp1);
    vecTmpLL = vmullbq_int(vecNrm, vecTmp0);

    /*
     * scale elements 0, 2
     */
    scale = 26 + (vecSignBits[0] >> 1);
    tmp64 = asrl(vecTmpLL[0], scale);
    vecDst[0] = (q31_t) tmp64;

    scale = 26 + (vecSignBits[2] >> 1);
    tmp64 = asrl(vecTmpLL[1], scale);
    vecDst[2] = (q31_t) tmp64;

    vecTmpLL = vmulltq_int(vecNrm, vecTmp0);

    /*
     * scale elements 1, 3
     */
    scale = 26 + (vecSignBits[1] >> 1);
    tmp64 = asrl(vecTmpLL[0], scale);
    vecDst[1] = (q31_t) tmp64;

    scale = 26 + (vecSignBits[3] >> 1);
    tmp64 = asrl(vecTmpLL[1], scale);
    vecDst[3] = (q31_t) tmp64;
    /*
     * set negative values to 0
     */
    vecDst = vdupq_m(vecDst, 0, vcmpltq_n_s32(vecIn, 0));

    return vecDst;
}
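
/*
 * Illustrative sketch (not part of the original header): FAST_VSQRT_Q31
 * returns sqrt(x) in Q31 for non-negative lanes and forces negative inputs
 * to 0. The helper name and sample value are hypothetical.
 */
__STATIC_INLINE q31x4_t vsqrt_q31_example(void)
{
    q31x4_t vecIn = vdupq_n_s32(0x20000000);   /* 0.25 in Q31 */
    return FAST_VSQRT_Q31(vecIn);              /* ~0x40000000 (0.5) per lane */
}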
#endif

#if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FAST_TABLES) || defined(ARM_TABLE_FAST_SQRT_Q15_MVE)
__STATIC_INLINE q15x8_t FAST_VSQRT_Q15(q15x8_t vecIn)
{
    q31x4_t         vecTmpLev, vecTmpLodd, vecSignL;
    q15x8_t         vecTmp0, vecTmp1;
    q15x8_t         vecNrm, vecDst, vecIdx, vecSignBits;

    vecDst = vuninitializedq_s16();

    vecSignBits = vclsq(vecIn);
    vecSignBits = vbicq_n_s16(vecSignBits, 1);
    /*
     * in = in << no_of_sign_bits;
     */
    vecNrm = vshlq(vecIn, vecSignBits);

    vecIdx = vecNrm >> 8;
    vecIdx = vecIdx << 1;

    vecTmp0 = vldrhq_gather_shifted_offset_s16(sqrtTable_Q15, (uint16x8_t)vecIdx);

    vecIdx = vecIdx + 1;

    vecTmp1 = vldrhq_gather_shifted_offset_s16(sqrtTable_Q15, (uint16x8_t)vecIdx);

    vecTmp1 = vqrdmulhq(vecTmp1, vecNrm);
    vecTmp0 = vecTmp0 - vecTmp1;
    vecTmp1 = vqrdmulhq(vecTmp0, vecTmp0);
    vecTmp1 = vqrdmulhq(vecNrm, vecTmp1);
    vecTmp1 = vdupq_n_s16(0x1800) - vecTmp1;
    vecTmp0 = vqrdmulhq(vecTmp0, vecTmp1);

    vecSignBits = vecSignBits >> 1;

    vecTmpLev = vmullbq_int(vecNrm, vecTmp0);
    vecTmpLodd = vmulltq_int(vecNrm, vecTmp0);

    vecTmp0 = vecSignBits + 10;
    /*
     * negate sign to apply register based vshl
     */
    vecTmp0 = -vecTmp0;

    /*
     * shift even elements
     */
    vecSignL = vmovlbq(vecTmp0);
    vecTmpLev = vshlq(vecTmpLev, vecSignL);
    /*
     * shift odd elements
     */
    vecSignL = vmovltq(vecTmp0);
    vecTmpLodd = vshlq(vecTmpLodd, vecSignL);
    /*
     * merge and narrow odd and even parts
     */
    vecDst = vmovnbq_s32(vecDst, vecTmpLev);
    vecDst = vmovntq_s32(vecDst, vecTmpLodd);
    /*
     * set negative values to 0
     */
    vecDst = vdupq_m(vecDst, 0, vcmpltq_n_s16(vecIn, 0));

    return vecDst;
}
#endif

#endif /* defined (ARM_MATH_HELIUM) || defined(ARM_MATH_MVEI) */

#ifdef __cplusplus
}
#endif

#endif /* _ARM_UTILS_HELIUM_H_ */