  1. /*
  2. * Copyright (c) 2016, 2019 ARM Limited.
  3. *
  4. * SPDX-License-Identifier: MIT
  5. *
  6. * Permission is hereby granted, free of charge, to any person obtaining a copy
  7. * of this software and associated documentation files (the "Software"), to
  8. * deal in the Software without restriction, including without limitation the
  9. * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
  10. * sell copies of the Software, and to permit persons to whom the Software is
  11. * furnished to do so, subject to the following conditions:
  12. *
  13. * The above copyright notice and this permission notice shall be included in all
  14. * copies or substantial portions of the Software.
  15. *
  16. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  17. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  18. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  19. * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  20. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  21. * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  22. * SOFTWARE.
  23. */
  24. #ifndef __ARM_COMPUTE_NEMATH_H__
  25. #define __ARM_COMPUTE_NEMATH_H__
  26. #if defined(ARM_MATH_NEON)
  27. #if defined(__aarch64__)
  28. /** Perform a 7th degree polynomial approximation using Estrin's method.
  29. *
  30. * @param[in] x Input vector value in F32 format.
  31. * @param[in] coeffs Polynomial coefficients table. (array of flattened float32x4_t vectors)
  32. *
  33. * @return The calculated approximation.
  34. */
  35. static inline float64x2_t vtaylor_polyq_f64(float64x2_t x, const float64_t *coeffs);
  36. /** Calculate reciprocal.
  37. *
  38. * @param[in] x Input value.
  39. *
  40. * @return The calculated reciprocal.
  41. */
  42. static inline float64x2_t vinvq_f64(float64x2_t x);
  43. #endif /* #if defined(__aarch64__) */
  44. /** Calculate floor of a vector.
  45. *
  46. * @param[in] val Input vector value in F32 format.
  47. *
  48. * @return The calculated floor vector.
  49. */
  50. static inline float32x4_t vfloorq_f32(float32x4_t val);
  51. /** Calculate inverse square root.
  52. *
  53. * @param[in] x Input value.
  54. *
  55. * @return The calculated inverse square root.
  56. */
  57. static inline float32x2_t vinvsqrt_f32(float32x2_t x);
  58. /** Calculate inverse square root.
  59. *
  60. * @param[in] x Input value.
  61. *
  62. * @return The calculated inverse square root.
  63. */
  64. static inline float32x4_t vinvsqrtq_f32(float32x4_t x);
  65. /** Calculate reciprocal.
  66. *
  67. * @param[in] x Input value.
  68. *
  69. * @return The calculated reciprocal.
  70. */
  71. static inline float32x2_t vinv_f32(float32x2_t x);
  72. /** Calculate reciprocal.
  73. *
  74. * @param[in] x Input value.
  75. *
  76. * @return The calculated reciprocal.
  77. */
  78. static inline float32x4_t vinvq_f32(float32x4_t x);
  79. /** Perform a 7th degree polynomial approximation using Estrin's method.
  80. *
  81. * @param[in] x Input vector value in F32 format.
  82. * @param[in] coeffs Polynomial coefficients table. (array of flattened float32x4_t vectors)
  83. *
  84. * @return The calculated approximation.
  85. */
  86. static inline float32x4_t vtaylor_polyq_f32(float32x4_t x, const float32_t *coeffs);
  87. /** Calculate exponential
  88. *
  89. * @param[in] x Input vector value in F32 format.
  90. *
  91. * @return The calculated exponent.
  92. */
  93. static inline float32x4_t vexpq_f32(float32x4_t x);
  94. /** Calculate logarithm
  95. *
  96. * @param[in] x Input vector value in F32 format.
  97. *
  98. * @return The calculated logarithm.
  99. */
  100. static inline float32x4_t vlogq_f32(float32x4_t x);
  101. /** Calculate hyperbolic tangent.
  102. *
  103. * tanh(x) = (e^2x - 1)/(e^2x + 1)
  104. *
  105. * @note We clamp x to [-5,5] to avoid overflowing issues.
  106. *
  107. * @param[in] val Input vector value in F32 format.
  108. *
  109. * @return The calculated Hyperbolic Tangent.
  110. */
  111. static inline float32x4_t vtanhq_f32(float32x4_t val);
  112. /** Calculate n power of a number.
  113. *
  114. * pow(x,n) = e^(n*log(x))
  115. *
  116. * @param[in] val Input vector value in F32 format.
  117. * @param[in] n Powers to raise the input to.
  118. *
  119. * @return The calculated power.
  120. */
  121. static inline float32x4_t vpowq_f32(float32x4_t val, float32x4_t n);
  122. #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
  123. /** Calculate hyperbolic tangent.
  124. *
  125. * tanh(x) = (e^2x - 1)/(e^2x + 1)
  126. *
  127. * @note We clamp x to [-5,5] to avoid overflowing issues.
  128. *
  129. * @param[in] val Input vector value in F32 format.
  130. *
  131. * @return The calculated Hyperbolic Tangent.
  132. */
  133. static inline float16x8_t vtanhq_f16(float16x8_t val);
  134. /** Calculate reciprocal.
  135. *
  136. * @param[in] x Input value.
  137. *
  138. * @return The calculated reciprocal.
  139. */
  140. static inline float16x4_t vinv_f16(float16x4_t x);
  141. /** Calculate reciprocal.
  142. *
  143. * @param[in] x Input value.
  144. *
  145. * @return The calculated reciprocal.
  146. */
  147. static inline float16x8_t vinvq_f16(float16x8_t x);
  148. /** Calculate inverse square root.
  149. *
  150. * @param[in] x Input value.
  151. *
  152. * @return The calculated inverse square root.
  153. */
  154. static inline float16x4_t vinvsqrt_f16(float16x4_t x);
  155. /** Calculate inverse square root.
  156. *
  157. * @param[in] x Input value.
  158. *
  159. * @return The calculated inverse square root.
  160. */
  161. static inline float16x8_t vinvsqrtq_f16(float16x8_t x);
  162. /** Calculate exponential
  163. *
  164. * @param[in] x Input vector value in F16 format.
  165. *
  166. * @return The calculated exponent.
  167. */
  168. static inline float16x8_t vexpq_f16(float16x8_t x);
  169. /** Calculate n power of a number.
  170. *
  171. * pow(x,n) = e^(n*log(x))
  172. *
  173. * @param[in] val Input vector value in F16 format.
  174. * @param[in] n Powers to raise the input to.
  175. *
  176. * @return The calculated power.
  177. */
  178. static inline float16x8_t vpowq_f16(float16x8_t val, float16x8_t n);
  179. #endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
  180. /** Exponent polynomial coefficients */
  181. extern const float32_t exp_tab[4*8];
  182. extern const float64_t exp_tab_64[2*8];
  183. /** Logarithm polynomial coefficients */
  184. extern const float32_t log_tab[4*8];
  185. extern const float64_t log_tab_64[2*8];
  186. #ifndef DOXYGEN_SKIP_THIS
  187. static inline float32x4_t vfloorq_f32(float32x4_t val)
  188. {
  189. static const float32_t CONST_1[4] = {1.f,1.f,1.f,1.f};
  190. const int32x4_t z = vcvtq_s32_f32(val);
  191. const float32x4_t r = vcvtq_f32_s32(z);
  192. return vbslq_f32(vcgtq_f32(r, val), vsubq_f32(r, vld1q_f32(CONST_1)), r);
  193. }
  194. static inline float32x2_t vinvsqrt_f32(float32x2_t x)
  195. {
  196. float32x2_t sqrt_reciprocal = vrsqrte_f32(x);
  197. sqrt_reciprocal = vmul_f32(vrsqrts_f32(vmul_f32(x, sqrt_reciprocal), sqrt_reciprocal), sqrt_reciprocal);
  198. sqrt_reciprocal = vmul_f32(vrsqrts_f32(vmul_f32(x, sqrt_reciprocal), sqrt_reciprocal), sqrt_reciprocal);
  199. return sqrt_reciprocal;
  200. }
  201. static inline float32x4_t vinvsqrtq_f32(float32x4_t x)
  202. {
  203. float32x4_t sqrt_reciprocal = vrsqrteq_f32(x);
  204. sqrt_reciprocal = vmulq_f32(vrsqrtsq_f32(vmulq_f32(x, sqrt_reciprocal), sqrt_reciprocal), sqrt_reciprocal);
  205. sqrt_reciprocal = vmulq_f32(vrsqrtsq_f32(vmulq_f32(x, sqrt_reciprocal), sqrt_reciprocal), sqrt_reciprocal);
  206. return sqrt_reciprocal;
  207. }
  208. static inline float32x2_t vinv_f32(float32x2_t x)
  209. {
  210. float32x2_t recip = vrecpe_f32(x);
  211. recip = vmul_f32(vrecps_f32(x, recip), recip);
  212. recip = vmul_f32(vrecps_f32(x, recip), recip);
  213. return recip;
  214. }
  215. static inline float32x4_t vinvq_f32(float32x4_t x)
  216. {
  217. float32x4_t recip = vrecpeq_f32(x);
  218. recip = vmulq_f32(vrecpsq_f32(x, recip), recip);
  219. recip = vmulq_f32(vrecpsq_f32(x, recip), recip);
  220. return recip;
  221. }
  222. #if defined(__aarch64__)
  223. static inline float64x2_t vinvq_f64(float64x2_t x)
  224. {
  225. float64x2_t recip = vrecpeq_f64(x);
  226. recip = vmulq_f64(vrecpsq_f64(x, recip), recip);
  227. recip = vmulq_f64(vrecpsq_f64(x, recip), recip);
  228. return recip;
  229. }
  230. #endif /* #if defined(__aarch64__) */
  231. static inline float32x4_t vtaylor_polyq_f32(float32x4_t x, const float32_t *coeffs)
  232. {
  233. float32x4_t A = vmlaq_f32(vld1q_f32(&coeffs[4*0]), vld1q_f32(&coeffs[4*4]), x);
  234. float32x4_t B = vmlaq_f32(vld1q_f32(&coeffs[4*2]), vld1q_f32(&coeffs[4*6]), x);
  235. float32x4_t C = vmlaq_f32(vld1q_f32(&coeffs[4*1]), vld1q_f32(&coeffs[4*5]), x);
  236. float32x4_t D = vmlaq_f32(vld1q_f32(&coeffs[4*3]), vld1q_f32(&coeffs[4*7]), x);
  237. float32x4_t x2 = vmulq_f32(x, x);
  238. float32x4_t x4 = vmulq_f32(x2, x2);
  239. float32x4_t res = vmlaq_f32(vmlaq_f32(A, B, x2), vmlaq_f32(C, D, x2), x4);
  240. return res;
  241. }
  242. #if defined(__aarch64__)
  243. static inline float64x2_t vtaylor_polyq_f64(float64x2_t x, const float64_t *coeffs)
  244. {
  245. float64x2_t A = vmlaq_f64(vld1q_f64(&coeffs[2*0]), vld1q_f64(&coeffs[2*4]), x);
  246. float64x2_t B = vmlaq_f64(vld1q_f64(&coeffs[2*2]), vld1q_f64(&coeffs[2*6]), x);
  247. float64x2_t C = vmlaq_f64(vld1q_f64(&coeffs[2*1]), vld1q_f64(&coeffs[2*5]), x);
  248. float64x2_t D = vmlaq_f64(vld1q_f64(&coeffs[2*3]), vld1q_f64(&coeffs[2*7]), x);
  249. float64x2_t x2 = vmulq_f64(x, x);
  250. float64x2_t x4 = vmulq_f64(x2, x2);
  251. float64x2_t res = vmlaq_f64(vmlaq_f64(A, B, x2), vmlaq_f64(C, D, x2), x4);
  252. return res;
  253. }
  254. #endif /* #if defined(__aarch64__) */
  255. static inline float32x4_t vexpq_f32(float32x4_t x)
  256. {
  257. static const float32_t CONST_LN2[4] = {0.6931471805f,0.6931471805f,0.6931471805f,0.6931471805f}; // ln(2)
  258. static const float32_t CONST_INV_LN2[4] = {1.4426950408f,1.4426950408f,1.4426950408f,1.4426950408f}; // 1/ln(2)
  259. static const float32_t CONST_0[4] = {0.f,0.f,0.f,0.f};
  260. static const int32_t CONST_NEGATIVE_126[4] = {-126,-126,-126,-126};
  261. // Perform range reduction [-log(2),log(2)]
  262. int32x4_t m = vcvtq_s32_f32(vmulq_f32(x, vld1q_f32(CONST_INV_LN2)));
  263. float32x4_t val = vmlsq_f32(x, vcvtq_f32_s32(m), vld1q_f32(CONST_LN2));
  264. // Polynomial Approximation
  265. float32x4_t poly = vtaylor_polyq_f32(val, exp_tab);
  266. // Reconstruct
  267. poly = vreinterpretq_f32_s32(vqaddq_s32(vreinterpretq_s32_f32(poly), vqshlq_n_s32(m, 23)));
  268. poly = vbslq_f32(vcltq_s32(m, vld1q_s32(CONST_NEGATIVE_126)), vld1q_f32(CONST_0), poly);
  269. return poly;
  270. }
  271. static inline float32x4_t vlogq_f32(float32x4_t x)
  272. {
  273. static const int32_t CONST_127[4] = {127,127,127,127}; // 127
  274. static const float32_t CONST_LN2[4] = {0.6931471805f,0.6931471805f,0.6931471805f,0.6931471805f}; // ln(2)
  275. // Extract exponent
  276. int32x4_t m = vsubq_s32(vreinterpretq_s32_u32(vshrq_n_u32(vreinterpretq_u32_f32(x), 23)), vld1q_s32(CONST_127));
  277. float32x4_t val = vreinterpretq_f32_s32(vsubq_s32(vreinterpretq_s32_f32(x), vshlq_n_s32(m, 23)));
  278. // Polynomial Approximation
  279. float32x4_t poly = vtaylor_polyq_f32(val, log_tab);
  280. // Reconstruct
  281. poly = vmlaq_f32(poly, vcvtq_f32_s32(m), vld1q_f32(CONST_LN2));
  282. return poly;
  283. }
  284. static inline float32x4_t vtanhq_f32(float32x4_t val)
  285. {
  286. static const float32_t CONST_1[4] = {1.f,1.f,1.f,1.f};
  287. static const float32_t CONST_2[4] = {2.f,2.f,2.f,2.f};
  288. static const float32_t CONST_MIN_TANH[4] = {-10.f,-10.f,-10.f,-10.f};
  289. static const float32_t CONST_MAX_TANH[4] = {10.f,10.f,10.f,10.f};
  290. float32x4_t x = vminq_f32(vmaxq_f32(val, vld1q_f32(CONST_MIN_TANH)), vld1q_f32(CONST_MAX_TANH));
  291. float32x4_t exp2x = vexpq_f32(vmulq_f32(vld1q_f32(CONST_2), x));
  292. float32x4_t num = vsubq_f32(exp2x, vld1q_f32(CONST_1));
  293. float32x4_t den = vaddq_f32(exp2x, vld1q_f32(CONST_1));
  294. float32x4_t tanh = vmulq_f32(num, vinvq_f32(den));
  295. return tanh;
  296. }
  297. static inline float32x4_t vpowq_f32(float32x4_t val, float32x4_t n)
  298. {
  299. return vexpq_f32(vmulq_f32(n, vlogq_f32(val)));
  300. }
  301. #endif /* DOXYGEN_SKIP_THIS */
  302. #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
/* No dedicated FP16 coefficient tables: vexpq_f16 / vlogq_f16 widen to F32
 * and reuse exp_tab / log_tab via the F32 implementations. */
  305. #ifndef DOXYGEN_SKIP_THIS
  306. static inline float16x8_t vfloorq_f16(float16x8_t val)
  307. {
  308. static const float16_t CONST_1[8] = {1.f,1.f,1.f,1.f,1.f,1.f,1.f,1.f};
  309. const int16x8_t z = vcvtq_s16_f16(val);
  310. const float16x8_t r = vcvtq_f16_s16(z);
  311. return vbslq_f16(vcgtq_f16(r, val), vsubq_f16(r, vld1q_f16(CONST_1)), r);
  312. }
  313. static inline float16x4_t vinvsqrt_f16(float16x4_t x)
  314. {
  315. float16x4_t sqrt_reciprocal = vrsqrte_f16(x);
  316. sqrt_reciprocal = vmul_f16(vrsqrts_f16(vmul_f16(x, sqrt_reciprocal), sqrt_reciprocal), sqrt_reciprocal);
  317. sqrt_reciprocal = vmul_f16(vrsqrts_f16(vmul_f16(x, sqrt_reciprocal), sqrt_reciprocal), sqrt_reciprocal);
  318. return sqrt_reciprocal;
  319. }
  320. static inline float16x8_t vinvsqrtq_f16(float16x8_t x)
  321. {
  322. float16x8_t sqrt_reciprocal = vrsqrteq_f16(x);
  323. sqrt_reciprocal = vmulq_f16(vrsqrtsq_f16(vmulq_f16(x, sqrt_reciprocal), sqrt_reciprocal), sqrt_reciprocal);
  324. sqrt_reciprocal = vmulq_f16(vrsqrtsq_f16(vmulq_f16(x, sqrt_reciprocal), sqrt_reciprocal), sqrt_reciprocal);
  325. return sqrt_reciprocal;
  326. }
  327. static inline float16x4_t vinv_f16(float16x4_t x)
  328. {
  329. float16x4_t recip = vrecpe_f16(x);
  330. recip = vmul_f16(vrecps_f16(x, recip), recip);
  331. recip = vmul_f16(vrecps_f16(x, recip), recip);
  332. return recip;
  333. }
  334. static inline float16x8_t vinvq_f16(float16x8_t x)
  335. {
  336. float16x8_t recip = vrecpeq_f16(x);
  337. recip = vmulq_f16(vrecpsq_f16(x, recip), recip);
  338. recip = vmulq_f16(vrecpsq_f16(x, recip), recip);
  339. return recip;
  340. }
  341. static inline float16x8_t vtanhq_f16(float16x8_t val)
  342. {
  343. const float16_t CONST_1[8] = {1.f,1.f,1.f,1.f,1.f,1.f,1.f,1.f};
  344. const float16_t CONST_2[8] = {2.f,2.f,2.f,2.f,2.f,2.f,2.f,2.f};
  345. const float16_t CONST_MIN_TANH[8] = {-10.f,-10.f,-10.f,-10.f,-10.f,-10.f,-10.f,-10.f};
  346. const float16_t CONST_MAX_TANH[8] = {10.f,10.f,10.f,10.f,10.f,10.f,10.f,10.f};
  347. const float16x8_t x = vminq_f16(vmaxq_f16(val, vld1q_f16(CONST_MIN_TANH)), vld1q_f16(CONST_MAX_TANH));
  348. const float16x8_t exp2x = vexpq_f16(vmulq_f16(vld1q_f16(CONST_2), x));
  349. const float16x8_t num = vsubq_f16(exp2x, vld1q_f16(CONST_1));
  350. const float16x8_t den = vaddq_f16(exp2x, vld1q_f16(CONST_1));
  351. const float16x8_t tanh = vmulq_f16(num, vinvq_f16(den));
  352. return tanh;
  353. }
  354. static inline float16x8_t vtaylor_polyq_f16(float16x8_t x, const float16_t *coeffs)
  355. {
  356. const float16x8_t A = vaddq_f16(vld1q_f16(&coeffs[8*0]), vmulq_f16(vld1q_f16(&coeffs[8*4]), x));
  357. const float16x8_t B = vaddq_f16(vld1q_f16(&coeffs[8*2]), vmulq_f16(vld1q_f16(&coeffs[8*6]), x));
  358. const float16x8_t C = vaddq_f16(vld1q_f16(&coeffs[8*1]), vmulq_f16(vld1q_f16(&coeffs[8*5]), x));
  359. const float16x8_t D = vaddq_f16(vld1q_f16(&coeffs[8*3]), vmulq_f16(vld1q_f16(&coeffs[8*7]), x));
  360. const float16x8_t x2 = vmulq_f16(x, x);
  361. const float16x8_t x4 = vmulq_f16(x2, x2);
  362. const float16x8_t res = vaddq_f16(vaddq_f16(A, vmulq_f16(B, x2)), vmulq_f16(vaddq_f16(C, vmulq_f16(D, x2)), x4));
  363. return res;
  364. }
  365. static inline float16x8_t vexpq_f16(float16x8_t x)
  366. {
  367. // TODO (COMPMID-1535) : Revisit FP16 approximations
  368. const float32x4_t x_high = vcvt_f32_f16(vget_high_f16(x));
  369. const float32x4_t x_low = vcvt_f32_f16(vget_low_f16(x));
  370. const float16x8_t res = vcvt_high_f16_f32(vcvt_f16_f32(vexpq_f32(x_low)), vexpq_f32(x_high));
  371. return res;
  372. }
  373. static inline float16x8_t vlogq_f16(float16x8_t x)
  374. {
  375. // TODO (COMPMID-1535) : Revisit FP16 approximations
  376. const float32x4_t x_high = vcvt_f32_f16(vget_high_f16(x));
  377. const float32x4_t x_low = vcvt_f32_f16(vget_low_f16(x));
  378. const float16x8_t res = vcvt_high_f16_f32(vcvt_f16_f32(vlogq_f32(x_low)), vlogq_f32(x_high));
  379. return res;
  380. }
  381. static inline float16x8_t vpowq_f16(float16x8_t val, float16x8_t n)
  382. {
  383. // TODO (giaiod01) - COMPMID-1535
  384. float32x4_t n0_f32 = vcvt_f32_f16(vget_low_f16(n));
  385. float32x4_t n1_f32 = vcvt_f32_f16(vget_high_f16(n));
  386. float32x4_t val0_f32 = vcvt_f32_f16(vget_low_f16(val));
  387. float32x4_t val1_f32 = vcvt_f32_f16(vget_high_f16(val));
  388. float32x4_t res0_f32 = vexpq_f32(vmulq_f32(n0_f32, vlogq_f32(val0_f32)));
  389. float32x4_t res1_f32 = vexpq_f32(vmulq_f32(n1_f32, vlogq_f32(val1_f32)));
  390. return vcombine_f16(vcvt_f16_f32(res0_f32), vcvt_f16_f32(res1_f32));
  391. }
  392. #endif /* DOXYGEN_SKIP_THIS */
  393. #endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
  394. #endif
  395. #endif /* __ARM_COMPUTE_NEMATH_H__ */