/*
 * Single-precision vector e^x function.
 *
 * Copyright (c) 2019-2024, Arm Limited.
 * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
 */
#include "v_math.h"
#include "test_defs.h"
#include "test_sig.h"

static const struct data
{
  float32x4_t c1, c3, c4, inv_ln2;
  /* ln2_hi, ln2_lo, c0 and c2 are kept adjacent so all four can be loaded
     with a single vld1q and used via lane-indexed FMAs.  */
  float ln2_hi, ln2_lo, c0, c2;
  uint32x4_t exponent_bias, special_offset, special_bias;
#if !WANT_SIMD_EXCEPT
  float32x4_t special_bound, scale_thresh;
#endif
} data = {
  /* maxerr: 1.45358 +0.5 ulp.  */
  .c0 = 0x1.0e4020p-7f,
  .c1 = V4 (0x1.573e2ep-5f),
  .c2 = 0x1.555e66p-3f,
  .c3 = V4 (0x1.fffdb6p-2f),
  .c4 = V4 (0x1.ffffecp-1f),
  .inv_ln2 = V4 (0x1.715476p+0f),
  .ln2_hi = 0x1.62e4p-1f,
  .ln2_lo = 0x1.7f7d1cp-20f,
  .exponent_bias = V4 (0x3f800000),
  .special_offset = V4 (0x82000000),
  .special_bias = V4 (0x7f000000),
#if !WANT_SIMD_EXCEPT
  .special_bound = V4 (126.0f),
  .scale_thresh = V4 (192.0f),
#endif
};

#if WANT_SIMD_EXCEPT

# define TinyBound v_u32 (0x20000000)	 /* asuint (0x1p-63).  */
# define BigBound v_u32 (0x42800000)	 /* asuint (0x1p6).  */
# define SpecialBound v_u32 (0x22800000) /* BigBound - TinyBound.  */

static float32x4_t VPCS_ATTR NOINLINE
special_case (float32x4_t x, float32x4_t y, uint32x4_t cmp)
{
  /* If fenv exceptions are to be triggered correctly, fall back to the scalar
     routine for the special lanes.  */
  return v_call_f32 (expf, x, y, cmp);
}

#else

static float32x4_t VPCS_ATTR NOINLINE
special_case (float32x4_t poly, float32x4_t n, uint32x4_t e, uint32x4_t cmp1,
	      float32x4_t scale, const struct data *d)
{
  /* 2^n may overflow; break it up into s1*s2.  */
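  /* Here |n| > 126, so the scale 2^n cannot be computed as a single float.
     Choose b so that s1 * s2 == 2^n with both factors finite:
       n > 0:  b = 0          -> s1 = 0x1p+127, s2 = asfloat (n << 23) = 2^(n-127)
       n <= 0: b = 0x82000000 -> s1 = 0x1p-125, s2 = 2^(n+125)
     E.g. n = 150 gives s1 = 0x1p+127 and s2 = 0x1p+23; poly is folded into s2
     before the multiply by s1, so no intermediate product overflows.  When
     |n| > 192 (scale_thresh) the result is inf or 0 regardless of poly, and
     s1 * s1 (0x1p+254 or 0x1p-250) rounds to exactly that.  */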
  uint32x4_t b = vandq_u32 (vclezq_f32 (n), d->special_offset);
  float32x4_t s1 = vreinterpretq_f32_u32 (vaddq_u32 (b, d->special_bias));
  float32x4_t s2 = vreinterpretq_f32_u32 (vsubq_u32 (e, b));
  uint32x4_t cmp2 = vcagtq_f32 (n, d->scale_thresh);
  float32x4_t r2 = vmulq_f32 (s1, s1);
  /* (s2 + poly * s2) * s1 = s2 * (poly + 1) * s1.  */
  float32x4_t r1 = vmulq_f32 (vfmaq_f32 (s2, poly, s2), s1);
  /* Similar to r1 but avoids double rounding in the subnormal range.  */
  float32x4_t r0 = vfmaq_f32 (scale, poly, scale);
  float32x4_t r = vbslq_f32 (cmp1, r1, r0);
  return vbslq_f32 (cmp2, r2, r);
}

#endif

float32x4_t VPCS_ATTR NOINLINE V_NAME_F1 (exp) (float32x4_t x)
{
  const struct data *d = ptr_barrier (&data);
  /* Load { ln2_hi, ln2_lo, c0, c2 } as one vector for lane-indexed FMAs.  */
  float32x4_t ln2_c02 = vld1q_f32 (&d->ln2_hi);

#if WANT_SIMD_EXCEPT
  /* asuint(x) - TinyBound >= BigBound - TinyBound.  */
  uint32x4_t cmp = vcgeq_u32 (
      vsubq_u32 (vandq_u32 (vreinterpretq_u32_f32 (x), v_u32 (0x7fffffff)),
		 TinyBound),
      SpecialBound);
  float32x4_t xm = x;
  /* If any lanes are special, mask them with 1 and retain a copy of x so
     that the special-case handler can fix the special lanes later.  This is
     only necessary if fenv exceptions are to be triggered correctly.  */
  if (unlikely (v_any_u32 (cmp)))
    x = vbslq_f32 (cmp, v_f32 (1), x);
#endif

  /* exp(x) = 2^n (1 + poly(r)), with 1 + poly(r) in [1/sqrt(2), sqrt(2)]
     x = ln2*n + r, with r in [-ln2/2, ln2/2].  */
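  /* n = round(x * inv_ln2), so that r = x - n*ln2 lies in [-ln2/2, ln2/2].
     The subtraction is done in two steps with ln2 = ln2_hi + ln2_lo to keep
     extra precision in r.  E.g. for x = 10: n = round (14.427) = 14,
     r = 10 - 14*ln2 ~= 0.296, and exp(10) = 2^14 * exp(r).  The scale 2^n is
     assembled directly below, by adding n << 23 to the exponent bias.  */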
  float32x4_t n = vrndaq_f32 (vmulq_f32 (x, d->inv_ln2));
  float32x4_t r = vfmsq_laneq_f32 (x, n, ln2_c02, 0);
  r = vfmsq_laneq_f32 (r, n, ln2_c02, 1);
  uint32x4_t e = vshlq_n_u32 (vreinterpretq_u32_s32 (vcvtq_s32_f32 (n)), 23);
  float32x4_t scale = vreinterpretq_f32_u32 (vaddq_u32 (e, d->exponent_bias));

#if !WANT_SIMD_EXCEPT
  uint32x4_t cmp = vcagtq_f32 (n, d->special_bound);
#endif

  /* Evaluate poly(r) ~= exp(r) - 1 pairwise:
     poly = c4 r + (c3 + c2 r + (c1 + c0 r) r^2) r^2.
     c0 and c2 sit in lanes 2 and 3 of ln2_c02.  */
  float32x4_t r2 = vmulq_f32 (r, r);
  float32x4_t p = vfmaq_laneq_f32 (d->c1, r, ln2_c02, 2);
  float32x4_t q = vfmaq_laneq_f32 (d->c3, r, ln2_c02, 3);
  q = vfmaq_f32 (q, p, r2);
  p = vmulq_f32 (d->c4, r);
  float32x4_t poly = vfmaq_f32 (p, q, r2);

  if (unlikely (v_any_u32 (cmp)))
#if WANT_SIMD_EXCEPT
    return special_case (xm, vfmaq_f32 (scale, poly, scale), cmp);
#else
    return special_case (poly, n, e, cmp, scale, d);
#endif

  /* scale + poly * scale = 2^n * (1 + poly(r)).  */
  return vfmaq_f32 (scale, poly, scale);
}

HALF_WIDTH_ALIAS_F1 (exp)

TEST_SIG (V, F, 1, exp, -9.9, 9.9)
TEST_ULP (V_NAME_F1 (exp), 1.49)
TEST_DISABLE_FENV_IF_NOT (V_NAME_F1 (exp), WANT_SIMD_EXCEPT)
TEST_INTERVAL (V_NAME_F1 (exp), 0, 0xffff0000, 10000)
TEST_SYM_INTERVAL (V_NAME_F1 (exp), 0x1p-14, 0x1p8, 500000)
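
/* Usage sketch (illustrative; the exact symbol depends on how V_NAME_F1 is
   defined in v_math.h - under the AArch64 vector function ABI it is
   typically _ZGVnN4v_expf):

     float32x4_t x = vdupq_n_f32 (1.0f);
     float32x4_t y = _ZGVnN4v_expf (x);   (* each lane ~= 2.718282f *)
*/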