/*
 * Double-precision vector erf(x) function.
 *
 * Copyright (c) 2023-2024, Arm Limited.
 * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
 */

#include "v_math.h"
#include "test_sig.h"
#include "test_defs.h"

static const struct data
{
  float64x2_t third;
  float64x2_t tenth, two_over_five, two_over_nine;
  double two_over_fifteen, two_over_fortyfive;
  float64x2_t max, shift;
  uint64x2_t max_idx;
#if WANT_SIMD_EXCEPT
  float64x2_t tiny_bound, huge_bound, scale_minus_one;
#endif
} data = {
  .max_idx = V2 (768),
  .third = V2 (0x1.5555555555556p-2), /* Used to compute 2/3 and 1/6 too.  */
  .two_over_fifteen = 0x1.1111111111111p-3,
  .tenth = V2 (-0x1.999999999999ap-4),
  .two_over_five = V2 (-0x1.999999999999ap-2),
  .two_over_nine = V2 (-0x1.c71c71c71c71cp-3),
  .two_over_fortyfive = 0x1.6c16c16c16c17p-5,
  .max = V2 (5.9921875), /* 6 - 1/128.  */
  .shift = V2 (0x1p45),
#if WANT_SIMD_EXCEPT
  .huge_bound = V2 (0x1p205),
  .tiny_bound = V2 (0x1p-226),
  .scale_minus_one = V2 (0x1.06eba8214db69p-3), /* 2/sqrt(pi) - 1.0.  */
#endif
};

#define AbsMask 0x7fffffffffffffff

struct entry
{
  float64x2_t erf;
  float64x2_t scale;
};

static inline struct entry
lookup (uint64x2_t i)
{
  struct entry e;
  float64x2_t e1 = vld1q_f64 (&__v_erf_data.tab[vgetq_lane_u64 (i, 0)].erf),
              e2 = vld1q_f64 (&__v_erf_data.tab[vgetq_lane_u64 (i, 1)].erf);
  e.erf = vuzp1q_f64 (e1, e2);
  e.scale = vuzp2q_f64 (e1, e2);
  return e;
}
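/* A scalar view of the gather in lookup above, as an illustrative sketch
   only; it assumes the interleaved { erf, scale } entry layout of
   __v_erf_data.tab implied by the paired loads.  Each vld1q_f64 fetches one
   { erf(r), scale(r) } pair, and vuzp1q_f64/vuzp2q_f64 de-interleave the two
   pairs into one vector of erf values and one of scale values.  Per lane n
   this amounts to:

     e.erf[n]   = __v_erf_data.tab[i[n]].erf;
     e.scale[n] = __v_erf_data.tab[i[n]].scale;  */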
/* Double-precision implementation of vector erf(x).
   Approximation based on series expansion near x rounded to
   nearest multiple of 1/128.
   Let d = x - r, and scale = 2 / sqrt(pi) * exp(-r^2). For x near r,

   erf(x) ~ erf(r) + scale * d * [
       + 1
       - r d
       + 1/3 (2 r^2 - 1) d^2
       - 1/6 (r (2 r^2 - 3)) d^3
       + 1/30 (4 r^4 - 12 r^2 + 3) d^4
       - 1/90 (4 r^4 - 20 r^2 + 15) d^5
     ]

   Maximum measured error: 2.29 ULP
   V_NAME_D1 (erf)(-0x1.00003c924e5d1p-8) got -0x1.20dd59132ebadp-8
                                         want -0x1.20dd59132ebafp-8.  */
float64x2_t VPCS_ATTR V_NAME_D1 (erf) (float64x2_t x)
{
  const struct data *dat = ptr_barrier (&data);

  float64x2_t a = vabsq_f64 (x);
  /* Complementary conditions that are both false for NaN, so they can be
     used in BSLs to return the expected results.  */
  uint64x2_t a_le_max = vcaleq_f64 (x, dat->max);
  uint64x2_t a_gt_max = vcagtq_f64 (x, dat->max);

#if WANT_SIMD_EXCEPT
  /* |x| huge or tiny.  */
  uint64x2_t cmp1 = vcgtq_f64 (a, dat->huge_bound);
  uint64x2_t cmp2 = vcltq_f64 (a, dat->tiny_bound);
  uint64x2_t cmp = vorrq_u64 (cmp1, cmp2);
  /* If any lanes are special, mask them with 1 for small x or 8 for large
     values, and retain a copy of x so that the special-case handler can fix
     those lanes later.  This is only necessary if fenv exceptions are to be
     triggered correctly.  */
  if (unlikely (v_any_u64 (cmp)))
    {
      a = vbslq_f64 (cmp1, v_f64 (8.0), a);
      a = vbslq_f64 (cmp2, v_f64 (1.0), a);
    }
#endif

  /* Set r to the multiple of 1/128 nearest to |x|.  */
  float64x2_t shift = dat->shift;
  float64x2_t z = vaddq_f64 (a, shift);

  /* Look up erf(r) and scale(r) in the table, with no shortcut for small
     values, but with saturated indices for large values and NaNs in order
     to avoid a segfault.  */
  uint64x2_t i
      = vsubq_u64 (vreinterpretq_u64_f64 (z), vreinterpretq_u64_f64 (shift));
  i = vbslq_u64 (a_le_max, i, dat->max_idx);
  struct entry e = lookup (i);

  float64x2_t r = vsubq_f64 (z, shift);
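  /* A scalar model of the shift trick above, as an illustrative sketch only
     (asuint64 stands for any double-to-bits cast and is assumed here, not
     taken from this file).  For a in [0, 6), the sum a + 0x1p45 has exponent
     45 and therefore an ulp of 2^(45-52) = 1/128, so the addition itself
     rounds a to the nearest multiple of 1/128.  The low mantissa bits of z
     then hold the table index, and subtracting the shift back off recovers
     the rounded value:

       double z = a + 0x1p45;                          z = 2^45 + k/128
       uint64_t i = asuint64 (z) - asuint64 (0x1p45);  i = k = round (a * 128)
       double r = z - 0x1p45;                          r = k / 128.0  */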
  /* erf(x) ~ erf(r) + scale * d * poly (r, d).  */
  float64x2_t d = vsubq_f64 (a, r);
  float64x2_t d2 = vmulq_f64 (d, d);
  float64x2_t r2 = vmulq_f64 (r, r);

  /* Load { 2/15, 2/45 } as one vector for the lane-indexed FMAs below.  */
  float64x2_t two_over_fifteen_and_fortyfive
      = vld1q_f64 (&dat->two_over_fifteen);

  /* poly (d, r) = 1 + p1(r) * d + p2(r) * d^2 + ... + p5(r) * d^5.  */
  float64x2_t p1 = r;
  float64x2_t p2
      = vfmsq_f64 (dat->third, r2, vaddq_f64 (dat->third, dat->third));
  float64x2_t p3 = vmulq_f64 (r, vfmaq_f64 (v_f64 (-0.5), r2, dat->third));
  float64x2_t p4 = vfmaq_laneq_f64 (dat->two_over_five, r2,
                                    two_over_fifteen_and_fortyfive, 0);
  p4 = vfmsq_f64 (dat->tenth, r2, p4);
  float64x2_t p5 = vfmaq_laneq_f64 (dat->two_over_nine, r2,
                                    two_over_fifteen_and_fortyfive, 1);
  p5 = vmulq_f64 (r, vfmaq_f64 (vmulq_f64 (v_f64 (0.5), dat->third), r2, p5));

  /* Pairwise evaluation in d shortens the FMA dependency chain.  */
  float64x2_t p34 = vfmaq_f64 (p3, d, p4);
  float64x2_t p12 = vfmaq_f64 (p1, d, p2);
  float64x2_t y = vfmaq_f64 (p34, d2, p5);
  y = vfmaq_f64 (p12, d2, y);

  y = vfmaq_f64 (e.erf, e.scale, vfmsq_f64 (d, d2, y));

  /* |x| > max, including inf: erf saturates to 1.  NaN lanes fail both
     comparisons and propagate the NaN computed above.  */
  y = vbslq_f64 (a_gt_max, v_f64 (1.0), y);

  /* Copy sign.  */
  y = vbslq_f64 (v_u64 (AbsMask), y, x);

#if WANT_SIMD_EXCEPT
  if (unlikely (v_any_u64 (cmp2)))
    {
      /* Neutralise huge values of x before fixing small values.  */
      x = vbslq_f64 (cmp1, v_f64 (1.0), x);
      /* Fix tiny values that trigger spurious underflow.  */
      return vbslq_f64 (cmp2, vfmaq_f64 (x, dat->scale_minus_one, x), y);
    }
#endif
  return y;
}

TEST_SIG (V, D, 1, erf, -6.0, 6.0)
TEST_ULP (V_NAME_D1 (erf), 1.79)
/* WANT_SIMD_EXCEPT blocks miss some cases.  */
TEST_DISABLE_FENV (V_NAME_D1 (erf))
TEST_SYM_INTERVAL (V_NAME_D1 (erf), 0, 5.9921875, 40000)
TEST_SYM_INTERVAL (V_NAME_D1 (erf), 5.9921875, inf, 40000)
TEST_SYM_INTERVAL (V_NAME_D1 (erf), 0, inf, 40000)
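/* Usage sketch, illustrative only: under the AArch64 vector PCS,
   V_NAME_D1 (erf) expands to the two-lane vector-ABI symbol for erf
   (e.g. _ZGVnN2v_erf; see v_math.h for the actual mapping on a given
   build).

     float64x2_t x = (float64x2_t){ 0.5, -1.25 };
     float64x2_t y = _ZGVnN2v_erf (x);   y = { erf(0.5), erf(-1.25) }  */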