/*
 * Double-precision vector pow function.
 *
 * Copyright (c) 2020-2024, Arm Limited.
 * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
 */

#include "v_math.h"
#include "test_sig.h"
#include "test_defs.h"

/* Defines parameters of the approximation and scalar fallback. */
#include "finite_pow.h"

/* Vector broadcasts of the scalar special-case thresholds from
   finite_pow.h, used by the WANT_SIMD_EXCEPT filtering below.  */
#define VecSmallPowX v_u64 (SmallPowX)
#define VecThresPowX v_u64 (ThresPowX)
#define VecSmallPowY v_u64 (SmallPowY)
#define VecThresPowY v_u64 (ThresPowY)

/* NOTE: field order matters.  Several scalar pairs are deliberately
   adjacent so they can be loaded as one vector with vld1q_f64 and used
   via lane-indexed FMAs: (ln2_lo, ln2_hi), (log_c1, log_c3),
   (ln2_lo_n, ln2_hi_n) and (inv_ln2_n, exp_c2).  Do not reorder.  */
static const struct data
{
  uint64x2_t inf;
  float64x2_t small_powx;
  uint64x2_t offset, mask;
  uint64x2_t mask_sub_0, mask_sub_1;
  float64x2_t log_c0, log_c2, log_c4, log_c5;
  double log_c1, log_c3;
  double ln2_lo, ln2_hi;
  uint64x2_t small_exp, thres_exp;
  double ln2_lo_n, ln2_hi_n;
  double inv_ln2_n, exp_c2;
  float64x2_t exp_c0, exp_c1;
} data = {
  /* Power threshold.  */
  .inf = V2 (0x7ff0000000000000),
  .small_powx = V2 (0x1p-126),
  .offset = V2 (Off),
  .mask = V2 (0xfffULL << 52),
  /* mask_sub_0 scales a subnormal x up by 2^52 (as an integer it is the
     exponent field of 2^52); mask_sub_1 subtracts 52 from the biased
     exponent to compensate.  */
  .mask_sub_0 = V2 (1ULL << 52),
  .mask_sub_1 = V2 (52ULL << 52),
  /* Coefficients copied from v_pow_log_data.c
     relative error: 0x1.11922ap-70 in [-0x1.6bp-8, 0x1.6bp-8]
     Coefficients are scaled to match the scaling during evaluation.  */
  .log_c0 = V2 (0x1.555555555556p-2 * -2),
  .log_c1 = -0x1.0000000000006p-2 * -2,
  .log_c2 = V2 (0x1.999999959554ep-3 * 4),
  .log_c3 = -0x1.555555529a47ap-3 * 4,
  .log_c4 = V2 (0x1.2495b9b4845e9p-3 * -8),
  .log_c5 = V2 (-0x1.0002b8b263fc3p-3 * -8),
  .ln2_hi = 0x1.62e42fefa3800p-1,
  .ln2_lo = 0x1.ef35793c76730p-45,
  /* Polynomial coefficients: abs error: 1.43*2^-58, ulp error: 0.549
     (0.550 without fma) if |x| < ln2/512.  */
  .exp_c0 = V2 (0x1.fffffffffffd4p-2),
  .exp_c1 = V2 (0x1.5555571d6ef9p-3),
  .exp_c2 = 0x1.5555576a5adcep-5,
  .small_exp = V2 (0x3c90000000000000),
  .thres_exp = V2 (0x03f0000000000000),
  .inv_ln2_n = 0x1.71547652b82fep8, /* N/ln2.  */
  .ln2_hi_n = 0x1.62e42fefc0000p-9, /* ln2/N.  */
  .ln2_lo_n = -0x1.c610ca86c3899p-45,
};

/* This version implements an algorithm close to scalar pow but
   - does not implement the trick in the exp's specialcase subroutine to avoid
   double-rounding,
   - does not use a tail in the exponential core computation,
   - and pow's exp polynomial order and table bits might differ.

   Maximum measured error is 1.04 ULPs:
   _ZGVnN2vv_pow(0x1.024a3e56b3c3p-136, 0x1.87910248b58acp-13)
     got 0x1.f71162f473251p-1
    want 0x1.f71162f473252p-1.  */

/* Per-lane gather from a scalar log table: for each lane of I, the top
   V_POW_LOG_TABLE_BITS bits of the mantissa select one of the N_LOG
   table entries.  Done with scalar loads since there is no vector
   gather on Advanced SIMD.  */
static inline float64x2_t
v_masked_lookup_f64 (const double *table, uint64x2_t i)
{
  return (float64x2_t){
    table[(i[0] >> (52 - V_POW_LOG_TABLE_BITS)) & (N_LOG - 1)],
    table[(i[1] >> (52 - V_POW_LOG_TABLE_BITS)) & (N_LOG - 1)]
  };
}

/* Compute y+TAIL = log(x) where the rounded result is y and TAIL has about
   additional 15 bits precision.  IX is the bit representation of x, but
   normalized in the subnormal range using the sign bit for the exponent.  */
static inline float64x2_t
v_log_inline (uint64x2_t ix, float64x2_t *tail, const struct data *d)
{
  /* x = 2^k z; where z is in range [OFF,2*OFF) and exact.
     The range is split into N subintervals.
     The ith subinterval contains z and c is near its center.  */
  uint64x2_t tmp = vsubq_u64 (ix, d->offset);
  int64x2_t k = vshrq_n_s64 (vreinterpretq_s64_u64 (tmp), 52);
  uint64x2_t iz = vsubq_u64 (ix, vandq_u64 (tmp, d->mask));
  float64x2_t z = vreinterpretq_f64_u64 (iz);
  float64x2_t kd = vcvtq_f64_s64 (k);
  /* log(x) = k*Ln2 + log(c) + log1p(z/c-1).  */
  float64x2_t invc = v_masked_lookup_f64 (__v_pow_log_data.invc, tmp);
  float64x2_t logc = v_masked_lookup_f64 (__v_pow_log_data.logc, tmp);
  float64x2_t logctail = v_masked_lookup_f64 (__v_pow_log_data.logctail, tmp);
  /* Note: 1/c is j/N or j/N/2 where j is an integer in [N,2N) and
     |z/c - 1| < 1/N, so r = z/c - 1 is exactly representible.  */
  float64x2_t r = vfmaq_f64 (v_f64 (-1.0), z, invc);
  /* k*Ln2 + log(c) + r.  */
  /* ln2 holds { ln2_lo, ln2_hi } — see the struct layout note above.  */
  float64x2_t ln2 = vld1q_f64 (&d->ln2_lo);
  float64x2_t t1 = vfmaq_laneq_f64 (logc, kd, ln2, 1);
  float64x2_t t2 = vaddq_f64 (t1, r);
  float64x2_t lo1 = vfmaq_laneq_f64 (logctail, kd, ln2, 0);
  float64x2_t lo2 = vaddq_f64 (vsubq_f64 (t1, t2), r);
  /* Evaluation is optimized assuming superscalar pipelined execution.  */
  float64x2_t ar = vmulq_f64 (v_f64 (-0.5), r);
  float64x2_t ar2 = vmulq_f64 (r, ar);
  float64x2_t ar3 = vmulq_f64 (r, ar2);
  /* k*Ln2 + log(c) + r + A[0]*r*r.  */
  float64x2_t hi = vaddq_f64 (t2, ar2);
  float64x2_t lo3 = vfmaq_f64 (vnegq_f64 (ar2), ar, r);
  float64x2_t lo4 = vaddq_f64 (vsubq_f64 (t2, hi), ar2);
  /* p = log1p(r) - r - A[0]*r*r.  */
  /* odd_coeffs holds { log_c1, log_c3 } loaded pairwise for lane FMAs.  */
  float64x2_t odd_coeffs = vld1q_f64 (&d->log_c1);
  float64x2_t a56 = vfmaq_f64 (d->log_c4, r, d->log_c5);
  float64x2_t a34 = vfmaq_laneq_f64 (d->log_c2, r, odd_coeffs, 1);
  float64x2_t a12 = vfmaq_laneq_f64 (d->log_c0, r, odd_coeffs, 0);
  float64x2_t p = vfmaq_f64 (a34, ar2, a56);
  p = vfmaq_f64 (a12, ar2, p);
  p = vmulq_f64 (ar3, p);
  float64x2_t lo
      = vaddq_f64 (vaddq_f64 (vaddq_f64 (vaddq_f64 (lo1, lo2), lo3), lo4), p);
  float64x2_t y = vaddq_f64 (hi, lo);
  *tail = vaddq_f64 (vsubq_f64 (hi, y), lo);
  return y;
}

/* Scalar fallback for the exp step: handles lanes whose exponent would
   under/overflow the vector core.  NOINLINE keeps the cold path out of
   the hot code.  */
static float64x2_t VPCS_ATTR NOINLINE
exp_special_case (float64x2_t x, float64x2_t xtail)
{
  return (float64x2_t){ exp_nosignbias (x[0], xtail[0]),
			exp_nosignbias (x[1], xtail[1]) };
}

/* Computes sign*exp(x+xtail) where |xtail| < 2^-8/N and |xtail| <= |x|.  */
static inline float64x2_t
v_exp_inline (float64x2_t x, float64x2_t neg_xtail, const struct data *d)
{
  /* Fallback to scalar exp_inline for all lanes if any lane
     contains value of x s.t. |x| <= 2^-54 or >= 512.  */
  uint64x2_t uoflowx = vcgeq_u64 (
      vsubq_u64 (vreinterpretq_u64_f64 (vabsq_f64 (x)), d->small_exp),
      d->thres_exp);
  if (unlikely (v_any_u64 (uoflowx)))
    return exp_special_case (x, vnegq_f64 (neg_xtail));

  /* exp(x) = 2^(k/N) * exp(r), with exp(r) in [2^(-1/2N),2^(1/2N)].  */
  /* x = ln2/N*k + r, with k integer and r in [-ln2/2N, ln2/2N].  */
  /* z - kd is in [-1, 1] in non-nearest rounding modes.  */
  /* exp_consts holds { inv_ln2_n, exp_c2 } — paired struct fields.  */
  float64x2_t exp_consts = vld1q_f64 (&d->inv_ln2_n);
  float64x2_t z = vmulq_laneq_f64 (x, exp_consts, 0);
  float64x2_t kd = vrndnq_f64 (z);
  uint64x2_t ki = vreinterpretq_u64_s64 (vcvtaq_s64_f64 (z));
  /* ln2_n holds { ln2_lo_n, ln2_hi_n }; subtract k*ln2/N in two parts
     (hi then lo) to keep r accurate.  */
  float64x2_t ln2_n = vld1q_f64 (&d->ln2_lo_n);
  float64x2_t r = vfmsq_laneq_f64 (x, kd, ln2_n, 1);
  r = vfmsq_laneq_f64 (r, kd, ln2_n, 0);
  /* The code assumes 2^-200 < |xtail| < 2^-8/N.  */
  r = vsubq_f64 (r, neg_xtail);
  /* 2^(k/N) ~= scale.  */
  uint64x2_t idx = vandq_u64 (ki, v_u64 (N_EXP - 1));
  uint64x2_t top = vshlq_n_u64 (ki, 52 - V_POW_EXP_TABLE_BITS);
  /* This is only a valid scale when -1023*N < k < 1024*N.  */
  uint64x2_t sbits = v_lookup_u64 (SBits, idx);
  sbits = vaddq_u64 (sbits, top);
  /* exp(x) = 2^(k/N) * exp(r) ~= scale + scale * (exp(r) - 1).  */
  float64x2_t r2 = vmulq_f64 (r, r);
  float64x2_t tmp = vfmaq_laneq_f64 (d->exp_c1, r, exp_consts, 1);
  tmp = vfmaq_f64 (d->exp_c0, r, tmp);
  tmp = vfmaq_f64 (r, r2, tmp);
  float64x2_t scale = vreinterpretq_f64_u64 (sbits);
  /* Note: tmp == 0 or |tmp| > 2^-200 and scale > 2^-739, so there
     is no spurious underflow here even without fma.  */
  return vfmaq_f64 (scale, scale, tmp);
}

/* Whole-vector scalar fallback: evaluate both lanes with the scalar
   special-case pow from finite_pow.h.  NOINLINE keeps it cold.  */
static float64x2_t NOINLINE VPCS_ATTR
scalar_fallback (float64x2_t x, float64x2_t y)
{
  return (float64x2_t){ pow_scalar_special_case (x[0], y[0]),
			pow_scalar_special_case (x[1], y[1]) };
}

float64x2_t VPCS_ATTR V_NAME_D2 (pow) (float64x2_t x, float64x2_t y)
{
  const struct data *d = ptr_barrier (&data);
  /* Case of x <= 0 is too complicated to be vectorised efficiently here,
     fallback to scalar pow for all lanes if any x < 0 detected.  */
  if (v_any_u64 (vclezq_s64 (vreinterpretq_s64_f64 (x))))
    return scalar_fallback (x, y);

  uint64x2_t vix = vreinterpretq_u64_f64 (x);
  uint64x2_t viy = vreinterpretq_u64_f64 (y);
  uint64x2_t iay = vandq_u64 (viy, d->inf);

  /* Special cases of x or y.  */
#if WANT_SIMD_EXCEPT
  /* Small or large.  */
  uint64x2_t vtopx = vshrq_n_u64 (vix, 52);
  uint64x2_t vabstopy = vshrq_n_u64 (iay, 52);
  uint64x2_t specialx
      = vcgeq_u64 (vsubq_u64 (vtopx, VecSmallPowX), VecThresPowX);
  uint64x2_t specialy
      = vcgeq_u64 (vsubq_u64 (vabstopy, VecSmallPowY), VecThresPowY);
#else
  /* The case y==0 does not trigger a special case, since in this case it is
     necessary to fix the result only if x is a signalling nan, which already
     triggers a special case.  We test y==0 directly in the scalar fallback.  */
  uint64x2_t iax = vandq_u64 (vix, d->inf);
  uint64x2_t specialx = vcgeq_u64 (iax, d->inf);
  uint64x2_t specialy = vcgeq_u64 (iay, d->inf);
#endif
  uint64x2_t special = vorrq_u64 (specialx, specialy);
  /* Fallback to scalar on all lanes if any lane is inf or nan.  */
  if (unlikely (v_any_u64 (special)))
    return scalar_fallback (x, y);

  /* Small cases of x: |x| < 0x1p-126.  */
  uint64x2_t smallx = vcaltq_f64 (x, d->small_powx);
  if (unlikely (v_any_u64 (smallx)))
    {
      /* Update ix if top 12 bits of x are 0.  */
      uint64x2_t sub_x = vceqzq_u64 (vshrq_n_u64 (vix, 52));
      if (unlikely (v_any_u64 (sub_x)))
	{
	  /* Normalize subnormal x so exponent becomes negative: multiply
	     by 2^52 to shift the mantissa into normal range, then subtract
	     52 from the biased exponent field.  */
	  uint64x2_t vix_norm = vreinterpretq_u64_f64 (
	      vabsq_f64 (vmulq_f64 (x, vcvtq_f64_u64 (d->mask_sub_0))));
	  vix_norm = vsubq_u64 (vix_norm, d->mask_sub_1);
	  vix = vbslq_u64 (sub_x, vix_norm, vix);
	}
    }

  /* Vector Log(ix, &lo).  */
  float64x2_t vlo;
  float64x2_t vhi = v_log_inline (vix, &vlo, d);

  /* Vector Exp(y_loghi, y_loglo): form y*log(x) in hi/lo parts; vemi
     recovers the FMA rounding error of y*vhi.  */
  float64x2_t vehi = vmulq_f64 (y, vhi);
  float64x2_t vemi = vfmsq_f64 (vehi, y, vhi);
  float64x2_t neg_velo = vfmsq_f64 (vemi, y, vlo);
  return v_exp_inline (vehi, neg_velo, d);
}

TEST_SIG (V, D, 2, pow)
TEST_ULP (V_NAME_D2 (pow), 0.55)
TEST_DISABLE_FENV_IF_NOT (V_NAME_D2 (pow), WANT_SIMD_EXCEPT)
/* Wide intervals spanning the whole domain but shared between x and y.  */
#define V_POW_INTERVAL2(xlo, xhi, ylo, yhi, n)                                \
  TEST_INTERVAL2 (V_NAME_D2 (pow), xlo, xhi, ylo, yhi, n)                     \
  TEST_INTERVAL2 (V_NAME_D2 (pow), xlo, xhi, -ylo, -yhi, n)                   \
  TEST_INTERVAL2 (V_NAME_D2 (pow), -xlo, -xhi, ylo, yhi, n)                   \
  TEST_INTERVAL2 (V_NAME_D2 (pow), -xlo, -xhi, -ylo, -yhi, n)
/* SHL52 appends 9 zeros to a hex exponent-field literal, i.e. shifts the
   threshold into the exponent bits of a double's representation.  */
#define EXPAND(str) str##000000000
#define SHL52(str) EXPAND (str)
V_POW_INTERVAL2 (0, SHL52 (SmallPowX), 0, inf, 40000)
V_POW_INTERVAL2 (SHL52 (SmallPowX), SHL52 (BigPowX), 0, inf, 40000)
V_POW_INTERVAL2 (SHL52 (BigPowX), inf, 0, inf, 40000)
V_POW_INTERVAL2 (0, inf, 0, SHL52 (SmallPowY), 40000)
V_POW_INTERVAL2 (0, inf, SHL52 (SmallPowY), SHL52 (BigPowY), 40000)
V_POW_INTERVAL2 (0, inf, SHL52 (BigPowY), inf, 40000)
V_POW_INTERVAL2 (0, inf, 0, inf, 1000)
/* x~1 or y~1.  */
V_POW_INTERVAL2 (0x1p-1, 0x1p1, 0x1p-10, 0x1p10, 10000)
V_POW_INTERVAL2 (0x1p-500, 0x1p500, 0x1p-1, 0x1p1, 10000)
V_POW_INTERVAL2 (0x1.ep-1, 0x1.1p0, 0x1p8, 0x1p16, 10000)
/* around argmaxs of ULP error.  */
V_POW_INTERVAL2 (0x1p-300, 0x1p-200, 0x1p-20, 0x1p-10, 10000)
V_POW_INTERVAL2 (0x1p50, 0x1p100, 0x1p-20, 0x1p-10, 10000)
/* x is negative, y is odd or even integer, or y is real not integer.  */
TEST_INTERVAL2 (V_NAME_D2 (pow), -0.0, -10.0, 3.0, 3.0, 10000)
TEST_INTERVAL2 (V_NAME_D2 (pow), -0.0, -10.0, 4.0, 4.0, 10000)
TEST_INTERVAL2 (V_NAME_D2 (pow), -0.0, -10.0, 0.0, 10.0, 10000)
TEST_INTERVAL2 (V_NAME_D2 (pow), 0.0, 10.0, -0.0, -10.0, 10000)
/* 1.0^y.  */
TEST_INTERVAL2 (V_NAME_D2 (pow), 1.0, 1.0, 0.0, 0x1p-50, 1000)
TEST_INTERVAL2 (V_NAME_D2 (pow), 1.0, 1.0, 0x1p-50, 1.0, 1000)
TEST_INTERVAL2 (V_NAME_D2 (pow), 1.0, 1.0, 1.0, 0x1p100, 1000)
TEST_INTERVAL2 (V_NAME_D2 (pow), 1.0, 1.0, -1.0, -0x1p120, 1000)