/*
 * Single-precision vector log2 function.
 *
 * Copyright (c) 2022-2024, Arm Limited.
 * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
 */

#include "v_math.h"
#include "test_sig.h"
#include "test_defs.h"

static const struct data
{
  float32x4_t c0, c2, c4, c6, c8;
  uint32x4_t off, offset_lower_bound;
  uint16x8_t special_bound;
  uint32x4_t mantissa_mask;
  float c1, c3, c5, c7;
} data = {
  /* Coefficients generated using the Remez algorithm to approximate
     log2(1+r)/r for r in [-1/3, 1/3].
     rel error: 0x1.c4c4b0cp-26.  */
  .c0 = V4 (0x1.715476p0f), /* (float)(1 / ln(2)).  */
  .c1 = -0x1.715458p-1f,
  .c2 = V4 (0x1.ec701cp-2f),
  .c3 = -0x1.7171a4p-2f,
  .c4 = V4 (0x1.27a0b8p-2f),
  .c5 = -0x1.e5143ep-3f,
  .c6 = V4 (0x1.9d8ecap-3f),
  .c7 = -0x1.c675bp-3f,
  .c8 = V4 (0x1.9e495p-3f),
  /* Lower bound is the smallest positive normal float, 0x00800000.  For
     optimised register use, subnormals are detected after the offset has been
     subtracted, so the lower bound is 0x00800000 - offset (which wraps
     around).  */
  .offset_lower_bound = V4 (0x00800000 - 0x3f2aaaab),
  .special_bound = V8 (0x7f00), /* top16(asuint32(inf) - 0x00800000).  */
  .off = V4 (0x3f2aaaab), /* 0.666667.  */
  .mantissa_mask = V4 (0x007fffff),
};

static float32x4_t VPCS_ATTR NOINLINE
special_case (float32x4_t n, uint32x4_t u_off, float32x4_t p, float32x4_t r,
	      uint16x4_t cmp, const struct data *d)
{
  /* Fall back to scalar code.  */
  return v_call_f32 (log2f, vreinterpretq_f32_u32 (vaddq_u32 (u_off, d->off)),
		     vfmaq_f32 (n, p, r), vmovl_u16 (cmp));
}
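/* The bit-level reduction below can be hard to read through the intrinsics,
   so the following is a minimal scalar sketch of the same scheme, for
   exposition only: it is not part of the library and is never called.  It
   uses plain (unfused) arithmetic, assumes an arithmetic right shift of
   signed values (as the vector code's vshrq_n_s32 provides), and omits the
   special-case filtering, so it is only meaningful for positive, normal,
   finite x.  */
static inline float
scalar_log2f_sketch (float x)
{
  const uint32_t off = 0x3f2aaaab; /* Bits of roughly 2/3, as above.  */
  const float c[9] = { 0x1.715476p0f,   -0x1.715458p-1f, 0x1.ec701cp-2f,
		       -0x1.7171a4p-2f, 0x1.27a0b8p-2f,  -0x1.e5143ep-3f,
		       0x1.9d8ecap-3f,  -0x1.c675bp-3f,  0x1.9e495p-3f };

  union { float f; uint32_t u; } ux = { .f = x };
  uint32_t u_off = ux.u - off;

  /* Arithmetic shift of the offset bits recovers n, the power of two in
     x = 2^n * (1+r), including its sign.  */
  float n = (float) ((int32_t) u_off >> 23);

  /* Re-attach the offset to the mantissa bits so that 1+r lies in
     [2/3, 4/3), hence r lies in the polynomial's domain [-1/3, 1/3).  */
  union { uint32_t u; float f; } um = { .u = (u_off & 0x007fffff) + off };
  float r = um.f - 1.0f;

  /* log2(x) = n + r * P(r).  P is evaluated here by plain Horner; the
     vector code below uses a pairwise/FMA schedule of the same
     coefficients.  */
  float p = c[8];
  for (int i = 7; i >= 0; i--)
    p = p * r + c[i];
  return p * r + n;
}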
/* Fast implementation of single-precision AdvSIMD log2; relies on the same
   argument reduction as AdvSIMD logf.
   Maximum error: 2.48 ULPs
   _ZGVnN4v_log2f(0x1.558174p+0) got 0x1.a9be84p-2
				 want 0x1.a9be8p-2.  */
float32x4_t VPCS_ATTR NOINLINE V_NAME_F1 (log2) (float32x4_t x)
{
  const struct data *d = ptr_barrier (&data);

  /* To avoid having to mov x out of the way, keep u after the offset has
     been applied, and recover x by adding the offset back in the
     special-case handler.  */
  uint32x4_t u_off = vreinterpretq_u32_f32 (x);

  /* x = 2^n * (1+r), where 2/3 < 1+r < 4/3.  */
  u_off = vsubq_u32 (u_off, d->off);
  float32x4_t n = vcvtq_f32_s32 (
      vshrq_n_s32 (vreinterpretq_s32_u32 (u_off), 23)); /* sign extend.  */

  uint16x4_t special = vcge_u16 (vsubhn_u32 (u_off, d->offset_lower_bound),
				 vget_low_u16 (d->special_bound));

  uint32x4_t u = vaddq_u32 (vandq_u32 (u_off, d->mantissa_mask), d->off);
  float32x4_t r = vsubq_f32 (vreinterpretq_f32_u32 (u), v_f32 (1.0f));

  /* y = log2(1+r) + n.  */
  float32x4_t r2 = vmulq_f32 (r, r);

  float32x4_t c1357 = vld1q_f32 (&d->c1);
  float32x4_t c01 = vfmaq_laneq_f32 (d->c0, r, c1357, 0);
  float32x4_t c23 = vfmaq_laneq_f32 (d->c2, r, c1357, 1);
  float32x4_t c45 = vfmaq_laneq_f32 (d->c4, r, c1357, 2);
  float32x4_t c67 = vfmaq_laneq_f32 (d->c6, r, c1357, 3);
  float32x4_t p68 = vfmaq_f32 (c67, r2, d->c8);
  float32x4_t p48 = vfmaq_f32 (c45, r2, p68);
  float32x4_t p28 = vfmaq_f32 (c23, r2, p48);
  float32x4_t p = vfmaq_f32 (c01, r2, p28);

  if (unlikely (v_any_u16h (special)))
    return special_case (n, u_off, p, r, special, d);
  return vfmaq_f32 (n, p, r);
}

HALF_WIDTH_ALIAS_F1 (log2)

TEST_SIG (V, F, 1, log2, 0.01, 11.1)
TEST_ULP (V_NAME_F1 (log2), 1.99)
TEST_INTERVAL (V_NAME_F1 (log2), -0.0, -0x1p126, 100)
TEST_INTERVAL (V_NAME_F1 (log2), 0x1p-149, 0x1p-126, 4000)
TEST_INTERVAL (V_NAME_F1 (log2), 0x1p-126, 0x1p-23, 50000)
TEST_INTERVAL (V_NAME_F1 (log2), 0x1p-23, 1.0, 50000)
TEST_INTERVAL (V_NAME_F1 (log2), 1.0, 100, 50000)
TEST_INTERVAL (V_NAME_F1 (log2), 100, inf, 50000)
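/* Example usage (a sketch, not part of this file): V_NAME_F1 (log2) expands
   to the AArch64 vector-ABI name _ZGVnN4v_log2f, which takes and returns a
   float32x4_t under the vector PCS.  Assuming arm_neon.h, math.h and a
   declaration equivalent to the one below are in scope, a caller might
   process an array four lanes at a time as follows, falling back to scalar
   log2f for the tail:

     __attribute__ ((aarch64_vector_pcs)) float32x4_t
     _ZGVnN4v_log2f (float32x4_t);

     void
     log2f_array (float *y, const float *x, int n)
     {
       int i = 0;
       for (; i + 4 <= n; i += 4)
	 vst1q_f32 (y + i, _ZGVnN4v_log2f (vld1q_f32 (x + i)));
       for (; i < n; i++)
	 y[i] = log2f (x[i]);
     }

   HALF_WIDTH_ALIAS_F1 above additionally provides a half-width
   (float32x2_t) variant of the same routine.  */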