1; RUN: opt -mtriple=x86_64-unknown-linux-gnu -vector-library=SVML -passes=inject-tli-mappings -S < %s | FileCheck %s --check-prefixes=COMMON,SVML 2; RUN: opt -mtriple=x86_64-unknown-linux-gnu -vector-library=AMDLIBM -passes=inject-tli-mappings -S < %s | FileCheck %s --check-prefixes=COMMON,AMDLIBM 3; RUN: opt -mtriple=powerpc64-unknown-linux-gnu -vector-library=MASSV -passes=inject-tli-mappings -S < %s | FileCheck %s --check-prefixes=COMMON,MASSV 4; RUN: opt -mtriple=x86_64-unknown-linux-gnu -vector-library=LIBMVEC-X86 -passes=inject-tli-mappings -S < %s | FileCheck %s --check-prefixes=COMMON,LIBMVEC-X86 5; RUN: opt -mtriple=x86_64-unknown-linux-gnu -vector-library=Accelerate -passes=inject-tli-mappings -S < %s | FileCheck %s --check-prefixes=COMMON,ACCELERATE 6; RUN: opt -mtriple=aarch64-unknown-linux-gnu -vector-library=sleefgnuabi -passes=inject-tli-mappings -S < %s | FileCheck %s --check-prefixes=COMMON,SLEEFGNUABI 7; RUN: opt -mtriple=riscv64-unknown-linux-gnu -vector-library=sleefgnuabi -passes=inject-tli-mappings -S < %s | FileCheck %s --check-prefixes=COMMON,SLEEFGNUABI_RISCV 8; RUN: opt -mtriple=aarch64-unknown-linux-gnu -vector-library=ArmPL -passes=inject-tli-mappings -S < %s | FileCheck %s --check-prefixes=COMMON,ARMPL 9 10; COMMON-LABEL: @llvm.compiler.used = appending global 11; SVML-SAME: [6 x ptr] [ 12; SVML-SAME: ptr @__svml_sin2, 13; SVML-SAME: ptr @__svml_sin4, 14; SVML-SAME: ptr @__svml_sin8, 15; SVML-SAME: ptr @__svml_log10f4, 16; SVML-SAME: ptr @__svml_log10f8, 17; SVML-SAME: ptr @__svml_log10f16 18; AMDLIBM-SAME: [11 x ptr] [ 19; AMDLIBM-SAME: ptr @amd_vrd2_sin, 20; AMDLIBM-SAME: ptr @amd_vrd4_sin, 21; AMDLIBM-SAME: ptr @amd_vrd8_sin, 22; AMDLIBM-SAME: ptr @amd_vrd4_sincos, 23; AMDLIBM-SAME: ptr @amd_vrd8_sincos, 24; AMDLIBM-SAME: ptr @amd_vrs4_sincosf, 25; AMDLIBM-SAME: ptr @amd_vrs8_sincosf, 26; AMDLIBM-SAME: ptr @amd_vrs16_sincosf, 27; AMDLIBM-SAME: ptr @amd_vrs4_log10f, 28; AMDLIBM-SAME: ptr @amd_vrs8_log10f, 29; AMDLIBM-SAME: ptr 
@amd_vrs16_log10f 30; MASSV-SAME: [2 x ptr] [ 31; MASSV-SAME: ptr @__sind2, 32; MASSV-SAME: ptr @__log10f4 33; ACCELERATE-SAME: [1 x ptr] [ 34; ACCELERATE-SAME: ptr @vlog10f 35; LIBMVEC-X86-SAME: [2 x ptr] [ 36; LIBMVEC-X86-SAME: ptr @_ZGVbN2v_sin, 37; LIBMVEC-X86-SAME: ptr @_ZGVdN4v_sin 38; SLEEFGNUABI-SAME: [16 x ptr] [ 39; SLEEFGNUABI-SAME: ptr @_ZGVnN2vl8_modf, 40; SLEEFGNUABI-SAME: ptr @_ZGVsNxvl8_modf, 41; SLEEFGNUABI-SAME: ptr @_ZGVnN4vl4_modff, 42; SLEEFGNUABI-SAME: ptr @_ZGVsNxvl4_modff, 43; SLEEFGNUABI-SAME: ptr @_ZGVnN2v_sin, 44; SLEEFGNUABI-SAME: ptr @_ZGVsMxv_sin, 45; SLEEFGNUABI-SAME: ptr @_ZGVnN2vl8l8_sincos, 46; SLEEFGNUABI-SAME: ptr @_ZGVsNxvl8l8_sincos, 47; SLEEFGNUABI-SAME: ptr @_ZGVnN4vl4l4_sincosf, 48; SLEEFGNUABI-SAME: ptr @_ZGVsNxvl4l4_sincosf, 49; SLEEFGNUABI-SAME: ptr @_ZGVnN2vl8l8_sincospi, 50; SLEEFGNUABI-SAME: ptr @_ZGVsNxvl8l8_sincospi, 51; SLEEFGNUABI-SAME: ptr @_ZGVnN4vl4l4_sincospif, 52; SLEEFGNUABI-SAME: ptr @_ZGVsNxvl4l4_sincospif, 53; SLEEFGNUABI-SAME: ptr @_ZGVnN4v_log10f, 54; SLEEFGNUABI-SAME: ptr @_ZGVsMxv_log10f 55; ARMPL-SAME: [16 x ptr] [ 56; ARMPL-SAME: ptr @armpl_vmodfq_f64, 57; ARMPL-SAME: ptr @armpl_svmodf_f64_x, 58; ARMPL-SAME: ptr @armpl_vmodfq_f32, 59; ARMPL-SAME: ptr @armpl_svmodf_f32_x, 60; ARMPL-SAME: ptr @armpl_vsinq_f64, 61; ARMPL-SAME: ptr @armpl_svsin_f64_x, 62; ARMPL-SAME: ptr @armpl_vsincosq_f64, 63; ARMPL-SAME: ptr @armpl_svsincos_f64_x, 64; ARMPL-SAME: ptr @armpl_vsincosq_f32, 65; ARMPL-SAME: ptr @armpl_svsincos_f32_x, 66; ARMPL-SAME: ptr @armpl_vsincospiq_f64, 67; ARMPL-SAME: ptr @armpl_svsincospi_f64_x, 68; ARMPL-SAME: ptr @armpl_vsincospiq_f32, 69; ARMPL-SAME: ptr @armpl_svsincospi_f32_x, 70; ARMPL-SAME: ptr @armpl_vlog10q_f32, 71; ARMPL-SAME: ptr @armpl_svlog10_f32_x 72; COMMON-SAME: ], section "llvm.metadata" 73 74define double @modf_f64(double %in, ptr %iptr) { 75; COMMON-LABEL: @modf_f64( 76; SLEEFGNUABI: call double @modf(double %{{.*}}, ptr %{{.*}}) #[[MODF:[0-9]+]] 77; ARMPL: call double 
@modf(double %{{.*}}, ptr %{{.*}}) #[[MODF:[0-9]+]] 78 %call = tail call double @modf(double %in, ptr %iptr) 79 ret double %call 80} 81 82declare double @modf(double, ptr) #0 83 84define float @modf_f32(float %in, ptr %iptr) { 85; COMMON-LABEL: @modf_f32( 86; SLEEFGNUABI: call float @modff(float %{{.*}}, ptr %{{.*}}) #[[MODFF:[0-9]+]] 87; ARMPL: call float @modff(float %{{.*}}, ptr %{{.*}}) #[[MODFF:[0-9]+]] 88 %call = tail call float @modff(float %in, ptr %iptr) 89 ret float %call 90} 91 92declare float @modff(float, ptr) #0 93 94define double @sin_f64(double %in) { 95; COMMON-LABEL: @sin_f64( 96; SVML: call double @sin(double %{{.*}}) #[[SIN:[0-9]+]] 97; AMDLIBM: call double @sin(double %{{.*}}) #[[SIN:[0-9]+]] 98; MASSV: call double @sin(double %{{.*}}) #[[SIN:[0-9]+]] 99; ACCELERATE: call double @sin(double %{{.*}}) 100; LIBMVEC-X86: call double @sin(double %{{.*}}) #[[SIN:[0-9]+]] 101; SLEEFGNUABI: call double @sin(double %{{.*}}) #[[SIN:[0-9]+]] 102; SLEEFGNUABI_RISCV: call double @sin(double %{{.*}}) #[[SIN:[0-9]+]] 103; ARMPL: call double @sin(double %{{.*}}) #[[SIN:[0-9]+]] 104; No mapping of "sin" to a vector function for Accelerate. 
105; ACCELERATE-NOT: _ZGV_LLVM_{{.*}}_sin({{.*}}) 106 %call = tail call double @sin(double %in) 107 ret double %call 108} 109 110declare double @sin(double) #0 111 112define void @sincos_f64(double %in, ptr %sin, ptr %cos) { 113; COMMON-LABEL: @sincos_f64( 114; SLEEFGNUABI: call void @sincos(double %{{.*}}, ptr %{{.*}}, ptr %{{.*}}) #[[SINCOS:[0-9]+]] 115; ARMPL: call void @sincos(double %{{.*}}, ptr %{{.*}}, ptr %{{.*}}) #[[SINCOS:[0-9]+]] 116; AMDLIBM: call void @sincos(double %{{.*}}, ptr %{{.*}}, ptr %{{.*}}) #[[SINCOS:[0-9]+]] 117 call void @sincos(double %in, ptr %sin, ptr %cos) 118 ret void 119} 120 121declare void @sincos(double, ptr, ptr) #0 122 123define void @sincos_f32(float %in, ptr %sin, ptr %cos) { 124; COMMON-LABEL: @sincos_f32( 125; SLEEFGNUABI: call void @sincosf(float %{{.*}}, ptr %{{.*}}, ptr %{{.*}}) #[[SINCOSF:[0-9]+]] 126; ARMPL: call void @sincosf(float %{{.*}}, ptr %{{.*}}, ptr %{{.*}}) #[[SINCOSF:[0-9]+]] 127; AMDLIBM: call void @sincosf(float %{{.*}}, ptr %{{.*}}, ptr %{{.*}}) #[[SINCOSF:[0-9]+]] 128 call void @sincosf(float %in, ptr %sin, ptr %cos) 129 ret void 130} 131 132declare void @sincosf(float, ptr, ptr) #0 133 134define void @sincospi_f64(double %in, ptr %sin, ptr %cos) { 135; COMMON-LABEL: @sincospi_f64( 136; SLEEFGNUABI: call void @sincospi(double %{{.*}}, ptr %{{.*}}, ptr %{{.*}}) #[[SINCOSPI:[0-9]+]] 137; ARMPL: call void @sincospi(double %{{.*}}, ptr %{{.*}}, ptr %{{.*}}) #[[SINCOSPI:[0-9]+]] 138 call void @sincospi(double %in, ptr %sin, ptr %cos) 139 ret void 140} 141 142declare void @sincospi(double, ptr, ptr) #0 143 144define void @sincospi_f32(float %in, ptr %sin, ptr %cos) { 145; COMMON-LABEL: @sincospi_f32( 146; SLEEFGNUABI: call void @sincospif(float %{{.*}}, ptr %{{.*}}, ptr %{{.*}}) #[[SINCOSPIF:[0-9]+]] 147; ARMPL: call void @sincospif(float %{{.*}}, ptr %{{.*}}, ptr %{{.*}}) #[[SINCOSPIF:[0-9]+]] 148 call void @sincospif(float %in, ptr %sin, ptr %cos) 149 ret void 150} 151 152declare void @sincospif(float, ptr, 
ptr) #0 153 154define float @call_llvm.log10.f32(float %in) { 155; COMMON-LABEL: @call_llvm.log10.f32( 156; SVML: call float @llvm.log10.f32(float %{{.*}}) 157; AMDLIBM: call float @llvm.log10.f32(float %{{.*}}) #[[LOG10:[0-9]+]] 158; LIBMVEC-X86: call float @llvm.log10.f32(float %{{.*}}) 159; MASSV: call float @llvm.log10.f32(float %{{.*}}) #[[LOG10:[0-9]+]] 160; ACCELERATE: call float @llvm.log10.f32(float %{{.*}}) #[[LOG10:[0-9]+]] 161; SLEEFGNUABI: call float @llvm.log10.f32(float %{{.*}}) #[[LOG10:[0-9]+]] 162; SLEEFGNUABI_RISCV: call float @llvm.log10.f32(float %{{.*}}) #[[LOG10:[0-9]+]] 163; ARMPL: call float @llvm.log10.f32(float %{{.*}}) #[[LOG10:[0-9]+]] 164; No mapping of "llvm.log10.f32" to a vector function for SVML. 165; SVML-NOT: _ZGV_LLVM_{{.*}}_llvm.log10.f32({{.*}}) 166; AMDLIBM-NOT: _ZGV_LLVM_{{.*}}_llvm.log10.f32({{.*}}) 167; LIBMVEC-X86-NOT: _ZGV_LLVM_{{.*}}_llvm.log10.f32({{.*}}) 168 %call = tail call float @llvm.log10.f32(float %in) 169 ret float %call 170} 171 172declare float @llvm.log10.f32(float) #0 173 174; SVML: declare <2 x double> @__svml_sin2(<2 x double>) 175; SVML: declare <4 x double> @__svml_sin4(<4 x double>) 176; SVML: declare <8 x double> @__svml_sin8(<8 x double>) 177; SVML: declare <4 x float> @__svml_log10f4(<4 x float>) 178; SVML: declare <8 x float> @__svml_log10f8(<8 x float>) 179; SVML: declare <16 x float> @__svml_log10f16(<16 x float>) 180 181; AMDLIBM: declare <2 x double> @amd_vrd2_sin(<2 x double>) 182; AMDLIBM: declare <4 x double> @amd_vrd4_sin(<4 x double>) 183; AMDLIBM: declare <8 x double> @amd_vrd8_sin(<8 x double>) 184; AMDLIBM: declare void @amd_vrd4_sincos(<4 x double>, ptr, ptr) 185; AMDLIBM: declare void @amd_vrd8_sincos(<8 x double>, ptr, ptr) 186; AMDLIBM: declare void @amd_vrs4_sincosf(<4 x float>, ptr, ptr) 187; AMDLIBM: declare void @amd_vrs8_sincosf(<8 x float>, ptr, ptr) 188; AMDLIBM: declare void @amd_vrs16_sincosf(<16 x float>, ptr, ptr) 189; AMDLIBM: declare <4 x float> @amd_vrs4_log10f(<4 x 
float>) 190; AMDLIBM: declare <8 x float> @amd_vrs8_log10f(<8 x float>) 191; AMDLIBM: declare <16 x float> @amd_vrs16_log10f(<16 x float>) 192 193; MASSV: declare <2 x double> @__sind2(<2 x double>) 194; MASSV: declare <4 x float> @__log10f4(<4 x float>) 195 196; LIBMVEC-X86: declare <2 x double> @_ZGVbN2v_sin(<2 x double>) 197; LIBMVEC-X86: declare <4 x double> @_ZGVdN4v_sin(<4 x double>) 198 199; ACCELERATE: declare <4 x float> @vlog10f(<4 x float>) 200 201; SLEEFGNUABI: declare <2 x double> @_ZGVnN2vl8_modf(<2 x double>, ptr) 202; SLEEFGNUABI: declare <vscale x 2 x double> @_ZGVsNxvl8_modf(<vscale x 2 x double>, ptr) 203; SLEEFGNUABI: declare <4 x float> @_ZGVnN4vl4_modff(<4 x float>, ptr) 204; SLEEFGNUABI: declare <vscale x 4 x float> @_ZGVsNxvl4_modff(<vscale x 4 x float>, ptr) 205; SLEEFGNUABI: declare <2 x double> @_ZGVnN2v_sin(<2 x double>) 206; SLEEFGNUABI: declare <vscale x 2 x double> @_ZGVsMxv_sin(<vscale x 2 x double>, <vscale x 2 x i1>) 207; SLEEFGNUABI: declare void @_ZGVnN2vl8l8_sincos(<2 x double>, ptr, ptr) 208; SLEEFGNUABI: declare void @_ZGVsNxvl8l8_sincos(<vscale x 2 x double>, ptr, ptr) 209; SLEEFGNUABI: declare void @_ZGVnN4vl4l4_sincosf(<4 x float>, ptr, ptr) 210; SLEEFGNUABI: declare void @_ZGVsNxvl4l4_sincosf(<vscale x 4 x float>, ptr, ptr) 211; SLEEFGNUABI: declare void @_ZGVnN2vl8l8_sincospi(<2 x double>, ptr, ptr) 212; SLEEFGNUABI: declare void @_ZGVsNxvl8l8_sincospi(<vscale x 2 x double>, ptr, ptr) 213; SLEEFGNUABI: declare void @_ZGVnN4vl4l4_sincospif(<4 x float>, ptr, ptr) 214; SLEEFGNUABI: declare void @_ZGVsNxvl4l4_sincospif(<vscale x 4 x float>, ptr, ptr) 215; SLEEFGNUABI: declare <4 x float> @_ZGVnN4v_log10f(<4 x float>) 216; SLEEFGNUABI: declare <vscale x 4 x float> @_ZGVsMxv_log10f(<vscale x 4 x float>, <vscale x 4 x i1>) 217 218; SLEEFGNUABI_RISCV: declare <vscale x 2 x double> @Sleef_sindx_u10rvvm2(<vscale x 2 x double>) 219; SLEEFGNUABI_RISCV: declare <vscale x 4 x float> @Sleef_log10fx_u10rvvm2(<vscale x 4 x float>) 220 
221; ARMPL: declare <2 x double> @armpl_vmodfq_f64(<2 x double>, ptr) 222; ARMPL: declare <vscale x 2 x double> @armpl_svmodf_f64_x(<vscale x 2 x double>, ptr, <vscale x 2 x i1>) 223; ARMPL: declare <4 x float> @armpl_vmodfq_f32(<4 x float>, ptr) 224; ARMPL: declare <vscale x 4 x float> @armpl_svmodf_f32_x(<vscale x 4 x float>, ptr, <vscale x 4 x i1>) 225; ARMPL: declare <2 x double> @armpl_vsinq_f64(<2 x double>) 226; ARMPL: declare <vscale x 2 x double> @armpl_svsin_f64_x(<vscale x 2 x double>, <vscale x 2 x i1>) 227; ARMPL: declare void @armpl_vsincosq_f64(<2 x double>, ptr, ptr) 228; ARMPL: declare void @armpl_svsincos_f64_x(<vscale x 2 x double>, ptr, ptr, <vscale x 2 x i1>) 229; ARMPL: declare void @armpl_vsincosq_f32(<4 x float>, ptr, ptr) 230; ARMPL: declare void @armpl_svsincos_f32_x(<vscale x 4 x float>, ptr, ptr, <vscale x 4 x i1>) 231; ARMPL: declare void @armpl_vsincospiq_f64(<2 x double>, ptr, ptr) 232; ARMPL: declare void @armpl_svsincospi_f64_x(<vscale x 2 x double>, ptr, ptr, <vscale x 2 x i1>) 233; ARMPL: declare void @armpl_vsincospiq_f32(<4 x float>, ptr, ptr) 234; ARMPL: declare void @armpl_svsincospi_f32_x(<vscale x 4 x float>, ptr, ptr, <vscale x 4 x i1>) 235; ARMPL: declare <4 x float> @armpl_vlog10q_f32(<4 x float>) 236; ARMPL: declare <vscale x 4 x float> @armpl_svlog10_f32_x(<vscale x 4 x float>, <vscale x 4 x i1>) 237 238attributes #0 = { nounwind readnone } 239 240; SVML: attributes #[[SIN]] = { "vector-function-abi-variant"= 241; SVML-SAME: "_ZGV_LLVM_N2v_sin(__svml_sin2), 242; SVML-SAME: _ZGV_LLVM_N4v_sin(__svml_sin4), 243; SVML-SAME: _ZGV_LLVM_N8v_sin(__svml_sin8)" } 244 245; AMDLIBM: attributes #[[SIN]] = { "vector-function-abi-variant"= 246; AMDLIBM-SAME: "_ZGV_LLVM_N2v_sin(amd_vrd2_sin), 247; AMDLIBM-SAME: _ZGV_LLVM_N4v_sin(amd_vrd4_sin), 248; AMDLIBM-SAME: _ZGV_LLVM_N8v_sin(amd_vrd8_sin)" } 249; AMDLIBM: attributes #[[SINCOS]] = { "vector-function-abi-variant"= 250; AMDLIBM-SAME: "_ZGV_LLVM_N4vl8l8_sincos(amd_vrd4_sincos), 251; 
AMDLIBM-SAME: _ZGV_LLVM_N8vl8l8_sincos(amd_vrd8_sincos)" } 252; AMDLIBM: attributes #[[SINCOSF]] = { "vector-function-abi-variant"= 253; AMDLIBM-SAME: "_ZGV_LLVM_N4vl4l4_sincosf(amd_vrs4_sincosf), 254; AMDLIBM-SAME: _ZGV_LLVM_N8vl4l4_sincosf(amd_vrs8_sincosf), 255; AMDLIBM-SAME: _ZGV_LLVM_N16vl4l4_sincosf(amd_vrs16_sincosf)" } 256; AMDLIBM: attributes #[[LOG10]] = { "vector-function-abi-variant"= 257; AMDLIBM-SAME: "_ZGV_LLVM_N4v_llvm.log10.f32(amd_vrs4_log10f), 258; AMDLIBM-SAME: _ZGV_LLVM_N8v_llvm.log10.f32(amd_vrs8_log10f), 259; AMDLIBM-SAME: _ZGV_LLVM_N16v_llvm.log10.f32(amd_vrs16_log10f)" } 260 261; MASSV: attributes #[[SIN]] = { "vector-function-abi-variant"= 262; MASSV-SAME: "_ZGV_LLVM_N2v_sin(__sind2)" } 263; MASSV: attributes #[[LOG10]] = { "vector-function-abi-variant"= 264; MASSV-SAME: "_ZGV_LLVM_N4v_llvm.log10.f32(__log10f4)" } 265 266; ACCELERATE: attributes #[[LOG10]] = { "vector-function-abi-variant"= 267; ACCELERATE-SAME: "_ZGV_LLVM_N4v_llvm.log10.f32(vlog10f)" } 268 269; LIBMVEC-X86: attributes #[[SIN]] = { "vector-function-abi-variant"= 270; LIBMVEC-X86-SAME: "_ZGV_LLVM_N2v_sin(_ZGVbN2v_sin), 271; LIBMVEC-X86-SAME: _ZGV_LLVM_N4v_sin(_ZGVdN4v_sin)" } 272 273; SLEEFGNUABI: attributes #[[MODF]] = { "vector-function-abi-variant"= 274; SLEEFGNUABI-SAME: "_ZGV_LLVM_N2vl8_modf(_ZGVnN2vl8_modf), 275; SLEEFGNUABI-SAME: _ZGVsNxvl8_modf(_ZGVsNxvl8_modf)" } 276; SLEEFGNUABI: attributes #[[MODFF]] = { "vector-function-abi-variant"= 277; SLEEFGNUABI-SAME: "_ZGV_LLVM_N4vl4_modff(_ZGVnN4vl4_modff), 278; SLEEFGNUABI-SAME: _ZGVsNxvl4_modff(_ZGVsNxvl4_modff)" } 279; SLEEFGNUABI: attributes #[[SIN]] = { "vector-function-abi-variant"= 280; SLEEFGNUABI-SAME: "_ZGV_LLVM_N2v_sin(_ZGVnN2v_sin), 281; SLEEFGNUABI-SAME: _ZGVsMxv_sin(_ZGVsMxv_sin)" } 282; SLEEFGNUABI: attributes #[[SINCOS]] = { "vector-function-abi-variant"= 283; SLEEFGNUABI-SAME: "_ZGV_LLVM_N2vl8l8_sincos(_ZGVnN2vl8l8_sincos), 284; SLEEFGNUABI-SAME: _ZGVsNxvl8l8_sincos(_ZGVsNxvl8l8_sincos)" } 285; 
SLEEFGNUABI: attributes #[[SINCOSF]] = { "vector-function-abi-variant"= 286; SLEEFGNUABI-SAME: "_ZGV_LLVM_N4vl4l4_sincosf(_ZGVnN4vl4l4_sincosf), 287; SLEEFGNUABI-SAME: _ZGVsNxvl4l4_sincosf(_ZGVsNxvl4l4_sincosf)" } 288; SLEEFGNUABI: attributes #[[SINCOSPI]] = { "vector-function-abi-variant"= 289; SLEEFGNUABI-SAME: "_ZGV_LLVM_N2vl8l8_sincospi(_ZGVnN2vl8l8_sincospi), 290; SLEEFGNUABI-SAME: _ZGVsNxvl8l8_sincospi(_ZGVsNxvl8l8_sincospi)" } 291; SLEEFGNUABI: attributes #[[SINCOSPIF]] = { "vector-function-abi-variant"= 292; SLEEFGNUABI-SAME: "_ZGV_LLVM_N4vl4l4_sincospif(_ZGVnN4vl4l4_sincospif), 293; SLEEFGNUABI-SAME: _ZGVsNxvl4l4_sincospif(_ZGVsNxvl4l4_sincospif)" } 294; SLEEFGNUABI: attributes #[[LOG10]] = { "vector-function-abi-variant"= 295; SLEEFGNUABI-SAME: "_ZGV_LLVM_N4v_llvm.log10.f32(_ZGVnN4v_log10f), 296; SLEEFGNUABI-SAME: _ZGVsMxv_llvm.log10.f32(_ZGVsMxv_log10f)" } 297 298; SLEEFGNUABI_RISCV: attributes #[[SIN]] = { "vector-function-abi-variant"= 299; SLEEFGNUABI_RISCV-SAME: "_ZGVrNxv_sin(Sleef_sindx_u10rvvm2)" } 300; SLEEFGNUABI_RISCV: attributes #[[LOG10]] = { "vector-function-abi-variant"= 301; SLEEFGNUABI_RISCV-SAME: "_ZGVrNxv_llvm.log10.f32(Sleef_log10fx_u10rvvm2)" } 302 303; ARMPL: attributes #[[MODF]] = { "vector-function-abi-variant"= 304; ARMPL-SAME: "_ZGV_LLVM_N2vl8_modf(armpl_vmodfq_f64), 305; ARMPL-SAME: _ZGVsMxvl8_modf(armpl_svmodf_f64_x)" } 306; ARMPL: attributes #[[MODFF]] = { "vector-function-abi-variant"= 307; ARMPL-SAME: "_ZGV_LLVM_N4vl4_modff(armpl_vmodfq_f32), 308; ARMPL-SAME: _ZGVsMxvl4_modff(armpl_svmodf_f32_x)" } 309; ARMPL: attributes #[[SIN]] = { "vector-function-abi-variant"= 310; ARMPL-SAME: "_ZGV_LLVM_N2v_sin(armpl_vsinq_f64), 311; ARMPL-SAME: _ZGVsMxv_sin(armpl_svsin_f64_x)" } 312; ARMPL: attributes #[[SINCOS]] = { "vector-function-abi-variant"= 313; ARMPL-SAME: "_ZGV_LLVM_N2vl8l8_sincos(armpl_vsincosq_f64), 314; ARMPL-SAME: _ZGVsMxvl8l8_sincos(armpl_svsincos_f64_x)" } 315; ARMPL: attributes #[[SINCOSF]] = { 
"vector-function-abi-variant"= 316; ARMPL-SAME: "_ZGV_LLVM_N4vl4l4_sincosf(armpl_vsincosq_f32), 317; ARMPL-SAME: _ZGVsMxvl4l4_sincosf(armpl_svsincos_f32_x)" } 318; ARMPL: attributes #[[SINCOSPI]] = { "vector-function-abi-variant"= 319; ARMPL-SAME: "_ZGV_LLVM_N2vl8l8_sincospi(armpl_vsincospiq_f64), 320; ARMPL-SAME: _ZGVsMxvl8l8_sincospi(armpl_svsincospi_f64_x)" } 321; ARMPL: attributes #[[SINCOSPIF]] = { "vector-function-abi-variant"= 322; ARMPL-SAME: "_ZGV_LLVM_N4vl4l4_sincospif(armpl_vsincospiq_f32), 323; ARMPL-SAME: _ZGVsMxvl4l4_sincospif(armpl_svsincospi_f32_x)" } 324; ARMPL: attributes #[[LOG10]] = { "vector-function-abi-variant"= 325; ARMPL-SAME: "_ZGV_LLVM_N4v_llvm.log10.f32(armpl_vlog10q_f32), 326; ARMPL-SAME: _ZGVsMxv_llvm.log10.f32(armpl_svlog10_f32_x)" } 327