; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
; RUN: llc -mtriple=x86_64-unknown-unknown < %s | FileCheck -check-prefixes=X64 %s
; XUN: llc -mtriple=i386-pc-win32 < %s | FileCheck -check-prefix=WIN32 %s
; FIXME: Expansion support without libcalls

; Tests x86 lowering of @llvm.experimental.constrained.ldexp.* (strictfp):
; scalar f32/f64 lower to ldexpf/ldexp libcalls, and the v2f32 case is
; scalarized into two ldexpf calls.

; FIXME: Implement f16->f32 promotion for strictfp
; define half @test_strict_ldexp_f16_i32(ptr addrspace(1) %out, half %a, i32 %b) #2 {
;   %result = call half @llvm.experimental.constrained.ldexp.f16.i32(half %a, i32 %b, metadata !"round.dynamic", metadata !"fpexcept.strict")
;   ret half %result
; }

define float @test_strict_ldexp_f32_i32(ptr addrspace(1) %out, float %a, i32 %b) #2 {
; X64-LABEL: test_strict_ldexp_f32_i32:
; X64:       # %bb.0:
; X64-NEXT:    pushq %rax
; X64-NEXT:    .cfi_def_cfa_offset 16
; X64-NEXT:    movl %esi, %edi
; X64-NEXT:    callq ldexpf@PLT
; X64-NEXT:    popq %rax
; X64-NEXT:    .cfi_def_cfa_offset 8
; X64-NEXT:    retq
  %result = call float @llvm.experimental.constrained.ldexp.f32.i32(float %a, i32 %b, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret float %result
}

define double @test_strict_ldexp_f64_i32(ptr addrspace(1) %out, double %a, i32 %b) #2 {
; X64-LABEL: test_strict_ldexp_f64_i32:
; X64:       # %bb.0:
; X64-NEXT:    pushq %rax
; X64-NEXT:    .cfi_def_cfa_offset 16
; X64-NEXT:    movl %esi, %edi
; X64-NEXT:    callq ldexp@PLT
; X64-NEXT:    popq %rax
; X64-NEXT:    .cfi_def_cfa_offset 8
; X64-NEXT:    retq
  %result = call double @llvm.experimental.constrained.ldexp.f64.i32(double %a, i32 %b, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret double %result
}


define <2 x float> @test_strict_ldexp_v2f32_v2i32(ptr addrspace(1) %out, <2 x float> %a, <2 x i32> %b) #2 {
; X64-LABEL: test_strict_ldexp_v2f32_v2i32:
; X64:       # %bb.0:
; X64-NEXT:    subq $56, %rsp
; X64-NEXT:    .cfi_def_cfa_offset 64
; X64-NEXT:    movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; X64-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
; X64-NEXT:    movd %xmm1, %edi
; X64-NEXT:    callq ldexpf@PLT
; X64-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; X64-NEXT:    movaps (%rsp), %xmm0 # 16-byte Reload
; X64-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
; X64-NEXT:    pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; X64-NEXT:    # xmm1 = mem[1,1,1,1]
; X64-NEXT:    movd %xmm1, %edi
; X64-NEXT:    callq ldexpf@PLT
; X64-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; X64-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; X64-NEXT:    movaps %xmm1, %xmm0
; X64-NEXT:    addq $56, %rsp
; X64-NEXT:    .cfi_def_cfa_offset 8
; X64-NEXT:    retq
  %result = call <2 x float> @llvm.experimental.constrained.ldexp.v2f32.v2i32(<2 x float> %a, <2 x i32> %b, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <2 x float> %result
}

declare half @llvm.experimental.constrained.ldexp.f16.i32(half, i32, metadata, metadata) #1
declare float @llvm.experimental.constrained.ldexp.f32.i32(float, i32, metadata, metadata) #1
declare double @llvm.experimental.constrained.ldexp.f64.i32(double, i32, metadata, metadata) #1
declare x86_fp80 @llvm.experimental.constrained.ldexp.f80.i32(x86_fp80, i32, metadata, metadata) #1
declare <2 x float> @llvm.experimental.constrained.ldexp.v2f32.v2i32(<2 x float>, <2 x i32>, metadata, metadata) #1

attributes #0 = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
attributes #1 = { nocallback nofree nosync nounwind willreturn memory(inaccessiblemem: readwrite) }
attributes #2 = { strictfp }