; Source: llvm-project/llvm/test/CodeGen/X86/lsr-reuse-trunc.ll
; (revision 2f448bf509432c1a19ec46ab8cbc7353c03c6280)
; RUN: llc < %s -mtriple=x86_64-linux -mcpu=nehalem | FileCheck %s
; RUN: llc < %s -mtriple=x86_64-win32 -mcpu=nehalem | FileCheck %s

; Full strength reduction wouldn't reduce register pressure, so LSR should
; stick with indexing here.

; CHECK: movaps        (%{{rsi|rdx}},%rax,4), [[X3:%xmm[0-9]+]]
; CHECK: cvtdq2ps
; CHECK: orps          {{%xmm[0-9]+}}, [[X4:%xmm[0-9]+]]
; CHECK: movaps        [[X4]], (%{{rdi|rcx}},%rax,4)
; CHECK: addq  $4, %rax
; CHECK: cmpl  %eax, (%{{rdx|r8}})
; CHECK-NEXT: jg

; Vectorized floorf over two float arrays: y[i] = floorf(x[i]), processing
; 4 floats (one <4 x float> vector) per iteration while i*4 < *n.
; The loop's trip test truncates the 64-bit IV to i32 and compares against a
; reloaded *n; LSR should keep the indexed addressing mode (see CHECKs above)
; rather than fully strength-reducing, which would raise register pressure.
define void @vvfloorf(ptr nocapture %y, ptr nocapture %x, ptr nocapture %n) nounwind {
entry:
  %0 = load i32, ptr %n, align 4
  %1 = icmp sgt i32 %0, 0
  br i1 %1, label %bb, label %return

bb:
  %indvar = phi i64 [ %indvar.next, %bb ], [ 0, %entry ]
  ; %tmp = indvar * 4 = element index of this iteration's first float.
  %tmp = shl i64 %indvar, 2
  %scevgep = getelementptr float, ptr %y, i64 %tmp
  %scevgep10 = getelementptr float, ptr %x, i64 %tmp
  %2 = load <4 x float>, ptr %scevgep10, align 16
  %3 = bitcast <4 x float> %2 to <4 x i32>
  ; %4/%5 = |x| (clear sign bits); %6 = extracted sign bits.
  %4 = and <4 x i32> %3, <i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647>
  %5 = bitcast <4 x i32> %4 to <4 x float>
  %6 = and <4 x i32> %3, <i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648>
  ; cmpnltps: mask of lanes where |x| >= 2^23 (already integral, no rounding).
  %7 = tail call <4 x float> @llvm.x86.sse.cmp.ps(<4 x float> %5, <4 x float> <float 8.388608e+06, float 8.388608e+06, float 8.388608e+06, float 8.388608e+06>, i8 5) nounwind
  %tmp.i4 = bitcast <4 x float> %7 to <4 x i32>
  %8 = xor <4 x i32> %tmp.i4, <i32 -1, i32 -1, i32 -1, i32 -1>
  ; For small-magnitude lanes, build sign-matched 2^23 (0x4B000000 | sign)
  ; and use the add/sub trick to round x to an integer-valued float.
  %9 = and <4 x i32> %8, <i32 1258291200, i32 1258291200, i32 1258291200, i32 1258291200>
  %10 = or <4 x i32> %9, %6
  %11 = bitcast <4 x i32> %10 to <4 x float>
  %12 = fadd <4 x float> %2, %11
  %13 = fsub <4 x float> %12, %11
  ; cmpltps: lanes where rounding went up (x < round(x)); subtract 1 there
  ; via cvtdq2ps of the all-ones (-1) mask to get floor semantics.
  %14 = tail call <4 x float> @llvm.x86.sse.cmp.ps(<4 x float> %2, <4 x float> %13, i8 1) nounwind
  %15 = bitcast <4 x float> %14 to <4 x i32>
  %16 = tail call <4 x float> @llvm.x86.sse2.cvtdq2ps(<4 x i32> %15) nounwind readnone
  %17 = fadd <4 x float> %13, %16
  %tmp.i = bitcast <4 x float> %17 to <4 x i32>
  ; Reapply the original sign bits (preserves -0.0 and signed results).
  %18 = or <4 x i32> %tmp.i, %6
  %19 = bitcast <4 x i32> %18 to <4 x float>
  store <4 x float> %19, ptr %scevgep, align 16
  ; Loop test: continue while *n > (element index + 4), with the 64-bit
  ; index truncated to i32 — the trunc this test is named for.
  %tmp12 = add i64 %tmp, 4
  %tmp13 = trunc i64 %tmp12 to i32
  %20 = load i32, ptr %n, align 4
  %21 = icmp sgt i32 %20, %tmp13
  %indvar.next = add i64 %indvar, 1
  br i1 %21, label %bb, label %return

return:
  ret void
}

; SSE intrinsics used above: cmpps (with an immediate predicate: 1 = LT,
; 5 = NLT) and cvtdq2ps (packed i32 -> float conversion).
declare <4 x float> @llvm.x86.sse.cmp.ps(<4 x float>, <4 x float>, i8) nounwind readnone

declare <4 x float> @llvm.x86.sse2.cvtdq2ps(<4 x i32>) nounwind readnone