; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+mmx,+sse2 | FileCheck %s

%SA = type <{ %union.anon, i32, [4 x i8], ptr, ptr, ptr, i32, [4 x i8] }>
%union.anon = type { <1 x i64> }

; Check that extra movd (copy) instructions aren't generated.
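; The value below is round-tripped through bitcasts between <1 x i64>,
; <4 x i16>, i64, and <2 x i32>; after coalescing it should stay in one MMX
; register rather than bouncing through a GPR on every cast. The i8 -18
; passed to pshufw is 0xEE, i.e. the [2,3,2,3] shuffle in the CHECK lines.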

define i32 @test(ptr %pSA, ptr %A, i32 %B, i32 %C, i32 %D, ptr %E) {
; CHECK-LABEL: test:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    pshufw $238, (%rdi), %mm0 # mm0 = mem[2,3,2,3]
; CHECK-NEXT:    movd %mm0, %eax
; CHECK-NEXT:    testl %eax, %eax
; CHECK-NEXT:    je .LBB0_1
; CHECK-NEXT:  # %bb.2: # %if.B
; CHECK-NEXT:    pshufw $238, %mm0, %mm0 # mm0 = mm0[2,3,2,3]
; CHECK-NEXT:    movq %mm0, %rax
; CHECK-NEXT:    jmp .LBB0_3
; CHECK-NEXT:  .LBB0_1: # %if.A
; CHECK-NEXT:    movd %edx, %mm1
; CHECK-NEXT:    psllq %mm1, %mm0
; CHECK-NEXT:    movq %mm0, %rax
; CHECK-NEXT:    testq %rax, %rax
; CHECK-NEXT:    jne .LBB0_4
; CHECK-NEXT:  .LBB0_3: # %if.C
; CHECK-NEXT:    testl %eax, %eax
; CHECK-NEXT:    je .LBB0_1
; CHECK-NEXT:  .LBB0_4: # %merge
; CHECK-NEXT:    pshufw $238, %mm0, %mm0 # mm0 = mm0[2,3,2,3]
; CHECK-NEXT:    movd %mm0, %eax
; CHECK-NEXT:    retq
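; entry: the loaded <1 x i64> is shuffled with pshufw, and the low 32 bits
; are pulled out with a single movd for the branch condition.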
entry:
  %shl = shl i32 1, %B
  %shl1 = shl i32 %C, %B
  %shl2 = shl i32 1, %D
  %v0 = load <1 x i64>, ptr %pSA, align 8
  %SA0 = getelementptr inbounds %SA, ptr %pSA, i64 0, i32 1
  %v1 = load i32, ptr %SA0, align 4
  %SA1 = getelementptr inbounds %SA, ptr %pSA, i64 0, i32 3
  %v2 = load ptr, ptr %SA1, align 8
  %SA2 = getelementptr inbounds %SA, ptr %pSA, i64 0, i32 4
  %v3 = load ptr, ptr %SA2, align 8
  %v4 = bitcast <1 x i64> %v0 to <4 x i16>
  %v5 = bitcast <4 x i16> %v4 to <1 x i64>
  %v6 = tail call <1 x i64> @llvm.x86.sse.pshuf.w(<1 x i64> %v5, i8 -18)
  %v7 = bitcast <1 x i64> %v6 to <4 x i16>
  %v8 = bitcast <4 x i16> %v7 to <1 x i64>
  %v9 = extractelement <1 x i64> %v8, i32 0
  %v10 = bitcast i64 %v9 to <2 x i32>
  %v11 = extractelement <2 x i32> %v10, i32 0
  %cmp = icmp eq i32 %v11, 0
  br i1 %cmp, label %if.A, label %if.B

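; if.A: variable 64-bit shift. The i32 count is moved into an MMX register
; (movd %edx, %mm1) so psllq can use its register form; no extra copy of
; the shifted value should be emitted.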
if.A:
  %pa = phi <1 x i64> [ %v8, %entry ], [ %vx, %if.C ]
  %v17 = extractelement <1 x i64> %pa, i32 0
  %v18 = bitcast i64 %v17 to <1 x i64>
  %v19 = tail call <1 x i64> @llvm.x86.mmx.pslli.q(<1 x i64> %v18, i32 %B) #2
  %v20 = bitcast <1 x i64> %v19 to i64
  %v21 = insertelement <1 x i64> undef, i64 %v20, i32 0
  %cmp3 = icmp eq i64 %v20, 0
  br i1 %cmp3, label %if.C, label %merge

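; if.B: a second pshufw on the already-shuffled value; the result is copied
; to a GPR once (movq %mm0, %rax) only because the scalar compare in if.C
; needs it.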
if.B:
  %v34 = bitcast <1 x i64> %v8 to <4 x i16>
  %v35 = bitcast <4 x i16> %v34 to <1 x i64>
  %v36 = tail call <1 x i64> @llvm.x86.sse.pshuf.w(<1 x i64> %v35, i8 -18)
  %v37 = bitcast <1 x i64> %v36 to <4 x i16>
  %v38 = bitcast <4 x i16> %v37 to <1 x i64>
  br label %if.C

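; if.C: loop latch. The phi merges the shift result from if.A with the
; shuffle result from if.B; the low 32 bits decide whether to loop back
; to if.A.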
if.C:
  %vx = phi <1 x i64> [ %v21, %if.A ], [ %v38, %if.B ]
  %cvt = bitcast <1 x i64> %vx to <2 x i32>
  %ex = extractelement <2 x i32> %cvt, i32 0
  %cmp2 = icmp eq i32 %ex, 0
  br i1 %cmp2, label %if.A, label %merge

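; merge: one final pshufw, after which the low 32 bits of the result are
; returned through a single movd.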
merge:
  %vy = phi <1 x i64> [ %v21, %if.A ], [ %vx, %if.C ]
  %v130 = bitcast <1 x i64> %vy to <4 x i16>
  %v131 = bitcast <4 x i16> %v130 to <1 x i64>
  %v132 = tail call <1 x i64> @llvm.x86.sse.pshuf.w(<1 x i64> %v131, i8 -18)
  %v133 = bitcast <1 x i64> %v132 to <4 x i16>
  %v134 = bitcast <4 x i16> %v133 to <1 x i64>
  %v135 = extractelement <1 x i64> %v134, i32 0
  %v136 = bitcast i64 %v135 to <2 x i32>
  %v137 = extractelement <2 x i32> %v136, i32 0
  ret i32 %v137
}

declare <1 x i64> @llvm.x86.sse.pshuf.w(<1 x i64>, i8)
declare <1 x i64> @llvm.x86.mmx.pslli.q(<1 x i64>, i32)