; xref: /llvm-project/llvm/test/CodeGen/X86/movmsk.ll (revision 2f448bf509432c1a19ec46ab8cbc7353c03c6280)
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-apple-macosx10.6.6 -mattr=+sse4.1 | FileCheck %s

; Named aggregate types used by the sign-bit tests below.
%0 = type { double }
%union.anon = type { float }

; Sign bit of a double should compile to movmskpd+andl, not a GPR shift.
define i32 @double_signbit(double %d1) nounwind uwtable readnone ssp {
; CHECK-LABEL: double_signbit:
; CHECK:       ## %bb.0: ## %entry
; CHECK-NEXT:    movsd %xmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    movsd %xmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    movmskpd %xmm0, %eax
; CHECK-NEXT:    andl $1, %eax
; CHECK-NEXT:    retq
entry:
  %__x.addr.i = alloca double, align 8
  %__u.i = alloca %0, align 8
  store double %d1, ptr %__x.addr.i, align 8
  store double %d1, ptr %__u.i, align 8
  %tmp = bitcast double %d1 to i64
  %tmp1 = lshr i64 %tmp, 63
  %shr.i = trunc i64 %tmp1 to i32
  ret i32 %shr.i
}

; Sign bit of an fadd result still folds to movmskpd+andl.
define i32 @double_add_signbit(double %d1, double %d2) nounwind uwtable readnone ssp {
; CHECK-LABEL: double_add_signbit:
; CHECK:       ## %bb.0: ## %entry
; CHECK-NEXT:    addsd %xmm1, %xmm0
; CHECK-NEXT:    movsd %xmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    movsd %xmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    movmskpd %xmm0, %eax
; CHECK-NEXT:    andl $1, %eax
; CHECK-NEXT:    retq
entry:
  %__x.addr.i = alloca double, align 8
  %__u.i = alloca %0, align 8
  %add = fadd double %d1, %d2
  store double %add, ptr %__x.addr.i, align 8
  store double %add, ptr %__u.i, align 8
  %tmp = bitcast double %add to i64
  %tmp1 = lshr i64 %tmp, 63
  %shr.i = trunc i64 %tmp1 to i32
  ret i32 %shr.i
}

; Sign bit of a float should compile to movmskps+andl.
define i32 @float_signbit(float %f1) nounwind uwtable readnone ssp {
; CHECK-LABEL: float_signbit:
; CHECK:       ## %bb.0: ## %entry
; CHECK-NEXT:    movss %xmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    movss %xmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    movmskps %xmm0, %eax
; CHECK-NEXT:    andl $1, %eax
; CHECK-NEXT:    retq
entry:
  %__x.addr.i = alloca float, align 4
  %__u.i = alloca %union.anon, align 4
  store float %f1, ptr %__x.addr.i, align 4
  store float %f1, ptr %__u.i, align 4
  %0 = bitcast float %f1 to i32
  %shr.i = lshr i32 %0, 31
  ret i32 %shr.i
}

; Sign bit of an fadd float result still folds to movmskps+andl.
define i32 @float_add_signbit(float %f1, float %f2) nounwind uwtable readnone ssp {
; CHECK-LABEL: float_add_signbit:
; CHECK:       ## %bb.0: ## %entry
; CHECK-NEXT:    addss %xmm1, %xmm0
; CHECK-NEXT:    movss %xmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    movss %xmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    movmskps %xmm0, %eax
; CHECK-NEXT:    andl $1, %eax
; CHECK-NEXT:    retq
entry:
  %__x.addr.i = alloca float, align 4
  %__u.i = alloca %union.anon, align 4
  %add = fadd float %f1, %f2
  store float %add, ptr %__x.addr.i, align 4
  store float %add, ptr %__u.i, align 4
  %0 = bitcast float %add to i32
  %shr.i = lshr i32 %0, 31
  ret i32 %shr.i
}

; PR11570
define void @float_call_signbit(double %n) {
; CHECK-LABEL: float_call_signbit:
; CHECK:       ## %bb.0: ## %entry
; CHECK-NEXT:    movmskpd %xmm0, %edi
; CHECK-NEXT:    andl $1, %edi
; CHECK-NEXT:    jmp _float_call_signbit_callee ## TAILCALL
entry:
  %t0 = bitcast double %n to i64
  %tobool.i.i.i.i = icmp slt i64 %t0, 0
  tail call void @float_call_signbit_callee(i1 zeroext %tobool.i.i.i.i)
  ret void
}
declare void @float_call_signbit_callee(i1 zeroext)

; Known zeros
define i32 @knownbits_v2f64(<2 x double> %x) {
; CHECK-LABEL: knownbits_v2f64:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    movmskpd %xmm0, %eax
; CHECK-NEXT:    retq
  %1 = tail call i32 @llvm.x86.sse2.movmsk.pd(<2 x double> %x)
  %2 = and i32 %1, 3
  ret i32 %2
}

; Don't demand any movmsk signbits -> zero
define i32 @demandedbits_v16i8(<16 x i8> %x) {
; CHECK-LABEL: demandedbits_v16i8:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    xorl %eax, %eax
; CHECK-NEXT:    retq
  %1 = tail call i32 @llvm.x86.sse2.pmovmskb.128(<16 x i8> %x)
  %2 = and i32 %1, 65536
  ret i32 %2
}

; Simplify demanded vector elts
define i32 @demandedelts_v4f32(<4 x float> %x) {
; CHECK-LABEL: demandedelts_v4f32:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    movmskps %xmm0, %eax
; CHECK-NEXT:    andl $1, %eax
; CHECK-NEXT:    retq
  %1 = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> zeroinitializer
  %2 = tail call i32 @llvm.x86.sse.movmsk.ps(<4 x float> %1)
  %3 = and i32 %2, 1
  ret i32 %3
}

; rdar://10247336
; movmskp{s|d} only set low 4/2 bits, high bits are known zero

define i32 @t1(<4 x float> %x, ptr nocapture %indexTable) nounwind uwtable readonly ssp {
; CHECK-LABEL: t1:
; CHECK:       ## %bb.0: ## %entry
; CHECK-NEXT:    movmskps %xmm0, %eax
; CHECK-NEXT:    movl (%rdi,%rax,4), %eax
; CHECK-NEXT:    retq
entry:
  %0 = tail call i32 @llvm.x86.sse.movmsk.ps(<4 x float> %x) nounwind
  %idxprom = sext i32 %0 to i64
  %arrayidx = getelementptr inbounds i32, ptr %indexTable, i64 %idxprom
  %1 = load i32, ptr %arrayidx, align 4
  ret i32 %1
}

; As above, but movmskpd: the sext of the mask needs no extra instructions.
define i32 @t2(<4 x float> %x, ptr nocapture %indexTable) nounwind uwtable readonly ssp {
; CHECK-LABEL: t2:
; CHECK:       ## %bb.0: ## %entry
; CHECK-NEXT:    movmskpd %xmm0, %eax
; CHECK-NEXT:    movl (%rdi,%rax,4), %eax
; CHECK-NEXT:    retq
entry:
  %0 = bitcast <4 x float> %x to <2 x double>
  %1 = tail call i32 @llvm.x86.sse2.movmsk.pd(<2 x double> %0) nounwind
  %idxprom = sext i32 %1 to i64
  %arrayidx = getelementptr inbounds i32, ptr %indexTable, i64 %idxprom
  %2 = load i32, ptr %arrayidx, align 4
  ret i32 %2
}

; Declarations of the x86 movmsk intrinsics exercised above.
declare i32 @llvm.x86.sse2.pmovmskb.128(<16 x i8>) nounwind readnone
declare i32 @llvm.x86.sse2.movmsk.pd(<2 x double>) nounwind readnone
declare i32 @llvm.x86.sse.movmsk.ps(<4 x float>) nounwind readnone