; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=armv8-eabi -mattr=+neon | FileCheck %s
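; Checks that icmp + sext of <4 x i32> vectors selects the NEON compare
; instructions (vceq/vcgt/vcge/vclt/vcle) or folds to a constant, for compares
; against a register operand and against a zero vector on either side.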

define arm_aapcs_vfpcc <4 x i32> @vcmpz_eq(<4 x i32> %0, <4 x i32> %b) {
; CHECK-LABEL: vcmpz_eq:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vceq.i32 q0, q0, q1
; CHECK-NEXT:    bx lr
  %2 = icmp eq <4 x i32> %0, %b
  %3 = sext <4 x i1> %2 to <4 x i32>
  ret <4 x i32> %3
}

define arm_aapcs_vfpcc <4 x i32> @vcmpz_ne(<4 x i32> %0, <4 x i32> %b) {
; CHECK-LABEL: vcmpz_ne:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vceq.i32 q8, q0, q1
; CHECK-NEXT:    vmvn q0, q8
; CHECK-NEXT:    bx lr
  %2 = icmp ne <4 x i32> %0, %b
  %3 = sext <4 x i1> %2 to <4 x i32>
  ret <4 x i32> %3
}

define arm_aapcs_vfpcc <4 x i32> @vcmpz_slt(<4 x i32> %0, <4 x i32> %b) {
; CHECK-LABEL: vcmpz_slt:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vcgt.s32 q0, q1, q0
; CHECK-NEXT:    bx lr
  %2 = icmp slt <4 x i32> %0, %b
  %3 = sext <4 x i1> %2 to <4 x i32>
  ret <4 x i32> %3
}

define arm_aapcs_vfpcc <4 x i32> @vcmpz_sle(<4 x i32> %0, <4 x i32> %b) {
; CHECK-LABEL: vcmpz_sle:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vcge.s32 q0, q1, q0
; CHECK-NEXT:    bx lr
  %2 = icmp sle <4 x i32> %0, %b
  %3 = sext <4 x i1> %2 to <4 x i32>
  ret <4 x i32> %3
}

define arm_aapcs_vfpcc <4 x i32> @vcmpz_sgt(<4 x i32> %0, <4 x i32> %b) {
; CHECK-LABEL: vcmpz_sgt:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vcgt.s32 q0, q0, q1
; CHECK-NEXT:    bx lr
  %2 = icmp sgt <4 x i32> %0, %b
  %3 = sext <4 x i1> %2 to <4 x i32>
  ret <4 x i32> %3
}

define arm_aapcs_vfpcc <4 x i32> @vcmpz_sge(<4 x i32> %0, <4 x i32> %b) {
; CHECK-LABEL: vcmpz_sge:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vcge.s32 q0, q0, q1
; CHECK-NEXT:    bx lr
  %2 = icmp sge <4 x i32> %0, %b
  %3 = sext <4 x i1> %2 to <4 x i32>
  ret <4 x i32> %3
}

define arm_aapcs_vfpcc <4 x i32> @vcmpz_ult(<4 x i32> %0, <4 x i32> %b) {
; CHECK-LABEL: vcmpz_ult:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vcgt.u32 q0, q1, q0
; CHECK-NEXT:    bx lr
  %2 = icmp ult <4 x i32> %0, %b
  %3 = sext <4 x i1> %2 to <4 x i32>
  ret <4 x i32> %3
}

define arm_aapcs_vfpcc <4 x i32> @vcmpz_ule(<4 x i32> %0, <4 x i32> %b) {
; CHECK-LABEL: vcmpz_ule:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vcge.u32 q0, q1, q0
; CHECK-NEXT:    bx lr
  %2 = icmp ule <4 x i32> %0, %b
  %3 = sext <4 x i1> %2 to <4 x i32>
  ret <4 x i32> %3
}

define arm_aapcs_vfpcc <4 x i32> @vcmpz_ugt(<4 x i32> %0, <4 x i32> %b) {
; CHECK-LABEL: vcmpz_ugt:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vcgt.u32 q0, q0, q1
; CHECK-NEXT:    bx lr
  %2 = icmp ugt <4 x i32> %0, %b
  %3 = sext <4 x i1> %2 to <4 x i32>
  ret <4 x i32> %3
}

define arm_aapcs_vfpcc <4 x i32> @vcmpz_uge(<4 x i32> %0, <4 x i32> %b) {
; CHECK-LABEL: vcmpz_uge:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vcge.u32 q0, q0, q1
; CHECK-NEXT:    bx lr
  %2 = icmp uge <4 x i32> %0, %b
  %3 = sext <4 x i1> %2 to <4 x i32>
  ret <4 x i32> %3
}

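; The same predicates compared against a zero vector on the right-hand side.
; These are expected to use the immediate-zero compare forms (e.g.
; vceq.i32 ..., #0); the trivially-false ult and trivially-true uge cases
; should fold to a constant vector.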
define arm_aapcs_vfpcc <4 x i32> @vcmpz_zr_eq(<4 x i32> %0) {
; CHECK-LABEL: vcmpz_zr_eq:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vceq.i32 q0, q0, #0
; CHECK-NEXT:    bx lr
  %2 = icmp eq <4 x i32> %0, zeroinitializer
  %3 = sext <4 x i1> %2 to <4 x i32>
  ret <4 x i32> %3
}

define arm_aapcs_vfpcc <4 x i32> @vcmpz_zr_ne(<4 x i32> %0) {
; CHECK-LABEL: vcmpz_zr_ne:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vceq.i32 q8, q0, #0
; CHECK-NEXT:    vmvn q0, q8
; CHECK-NEXT:    bx lr
  %2 = icmp ne <4 x i32> %0, zeroinitializer
  %3 = sext <4 x i1> %2 to <4 x i32>
  ret <4 x i32> %3
}

define arm_aapcs_vfpcc <4 x i32> @vcmpz_zr_slt(<4 x i32> %0) {
; CHECK-LABEL: vcmpz_zr_slt:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vclt.s32 q0, q0, #0
; CHECK-NEXT:    bx lr
  %2 = icmp slt <4 x i32> %0, zeroinitializer
  %3 = sext <4 x i1> %2 to <4 x i32>
  ret <4 x i32> %3
}

define arm_aapcs_vfpcc <4 x i32> @vcmpz_zr_sle(<4 x i32> %0) {
; CHECK-LABEL: vcmpz_zr_sle:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vcle.s32 q0, q0, #0
; CHECK-NEXT:    bx lr
  %2 = icmp sle <4 x i32> %0, zeroinitializer
  %3 = sext <4 x i1> %2 to <4 x i32>
  ret <4 x i32> %3
}

define arm_aapcs_vfpcc <4 x i32> @vcmpz_zr_sgt(<4 x i32> %0) {
; CHECK-LABEL: vcmpz_zr_sgt:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vcgt.s32 q0, q0, #0
; CHECK-NEXT:    bx lr
  %2 = icmp sgt <4 x i32> %0, zeroinitializer
  %3 = sext <4 x i1> %2 to <4 x i32>
  ret <4 x i32> %3
}

define arm_aapcs_vfpcc <4 x i32> @vcmpz_zr_sge(<4 x i32> %0) {
; CHECK-LABEL: vcmpz_zr_sge:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vcge.s32 q0, q0, #0
; CHECK-NEXT:    bx lr
  %2 = icmp sge <4 x i32> %0, zeroinitializer
  %3 = sext <4 x i1> %2 to <4 x i32>
  ret <4 x i32> %3
}

define arm_aapcs_vfpcc <4 x i32> @vcmpz_zr_ult(<4 x i32> %0) {
; CHECK-LABEL: vcmpz_zr_ult:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vmov.i32 q0, #0x0
; CHECK-NEXT:    bx lr
  %2 = icmp ult <4 x i32> %0, zeroinitializer
  %3 = sext <4 x i1> %2 to <4 x i32>
  ret <4 x i32> %3
}

define arm_aapcs_vfpcc <4 x i32> @vcmpz_zr_ule(<4 x i32> %0) {
; CHECK-LABEL: vcmpz_zr_ule:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vmov.i32 q8, #0x0
; CHECK-NEXT:    vcge.u32 q0, q8, q0
; CHECK-NEXT:    bx lr
  %2 = icmp ule <4 x i32> %0, zeroinitializer
  %3 = sext <4 x i1> %2 to <4 x i32>
  ret <4 x i32> %3
}

define arm_aapcs_vfpcc <4 x i32> @vcmpz_zr_ugt(<4 x i32> %0) {
; CHECK-LABEL: vcmpz_zr_ugt:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vceq.i32 q8, q0, #0
; CHECK-NEXT:    vmvn q0, q8
; CHECK-NEXT:    bx lr
  %2 = icmp ugt <4 x i32> %0, zeroinitializer
  %3 = sext <4 x i1> %2 to <4 x i32>
  ret <4 x i32> %3
}

define arm_aapcs_vfpcc <4 x i32> @vcmpz_zr_uge(<4 x i32> %0) {
; CHECK-LABEL: vcmpz_zr_uge:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vmov.i8 q0, #0xff
; CHECK-NEXT:    bx lr
  %2 = icmp uge <4 x i32> %0, zeroinitializer
  %3 = sext <4 x i1> %2 to <4 x i32>
  ret <4 x i32> %3
}

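; The same predicates with the zero vector on the left-hand side. The compares
; are expected to be commuted so the immediate-zero forms can still be used,
; with ule folding to all-ones and ugt folding to all-zeros.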
define arm_aapcs_vfpcc <4 x i32> @vcmpz_zl_eq(<4 x i32> %0) {
; CHECK-LABEL: vcmpz_zl_eq:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vceq.i32 q0, q0, #0
; CHECK-NEXT:    bx lr
  %2 = icmp eq <4 x i32> zeroinitializer, %0
  %3 = sext <4 x i1> %2 to <4 x i32>
  ret <4 x i32> %3
}

define arm_aapcs_vfpcc <4 x i32> @vcmpz_zl_ne(<4 x i32> %0) {
; CHECK-LABEL: vcmpz_zl_ne:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vceq.i32 q8, q0, #0
; CHECK-NEXT:    vmvn q0, q8
; CHECK-NEXT:    bx lr
  %2 = icmp ne <4 x i32> zeroinitializer, %0
  %3 = sext <4 x i1> %2 to <4 x i32>
  ret <4 x i32> %3
}

define arm_aapcs_vfpcc <4 x i32> @vcmpz_zl_slt(<4 x i32> %0) {
; CHECK-LABEL: vcmpz_zl_slt:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vcgt.s32 q0, q0, #0
; CHECK-NEXT:    bx lr
  %2 = icmp slt <4 x i32> zeroinitializer, %0
  %3 = sext <4 x i1> %2 to <4 x i32>
  ret <4 x i32> %3
}

define arm_aapcs_vfpcc <4 x i32> @vcmpz_zl_sle(<4 x i32> %0) {
; CHECK-LABEL: vcmpz_zl_sle:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vcge.s32 q0, q0, #0
; CHECK-NEXT:    bx lr
  %2 = icmp sle <4 x i32> zeroinitializer, %0
  %3 = sext <4 x i1> %2 to <4 x i32>
  ret <4 x i32> %3
}

define arm_aapcs_vfpcc <4 x i32> @vcmpz_zl_sgt(<4 x i32> %0) {
; CHECK-LABEL: vcmpz_zl_sgt:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vclt.s32 q0, q0, #0
; CHECK-NEXT:    bx lr
  %2 = icmp sgt <4 x i32> zeroinitializer, %0
  %3 = sext <4 x i1> %2 to <4 x i32>
  ret <4 x i32> %3
}

define arm_aapcs_vfpcc <4 x i32> @vcmpz_zl_sge(<4 x i32> %0) {
; CHECK-LABEL: vcmpz_zl_sge:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vcle.s32 q0, q0, #0
; CHECK-NEXT:    bx lr
  %2 = icmp sge <4 x i32> zeroinitializer, %0
  %3 = sext <4 x i1> %2 to <4 x i32>
  ret <4 x i32> %3
}

define arm_aapcs_vfpcc <4 x i32> @vcmpz_zl_ult(<4 x i32> %0) {
; CHECK-LABEL: vcmpz_zl_ult:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vceq.i32 q8, q0, #0
; CHECK-NEXT:    vmvn q0, q8
; CHECK-NEXT:    bx lr
  %2 = icmp ult <4 x i32> zeroinitializer, %0
  %3 = sext <4 x i1> %2 to <4 x i32>
  ret <4 x i32> %3
}

define arm_aapcs_vfpcc <4 x i32> @vcmpz_zl_ule(<4 x i32> %0) {
; CHECK-LABEL: vcmpz_zl_ule:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vmov.i8 q0, #0xff
; CHECK-NEXT:    bx lr
  %2 = icmp ule <4 x i32> zeroinitializer, %0
  %3 = sext <4 x i1> %2 to <4 x i32>
  ret <4 x i32> %3
}

define arm_aapcs_vfpcc <4 x i32> @vcmpz_zl_ugt(<4 x i32> %0) {
; CHECK-LABEL: vcmpz_zl_ugt:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vmov.i32 q0, #0x0
; CHECK-NEXT:    bx lr
  %2 = icmp ugt <4 x i32> zeroinitializer, %0
  %3 = sext <4 x i1> %2 to <4 x i32>
  ret <4 x i32> %3
}

define arm_aapcs_vfpcc <4 x i32> @vcmpz_zl_uge(<4 x i32> %0) {
; CHECK-LABEL: vcmpz_zl_uge:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vmov.i32 q8, #0x0
; CHECK-NEXT:    vcge.u32 q0, q8, q0
; CHECK-NEXT:    bx lr
  %2 = icmp uge <4 x i32> zeroinitializer, %0
  %3 = sext <4 x i1> %2 to <4 x i32>
  ret <4 x i32> %3
}