; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s

;; SETEQ
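;; icmp eq is expected to select vseq.{b/h/w/d} directly; the splat constant 15
;; uses the immediate form vseqi.{b/h/w/d}, presumably because it fits the
;; signed 5-bit immediate field.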
define void @v16i8_icmp_eq_imm(ptr %res, ptr %a0) nounwind {
; CHECK-LABEL: v16i8_icmp_eq_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vld $vr0, $a1, 0
; CHECK-NEXT:    vseqi.b $vr0, $vr0, 15
; CHECK-NEXT:    vst $vr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <16 x i8>, ptr %a0
  %cmp = icmp eq <16 x i8> %v0, <i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15>
  %ext = sext <16 x i1> %cmp to <16 x i8>
  store <16 x i8> %ext, ptr %res
  ret void
}

define void @v16i8_icmp_eq(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v16i8_icmp_eq:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vld $vr0, $a1, 0
; CHECK-NEXT:    vld $vr1, $a2, 0
; CHECK-NEXT:    vseq.b $vr0, $vr0, $vr1
; CHECK-NEXT:    vst $vr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <16 x i8>, ptr %a0
  %v1 = load <16 x i8>, ptr %a1
  %cmp = icmp eq <16 x i8> %v0, %v1
  %ext = sext <16 x i1> %cmp to <16 x i8>
  store <16 x i8> %ext, ptr %res
  ret void
}

define void @v8i16_icmp_eq_imm(ptr %res, ptr %a0) nounwind {
; CHECK-LABEL: v8i16_icmp_eq_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vld $vr0, $a1, 0
; CHECK-NEXT:    vseqi.h $vr0, $vr0, 15
; CHECK-NEXT:    vst $vr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <8 x i16>, ptr %a0
  %cmp = icmp eq <8 x i16> %v0, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
  %ext = sext <8 x i1> %cmp to <8 x i16>
  store <8 x i16> %ext, ptr %res
  ret void
}

define void @v8i16_icmp_eq(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v8i16_icmp_eq:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vld $vr0, $a1, 0
; CHECK-NEXT:    vld $vr1, $a2, 0
; CHECK-NEXT:    vseq.h $vr0, $vr0, $vr1
; CHECK-NEXT:    vst $vr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <8 x i16>, ptr %a0
  %v1 = load <8 x i16>, ptr %a1
  %cmp = icmp eq <8 x i16> %v0, %v1
  %ext = sext <8 x i1> %cmp to <8 x i16>
  store <8 x i16> %ext, ptr %res
  ret void
}

define void @v4i32_icmp_eq_imm(ptr %res, ptr %a0) nounwind {
; CHECK-LABEL: v4i32_icmp_eq_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vld $vr0, $a1, 0
; CHECK-NEXT:    vseqi.w $vr0, $vr0, 15
; CHECK-NEXT:    vst $vr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <4 x i32>, ptr %a0
  %cmp = icmp eq <4 x i32> %v0, <i32 15, i32 15, i32 15, i32 15>
  %ext = sext <4 x i1> %cmp to <4 x i32>
  store <4 x i32> %ext, ptr %res
  ret void
}

define void @v4i32_icmp_eq(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v4i32_icmp_eq:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vld $vr0, $a1, 0
; CHECK-NEXT:    vld $vr1, $a2, 0
; CHECK-NEXT:    vseq.w $vr0, $vr0, $vr1
; CHECK-NEXT:    vst $vr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <4 x i32>, ptr %a0
  %v1 = load <4 x i32>, ptr %a1
  %cmp = icmp eq <4 x i32> %v0, %v1
  %ext = sext <4 x i1> %cmp to <4 x i32>
  store <4 x i32> %ext, ptr %res
  ret void
}

define void @v2i64_icmp_eq_imm(ptr %res, ptr %a0) nounwind {
; CHECK-LABEL: v2i64_icmp_eq_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vld $vr0, $a1, 0
; CHECK-NEXT:    vseqi.d $vr0, $vr0, 15
; CHECK-NEXT:    vst $vr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <2 x i64>, ptr %a0
  %cmp = icmp eq <2 x i64> %v0, <i64 15, i64 15>
  %ext = sext <2 x i1> %cmp to <2 x i64>
  store <2 x i64> %ext, ptr %res
  ret void
}

define void @v2i64_icmp_eq(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v2i64_icmp_eq:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vld $vr0, $a1, 0
; CHECK-NEXT:    vld $vr1, $a2, 0
; CHECK-NEXT:    vseq.d $vr0, $vr0, $vr1
; CHECK-NEXT:    vst $vr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <2 x i64>, ptr %a0
  %v1 = load <2 x i64>, ptr %a1
  %cmp = icmp eq <2 x i64> %v0, %v1
  %ext = sext <2 x i1> %cmp to <2 x i64>
  store <2 x i64> %ext, ptr %res
  ret void
}

;; SETLE
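;; Signed less-or-equal should map directly to vsle.{b/h/w/d}, with
;; vslei.{b/h/w/d} covering the splat constant 15.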
define void @v16i8_icmp_sle_imm(ptr %res, ptr %a0) nounwind {
; CHECK-LABEL: v16i8_icmp_sle_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vld $vr0, $a1, 0
; CHECK-NEXT:    vslei.b $vr0, $vr0, 15
; CHECK-NEXT:    vst $vr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <16 x i8>, ptr %a0
  %cmp = icmp sle <16 x i8> %v0, <i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15>
  %ext = sext <16 x i1> %cmp to <16 x i8>
  store <16 x i8> %ext, ptr %res
  ret void
}

define void @v16i8_icmp_sle(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v16i8_icmp_sle:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vld $vr0, $a1, 0
; CHECK-NEXT:    vld $vr1, $a2, 0
; CHECK-NEXT:    vsle.b $vr0, $vr0, $vr1
; CHECK-NEXT:    vst $vr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <16 x i8>, ptr %a0
  %v1 = load <16 x i8>, ptr %a1
  %cmp = icmp sle <16 x i8> %v0, %v1
  %ext = sext <16 x i1> %cmp to <16 x i8>
  store <16 x i8> %ext, ptr %res
  ret void
}

define void @v8i16_icmp_sle_imm(ptr %res, ptr %a0) nounwind {
; CHECK-LABEL: v8i16_icmp_sle_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vld $vr0, $a1, 0
; CHECK-NEXT:    vslei.h $vr0, $vr0, 15
; CHECK-NEXT:    vst $vr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <8 x i16>, ptr %a0
  %cmp = icmp sle <8 x i16> %v0, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
  %ext = sext <8 x i1> %cmp to <8 x i16>
  store <8 x i16> %ext, ptr %res
  ret void
}

define void @v8i16_icmp_sle(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v8i16_icmp_sle:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vld $vr0, $a1, 0
; CHECK-NEXT:    vld $vr1, $a2, 0
; CHECK-NEXT:    vsle.h $vr0, $vr0, $vr1
; CHECK-NEXT:    vst $vr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <8 x i16>, ptr %a0
  %v1 = load <8 x i16>, ptr %a1
  %cmp = icmp sle <8 x i16> %v0, %v1
  %ext = sext <8 x i1> %cmp to <8 x i16>
  store <8 x i16> %ext, ptr %res
  ret void
}

define void @v4i32_icmp_sle_imm(ptr %res, ptr %a0) nounwind {
; CHECK-LABEL: v4i32_icmp_sle_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vld $vr0, $a1, 0
; CHECK-NEXT:    vslei.w $vr0, $vr0, 15
; CHECK-NEXT:    vst $vr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <4 x i32>, ptr %a0
  %cmp = icmp sle <4 x i32> %v0, <i32 15, i32 15, i32 15, i32 15>
  %ext = sext <4 x i1> %cmp to <4 x i32>
  store <4 x i32> %ext, ptr %res
  ret void
}

define void @v4i32_icmp_sle(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v4i32_icmp_sle:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vld $vr0, $a1, 0
; CHECK-NEXT:    vld $vr1, $a2, 0
; CHECK-NEXT:    vsle.w $vr0, $vr0, $vr1
; CHECK-NEXT:    vst $vr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <4 x i32>, ptr %a0
  %v1 = load <4 x i32>, ptr %a1
  %cmp = icmp sle <4 x i32> %v0, %v1
  %ext = sext <4 x i1> %cmp to <4 x i32>
  store <4 x i32> %ext, ptr %res
  ret void
}

define void @v2i64_icmp_sle_imm(ptr %res, ptr %a0) nounwind {
; CHECK-LABEL: v2i64_icmp_sle_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vld $vr0, $a1, 0
; CHECK-NEXT:    vslei.d $vr0, $vr0, 15
; CHECK-NEXT:    vst $vr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <2 x i64>, ptr %a0
  %cmp = icmp sle <2 x i64> %v0, <i64 15, i64 15>
  %ext = sext <2 x i1> %cmp to <2 x i64>
  store <2 x i64> %ext, ptr %res
  ret void
}

define void @v2i64_icmp_sle(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v2i64_icmp_sle:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vld $vr0, $a1, 0
; CHECK-NEXT:    vld $vr1, $a2, 0
; CHECK-NEXT:    vsle.d $vr0, $vr0, $vr1
; CHECK-NEXT:    vst $vr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <2 x i64>, ptr %a0
  %v1 = load <2 x i64>, ptr %a1
  %cmp = icmp sle <2 x i64> %v0, %v1
  %ext = sext <2 x i1> %cmp to <2 x i64>
  store <2 x i64> %ext, ptr %res
  ret void
}

;; SETULE
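;; Unsigned less-or-equal should map to vsle.{bu/hu/wu/du}; the immediate tests
;; use 31, presumably the largest value the unsigned 5-bit immediate field holds.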
define void @v16i8_icmp_ule_imm(ptr %res, ptr %a0) nounwind {
; CHECK-LABEL: v16i8_icmp_ule_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vld $vr0, $a1, 0
; CHECK-NEXT:    vslei.bu $vr0, $vr0, 31
; CHECK-NEXT:    vst $vr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <16 x i8>, ptr %a0
  %cmp = icmp ule <16 x i8> %v0, <i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31>
  %ext = sext <16 x i1> %cmp to <16 x i8>
  store <16 x i8> %ext, ptr %res
  ret void
}

define void @v16i8_icmp_ule(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v16i8_icmp_ule:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vld $vr0, $a1, 0
; CHECK-NEXT:    vld $vr1, $a2, 0
; CHECK-NEXT:    vsle.bu $vr0, $vr0, $vr1
; CHECK-NEXT:    vst $vr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <16 x i8>, ptr %a0
  %v1 = load <16 x i8>, ptr %a1
  %cmp = icmp ule <16 x i8> %v0, %v1
  %ext = sext <16 x i1> %cmp to <16 x i8>
  store <16 x i8> %ext, ptr %res
  ret void
}

define void @v8i16_icmp_ule_imm(ptr %res, ptr %a0) nounwind {
; CHECK-LABEL: v8i16_icmp_ule_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vld $vr0, $a1, 0
; CHECK-NEXT:    vslei.hu $vr0, $vr0, 31
; CHECK-NEXT:    vst $vr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <8 x i16>, ptr %a0
  %cmp = icmp ule <8 x i16> %v0, <i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31>
  %ext = sext <8 x i1> %cmp to <8 x i16>
  store <8 x i16> %ext, ptr %res
  ret void
}

define void @v8i16_icmp_ule(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v8i16_icmp_ule:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vld $vr0, $a1, 0
; CHECK-NEXT:    vld $vr1, $a2, 0
; CHECK-NEXT:    vsle.hu $vr0, $vr0, $vr1
; CHECK-NEXT:    vst $vr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <8 x i16>, ptr %a0
  %v1 = load <8 x i16>, ptr %a1
  %cmp = icmp ule <8 x i16> %v0, %v1
  %ext = sext <8 x i1> %cmp to <8 x i16>
  store <8 x i16> %ext, ptr %res
  ret void
}

define void @v4i32_icmp_ule_imm(ptr %res, ptr %a0) nounwind {
; CHECK-LABEL: v4i32_icmp_ule_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vld $vr0, $a1, 0
; CHECK-NEXT:    vslei.wu $vr0, $vr0, 31
; CHECK-NEXT:    vst $vr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <4 x i32>, ptr %a0
  %cmp = icmp ule <4 x i32> %v0, <i32 31, i32 31, i32 31, i32 31>
  %ext = sext <4 x i1> %cmp to <4 x i32>
  store <4 x i32> %ext, ptr %res
  ret void
}

define void @v4i32_icmp_ule(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v4i32_icmp_ule:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vld $vr0, $a1, 0
; CHECK-NEXT:    vld $vr1, $a2, 0
; CHECK-NEXT:    vsle.wu $vr0, $vr0, $vr1
; CHECK-NEXT:    vst $vr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <4 x i32>, ptr %a0
  %v1 = load <4 x i32>, ptr %a1
  %cmp = icmp ule <4 x i32> %v0, %v1
  %ext = sext <4 x i1> %cmp to <4 x i32>
  store <4 x i32> %ext, ptr %res
  ret void
}

define void @v2i64_icmp_ule_imm(ptr %res, ptr %a0) nounwind {
; CHECK-LABEL: v2i64_icmp_ule_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vld $vr0, $a1, 0
; CHECK-NEXT:    vslei.du $vr0, $vr0, 31
; CHECK-NEXT:    vst $vr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <2 x i64>, ptr %a0
  %cmp = icmp ule <2 x i64> %v0, <i64 31, i64 31>
  %ext = sext <2 x i1> %cmp to <2 x i64>
  store <2 x i64> %ext, ptr %res
  ret void
}

define void @v2i64_icmp_ule(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v2i64_icmp_ule:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vld $vr0, $a1, 0
; CHECK-NEXT:    vld $vr1, $a2, 0
; CHECK-NEXT:    vsle.du $vr0, $vr0, $vr1
; CHECK-NEXT:    vst $vr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <2 x i64>, ptr %a0
  %v1 = load <2 x i64>, ptr %a1
  %cmp = icmp ule <2 x i64> %v0, %v1
  %ext = sext <2 x i1> %cmp to <2 x i64>
  store <2 x i64> %ext, ptr %res
  ret void
}

;; SETLT
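;; Signed less-than should map directly to vslt.{b/h/w/d} and vslti.{b/h/w/d}.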
define void @v16i8_icmp_slt_imm(ptr %res, ptr %a0) nounwind {
; CHECK-LABEL: v16i8_icmp_slt_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vld $vr0, $a1, 0
; CHECK-NEXT:    vslti.b $vr0, $vr0, 15
; CHECK-NEXT:    vst $vr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <16 x i8>, ptr %a0
  %cmp = icmp slt <16 x i8> %v0, <i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15, i8 15>
  %ext = sext <16 x i1> %cmp to <16 x i8>
  store <16 x i8> %ext, ptr %res
  ret void
}

define void @v16i8_icmp_slt(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v16i8_icmp_slt:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vld $vr0, $a1, 0
; CHECK-NEXT:    vld $vr1, $a2, 0
; CHECK-NEXT:    vslt.b $vr0, $vr0, $vr1
; CHECK-NEXT:    vst $vr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <16 x i8>, ptr %a0
  %v1 = load <16 x i8>, ptr %a1
  %cmp = icmp slt <16 x i8> %v0, %v1
  %ext = sext <16 x i1> %cmp to <16 x i8>
  store <16 x i8> %ext, ptr %res
  ret void
}

define void @v8i16_icmp_slt_imm(ptr %res, ptr %a0) nounwind {
; CHECK-LABEL: v8i16_icmp_slt_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vld $vr0, $a1, 0
; CHECK-NEXT:    vslti.h $vr0, $vr0, 15
; CHECK-NEXT:    vst $vr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <8 x i16>, ptr %a0
  %cmp = icmp slt <8 x i16> %v0, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
  %ext = sext <8 x i1> %cmp to <8 x i16>
  store <8 x i16> %ext, ptr %res
  ret void
}

define void @v8i16_icmp_slt(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v8i16_icmp_slt:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vld $vr0, $a1, 0
; CHECK-NEXT:    vld $vr1, $a2, 0
; CHECK-NEXT:    vslt.h $vr0, $vr0, $vr1
; CHECK-NEXT:    vst $vr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <8 x i16>, ptr %a0
  %v1 = load <8 x i16>, ptr %a1
  %cmp = icmp slt <8 x i16> %v0, %v1
  %ext = sext <8 x i1> %cmp to <8 x i16>
  store <8 x i16> %ext, ptr %res
  ret void
}

define void @v4i32_icmp_slt_imm(ptr %res, ptr %a0) nounwind {
; CHECK-LABEL: v4i32_icmp_slt_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vld $vr0, $a1, 0
; CHECK-NEXT:    vslti.w $vr0, $vr0, 15
; CHECK-NEXT:    vst $vr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <4 x i32>, ptr %a0
  %cmp = icmp slt <4 x i32> %v0, <i32 15, i32 15, i32 15, i32 15>
  %ext = sext <4 x i1> %cmp to <4 x i32>
  store <4 x i32> %ext, ptr %res
  ret void
}

define void @v4i32_icmp_slt(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v4i32_icmp_slt:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vld $vr0, $a1, 0
; CHECK-NEXT:    vld $vr1, $a2, 0
; CHECK-NEXT:    vslt.w $vr0, $vr0, $vr1
; CHECK-NEXT:    vst $vr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <4 x i32>, ptr %a0
  %v1 = load <4 x i32>, ptr %a1
  %cmp = icmp slt <4 x i32> %v0, %v1
  %ext = sext <4 x i1> %cmp to <4 x i32>
  store <4 x i32> %ext, ptr %res
  ret void
}

define void @v2i64_icmp_slt_imm(ptr %res, ptr %a0) nounwind {
; CHECK-LABEL: v2i64_icmp_slt_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vld $vr0, $a1, 0
; CHECK-NEXT:    vslti.d $vr0, $vr0, 15
; CHECK-NEXT:    vst $vr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <2 x i64>, ptr %a0
  %cmp = icmp slt <2 x i64> %v0, <i64 15, i64 15>
  %ext = sext <2 x i1> %cmp to <2 x i64>
  store <2 x i64> %ext, ptr %res
  ret void
}

define void @v2i64_icmp_slt(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v2i64_icmp_slt:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vld $vr0, $a1, 0
; CHECK-NEXT:    vld $vr1, $a2, 0
; CHECK-NEXT:    vslt.d $vr0, $vr0, $vr1
; CHECK-NEXT:    vst $vr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <2 x i64>, ptr %a0
  %v1 = load <2 x i64>, ptr %a1
  %cmp = icmp slt <2 x i64> %v0, %v1
  %ext = sext <2 x i1> %cmp to <2 x i64>
  store <2 x i64> %ext, ptr %res
  ret void
}

;; SETULT
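;; Unsigned less-than should map to vslt.{bu/hu/wu/du} and vslti.{bu/hu/wu/du}.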
define void @v16i8_icmp_ult_imm(ptr %res, ptr %a0) nounwind {
; CHECK-LABEL: v16i8_icmp_ult_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vld $vr0, $a1, 0
; CHECK-NEXT:    vslti.bu $vr0, $vr0, 31
; CHECK-NEXT:    vst $vr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <16 x i8>, ptr %a0
  %cmp = icmp ult <16 x i8> %v0, <i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31>
  %ext = sext <16 x i1> %cmp to <16 x i8>
  store <16 x i8> %ext, ptr %res
  ret void
}

define void @v16i8_icmp_ult(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v16i8_icmp_ult:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vld $vr0, $a1, 0
; CHECK-NEXT:    vld $vr1, $a2, 0
; CHECK-NEXT:    vslt.bu $vr0, $vr0, $vr1
; CHECK-NEXT:    vst $vr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <16 x i8>, ptr %a0
  %v1 = load <16 x i8>, ptr %a1
  %cmp = icmp ult <16 x i8> %v0, %v1
  %ext = sext <16 x i1> %cmp to <16 x i8>
  store <16 x i8> %ext, ptr %res
  ret void
}

define void @v8i16_icmp_ult_imm(ptr %res, ptr %a0) nounwind {
; CHECK-LABEL: v8i16_icmp_ult_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vld $vr0, $a1, 0
; CHECK-NEXT:    vslti.hu $vr0, $vr0, 31
; CHECK-NEXT:    vst $vr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <8 x i16>, ptr %a0
  %cmp = icmp ult <8 x i16> %v0, <i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31>
  %ext = sext <8 x i1> %cmp to <8 x i16>
  store <8 x i16> %ext, ptr %res
  ret void
}

define void @v8i16_icmp_ult(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v8i16_icmp_ult:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vld $vr0, $a1, 0
; CHECK-NEXT:    vld $vr1, $a2, 0
; CHECK-NEXT:    vslt.hu $vr0, $vr0, $vr1
; CHECK-NEXT:    vst $vr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <8 x i16>, ptr %a0
  %v1 = load <8 x i16>, ptr %a1
  %cmp = icmp ult <8 x i16> %v0, %v1
  %ext = sext <8 x i1> %cmp to <8 x i16>
  store <8 x i16> %ext, ptr %res
  ret void
}

define void @v4i32_icmp_ult_imm(ptr %res, ptr %a0) nounwind {
; CHECK-LABEL: v4i32_icmp_ult_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vld $vr0, $a1, 0
; CHECK-NEXT:    vslti.wu $vr0, $vr0, 31
; CHECK-NEXT:    vst $vr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <4 x i32>, ptr %a0
  %cmp = icmp ult <4 x i32> %v0, <i32 31, i32 31, i32 31, i32 31>
  %ext = sext <4 x i1> %cmp to <4 x i32>
  store <4 x i32> %ext, ptr %res
  ret void
}

define void @v4i32_icmp_ult(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v4i32_icmp_ult:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vld $vr0, $a1, 0
; CHECK-NEXT:    vld $vr1, $a2, 0
; CHECK-NEXT:    vslt.wu $vr0, $vr0, $vr1
; CHECK-NEXT:    vst $vr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <4 x i32>, ptr %a0
  %v1 = load <4 x i32>, ptr %a1
  %cmp = icmp ult <4 x i32> %v0, %v1
  %ext = sext <4 x i1> %cmp to <4 x i32>
  store <4 x i32> %ext, ptr %res
  ret void
}

define void @v2i64_icmp_ult_imm(ptr %res, ptr %a0) nounwind {
; CHECK-LABEL: v2i64_icmp_ult_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vld $vr0, $a1, 0
; CHECK-NEXT:    vslti.du $vr0, $vr0, 31
; CHECK-NEXT:    vst $vr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <2 x i64>, ptr %a0
  %cmp = icmp ult <2 x i64> %v0, <i64 31, i64 31>
  %ext = sext <2 x i1> %cmp to <2 x i64>
  store <2 x i64> %ext, ptr %res
  ret void
}

define void @v2i64_icmp_ult(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v2i64_icmp_ult:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vld $vr0, $a1, 0
; CHECK-NEXT:    vld $vr1, $a2, 0
; CHECK-NEXT:    vslt.du $vr0, $vr0, $vr1
; CHECK-NEXT:    vst $vr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <2 x i64>, ptr %a0
  %v1 = load <2 x i64>, ptr %a1
  %cmp = icmp ult <2 x i64> %v0, %v1
  %ext = sext <2 x i1> %cmp to <2 x i64>
  store <2 x i64> %ext, ptr %res
  ret void
}

;; Expand SETNE
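;; There is no native vector not-equal compare, so the checks below expect vseq
;; followed by a bitwise NOT: vxori.b with 255 for i8 elements, and an XOR with
;; an all-ones vector (vrepli.b -1 + vxor.v) for the wider element types.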
define void @v16i8_icmp_ne(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v16i8_icmp_ne:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vld $vr0, $a1, 0
; CHECK-NEXT:    vld $vr1, $a2, 0
; CHECK-NEXT:    vseq.b $vr0, $vr0, $vr1
; CHECK-NEXT:    vxori.b $vr0, $vr0, 255
; CHECK-NEXT:    vst $vr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <16 x i8>, ptr %a0
  %v1 = load <16 x i8>, ptr %a1
  %cmp = icmp ne <16 x i8> %v0, %v1
  %ext = sext <16 x i1> %cmp to <16 x i8>
  store <16 x i8> %ext, ptr %res
  ret void
}

define void @v8i16_icmp_ne(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v8i16_icmp_ne:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vld $vr0, $a1, 0
; CHECK-NEXT:    vld $vr1, $a2, 0
; CHECK-NEXT:    vseq.h $vr0, $vr0, $vr1
; CHECK-NEXT:    vrepli.b $vr1, -1
; CHECK-NEXT:    vxor.v $vr0, $vr0, $vr1
; CHECK-NEXT:    vst $vr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <8 x i16>, ptr %a0
  %v1 = load <8 x i16>, ptr %a1
  %cmp = icmp ne <8 x i16> %v0, %v1
  %ext = sext <8 x i1> %cmp to <8 x i16>
  store <8 x i16> %ext, ptr %res
  ret void
}

define void @v4i32_icmp_ne(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v4i32_icmp_ne:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vld $vr0, $a1, 0
; CHECK-NEXT:    vld $vr1, $a2, 0
; CHECK-NEXT:    vseq.w $vr0, $vr0, $vr1
; CHECK-NEXT:    vrepli.b $vr1, -1
; CHECK-NEXT:    vxor.v $vr0, $vr0, $vr1
; CHECK-NEXT:    vst $vr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <4 x i32>, ptr %a0
  %v1 = load <4 x i32>, ptr %a1
  %cmp = icmp ne <4 x i32> %v0, %v1
  %ext = sext <4 x i1> %cmp to <4 x i32>
  store <4 x i32> %ext, ptr %res
  ret void
}

define void @v2i64_icmp_ne(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v2i64_icmp_ne:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vld $vr0, $a1, 0
; CHECK-NEXT:    vld $vr1, $a2, 0
; CHECK-NEXT:    vseq.d $vr0, $vr0, $vr1
; CHECK-NEXT:    vrepli.b $vr1, -1
; CHECK-NEXT:    vxor.v $vr0, $vr0, $vr1
; CHECK-NEXT:    vst $vr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <2 x i64>, ptr %a0
  %v1 = load <2 x i64>, ptr %a1
  %cmp = icmp ne <2 x i64> %v0, %v1
  %ext = sext <2 x i1> %cmp to <2 x i64>
  store <2 x i64> %ext, ptr %res
  ret void
}

;; Expand SETGE
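;; Greater-or-equal has no dedicated instruction; it is expected to be lowered
;; by swapping the operands of vsle.{b/h/w/d} (x >= y becomes y <= x).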
define void @v16i8_icmp_sge(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v16i8_icmp_sge:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vld $vr0, $a1, 0
; CHECK-NEXT:    vld $vr1, $a2, 0
; CHECK-NEXT:    vsle.b $vr0, $vr1, $vr0
; CHECK-NEXT:    vst $vr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <16 x i8>, ptr %a0
  %v1 = load <16 x i8>, ptr %a1
  %cmp = icmp sge <16 x i8> %v0, %v1
  %ext = sext <16 x i1> %cmp to <16 x i8>
  store <16 x i8> %ext, ptr %res
  ret void
}

define void @v8i16_icmp_sge(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v8i16_icmp_sge:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vld $vr0, $a1, 0
; CHECK-NEXT:    vld $vr1, $a2, 0
; CHECK-NEXT:    vsle.h $vr0, $vr1, $vr0
; CHECK-NEXT:    vst $vr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <8 x i16>, ptr %a0
  %v1 = load <8 x i16>, ptr %a1
  %cmp = icmp sge <8 x i16> %v0, %v1
  %ext = sext <8 x i1> %cmp to <8 x i16>
  store <8 x i16> %ext, ptr %res
  ret void
}

define void @v4i32_icmp_sge(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v4i32_icmp_sge:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vld $vr0, $a1, 0
; CHECK-NEXT:    vld $vr1, $a2, 0
; CHECK-NEXT:    vsle.w $vr0, $vr1, $vr0
; CHECK-NEXT:    vst $vr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <4 x i32>, ptr %a0
  %v1 = load <4 x i32>, ptr %a1
  %cmp = icmp sge <4 x i32> %v0, %v1
  %ext = sext <4 x i1> %cmp to <4 x i32>
  store <4 x i32> %ext, ptr %res
  ret void
}

define void @v2i64_icmp_sge(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v2i64_icmp_sge:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vld $vr0, $a1, 0
; CHECK-NEXT:    vld $vr1, $a2, 0
; CHECK-NEXT:    vsle.d $vr0, $vr1, $vr0
; CHECK-NEXT:    vst $vr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <2 x i64>, ptr %a0
  %v1 = load <2 x i64>, ptr %a1
  %cmp = icmp sge <2 x i64> %v0, %v1
  %ext = sext <2 x i1> %cmp to <2 x i64>
  store <2 x i64> %ext, ptr %res
  ret void
}

;; Expand SETUGE
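;; Same operand swap as the signed case, using the unsigned vsle.{bu/hu/wu/du}.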
define void @v16i8_icmp_uge(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v16i8_icmp_uge:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vld $vr0, $a1, 0
; CHECK-NEXT:    vld $vr1, $a2, 0
; CHECK-NEXT:    vsle.bu $vr0, $vr1, $vr0
; CHECK-NEXT:    vst $vr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <16 x i8>, ptr %a0
  %v1 = load <16 x i8>, ptr %a1
  %cmp = icmp uge <16 x i8> %v0, %v1
  %ext = sext <16 x i1> %cmp to <16 x i8>
  store <16 x i8> %ext, ptr %res
  ret void
}

define void @v8i16_icmp_uge(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v8i16_icmp_uge:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vld $vr0, $a1, 0
; CHECK-NEXT:    vld $vr1, $a2, 0
; CHECK-NEXT:    vsle.hu $vr0, $vr1, $vr0
; CHECK-NEXT:    vst $vr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <8 x i16>, ptr %a0
  %v1 = load <8 x i16>, ptr %a1
  %cmp = icmp uge <8 x i16> %v0, %v1
  %ext = sext <8 x i1> %cmp to <8 x i16>
  store <8 x i16> %ext, ptr %res
  ret void
}

define void @v4i32_icmp_uge(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v4i32_icmp_uge:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vld $vr0, $a1, 0
; CHECK-NEXT:    vld $vr1, $a2, 0
; CHECK-NEXT:    vsle.wu $vr0, $vr1, $vr0
; CHECK-NEXT:    vst $vr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <4 x i32>, ptr %a0
  %v1 = load <4 x i32>, ptr %a1
  %cmp = icmp uge <4 x i32> %v0, %v1
  %ext = sext <4 x i1> %cmp to <4 x i32>
  store <4 x i32> %ext, ptr %res
  ret void
}

define void @v2i64_icmp_uge(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v2i64_icmp_uge:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vld $vr0, $a1, 0
; CHECK-NEXT:    vld $vr1, $a2, 0
; CHECK-NEXT:    vsle.du $vr0, $vr1, $vr0
; CHECK-NEXT:    vst $vr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <2 x i64>, ptr %a0
  %v1 = load <2 x i64>, ptr %a1
  %cmp = icmp uge <2 x i64> %v0, %v1
  %ext = sext <2 x i1> %cmp to <2 x i64>
  store <2 x i64> %ext, ptr %res
  ret void
}

;; Expand SETGT
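;; Greater-than is likewise expected to lower to vslt.{b/h/w/d} with the
;; operands swapped (x > y becomes y < x).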
define void @v16i8_icmp_sgt(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v16i8_icmp_sgt:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vld $vr0, $a1, 0
; CHECK-NEXT:    vld $vr1, $a2, 0
; CHECK-NEXT:    vslt.b $vr0, $vr1, $vr0
; CHECK-NEXT:    vst $vr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <16 x i8>, ptr %a0
  %v1 = load <16 x i8>, ptr %a1
  %cmp = icmp sgt <16 x i8> %v0, %v1
  %ext = sext <16 x i1> %cmp to <16 x i8>
  store <16 x i8> %ext, ptr %res
  ret void
}

define void @v8i16_icmp_sgt(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v8i16_icmp_sgt:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vld $vr0, $a1, 0
; CHECK-NEXT:    vld $vr1, $a2, 0
; CHECK-NEXT:    vslt.h $vr0, $vr1, $vr0
; CHECK-NEXT:    vst $vr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <8 x i16>, ptr %a0
  %v1 = load <8 x i16>, ptr %a1
  %cmp = icmp sgt <8 x i16> %v0, %v1
  %ext = sext <8 x i1> %cmp to <8 x i16>
  store <8 x i16> %ext, ptr %res
  ret void
}

define void @v4i32_icmp_sgt(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v4i32_icmp_sgt:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vld $vr0, $a1, 0
; CHECK-NEXT:    vld $vr1, $a2, 0
; CHECK-NEXT:    vslt.w $vr0, $vr1, $vr0
; CHECK-NEXT:    vst $vr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <4 x i32>, ptr %a0
  %v1 = load <4 x i32>, ptr %a1
  %cmp = icmp sgt <4 x i32> %v0, %v1
  %ext = sext <4 x i1> %cmp to <4 x i32>
  store <4 x i32> %ext, ptr %res
  ret void
}

define void @v2i64_icmp_sgt(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v2i64_icmp_sgt:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vld $vr0, $a1, 0
; CHECK-NEXT:    vld $vr1, $a2, 0
; CHECK-NEXT:    vslt.d $vr0, $vr1, $vr0
; CHECK-NEXT:    vst $vr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <2 x i64>, ptr %a0
  %v1 = load <2 x i64>, ptr %a1
  %cmp = icmp sgt <2 x i64> %v0, %v1
  %ext = sext <2 x i1> %cmp to <2 x i64>
  store <2 x i64> %ext, ptr %res
  ret void
}

;; Expand SETUGT
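;; Same operand swap, using the unsigned vslt.{bu/hu/wu/du}.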
define void @v16i8_icmp_ugt(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v16i8_icmp_ugt:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vld $vr0, $a1, 0
; CHECK-NEXT:    vld $vr1, $a2, 0
; CHECK-NEXT:    vslt.bu $vr0, $vr1, $vr0
; CHECK-NEXT:    vst $vr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <16 x i8>, ptr %a0
  %v1 = load <16 x i8>, ptr %a1
  %cmp = icmp ugt <16 x i8> %v0, %v1
  %ext = sext <16 x i1> %cmp to <16 x i8>
  store <16 x i8> %ext, ptr %res
  ret void
}

define void @v8i16_icmp_ugt(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v8i16_icmp_ugt:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vld $vr0, $a1, 0
; CHECK-NEXT:    vld $vr1, $a2, 0
; CHECK-NEXT:    vslt.hu $vr0, $vr1, $vr0
; CHECK-NEXT:    vst $vr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <8 x i16>, ptr %a0
  %v1 = load <8 x i16>, ptr %a1
  %cmp = icmp ugt <8 x i16> %v0, %v1
  %ext = sext <8 x i1> %cmp to <8 x i16>
  store <8 x i16> %ext, ptr %res
  ret void
}

define void @v4i32_icmp_ugt(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v4i32_icmp_ugt:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vld $vr0, $a1, 0
; CHECK-NEXT:    vld $vr1, $a2, 0
; CHECK-NEXT:    vslt.wu $vr0, $vr1, $vr0
; CHECK-NEXT:    vst $vr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <4 x i32>, ptr %a0
  %v1 = load <4 x i32>, ptr %a1
  %cmp = icmp ugt <4 x i32> %v0, %v1
  %ext = sext <4 x i1> %cmp to <4 x i32>
  store <4 x i32> %ext, ptr %res
  ret void
}

define void @v2i64_icmp_ugt(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v2i64_icmp_ugt:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vld $vr0, $a1, 0
; CHECK-NEXT:    vld $vr1, $a2, 0
; CHECK-NEXT:    vslt.du $vr0, $vr1, $vr0
; CHECK-NEXT:    vst $vr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <2 x i64>, ptr %a0
  %v1 = load <2 x i64>, ptr %a1
  %cmp = icmp ugt <2 x i64> %v0, %v1
  %ext = sext <2 x i1> %cmp to <2 x i64>
  store <2 x i64> %ext, ptr %res
  ret void
}
