; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s

;; TRUE
;; An always-true compare needs no xvfcmp: it folds to an all-ones mask.
define void @v8f32_fcmp_true(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v8f32_fcmp_true:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvrepli.b $xr0, -1
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <8 x float>, ptr %a0
  %v1 = load <8 x float>, ptr %a1
  %cmp = fcmp true <8 x float> %v0, %v1
  %ext = sext <8 x i1> %cmp to <8 x i32>
  store <8 x i32> %ext, ptr %res
  ret void
}

;; FALSE
;; An always-false compare needs no xvfcmp: it folds to an all-zeros mask.
define void @v4f64_fcmp_false(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v4f64_fcmp_false:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvrepli.b $xr0, 0
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <4 x double>, ptr %a0
  %v1 = load <4 x double>, ptr %a1
  %cmp = fcmp false <4 x double> %v0, %v1
  %ext = sext <4 x i1> %cmp to <4 x i64>
  store <4 x i64> %ext, ptr %res
  ret void
}

;; SETOEQ -> xvfcmp.ceq.{s,d}
define void @v8f32_fcmp_oeq(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v8f32_fcmp_oeq:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvld $xr1, $a2, 0
; CHECK-NEXT:    xvfcmp.ceq.s $xr0, $xr0, $xr1
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <8 x float>, ptr %a0
  %v1 = load <8 x float>, ptr %a1
  %cmp = fcmp oeq <8 x float> %v0, %v1
  %ext = sext <8 x i1> %cmp to <8 x i32>
  store <8 x i32> %ext, ptr %res
  ret void
}

define void @v4f64_fcmp_oeq(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v4f64_fcmp_oeq:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvld $xr1, $a2, 0
; CHECK-NEXT:    xvfcmp.ceq.d $xr0, $xr0, $xr1
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <4 x double>, ptr %a0
  %v1 = load <4 x double>, ptr %a1
  %cmp = fcmp oeq <4 x double> %v0, %v1
  %ext = sext <4 x i1> %cmp to <4 x i64>
  store <4 x i64> %ext, ptr %res
  ret void
}

;; SETUEQ -> xvfcmp.cueq.{s,d}
define void @v8f32_fcmp_ueq(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v8f32_fcmp_ueq:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvld $xr1, $a2, 0
; CHECK-NEXT:    xvfcmp.cueq.s $xr0, $xr0, $xr1
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <8 x float>, ptr %a0
  %v1 = load <8 x float>, ptr %a1
  %cmp = fcmp ueq <8 x float> %v0, %v1
  %ext = sext <8 x i1> %cmp to <8 x i32>
  store <8 x i32> %ext, ptr %res
  ret void
}

define void @v4f64_fcmp_ueq(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v4f64_fcmp_ueq:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvld $xr1, $a2, 0
; CHECK-NEXT:    xvfcmp.cueq.d $xr0, $xr0, $xr1
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <4 x double>, ptr %a0
  %v1 = load <4 x double>, ptr %a1
  %cmp = fcmp ueq <4 x double> %v0, %v1
  %ext = sext <4 x i1> %cmp to <4 x i64>
  store <4 x i64> %ext, ptr %res
  ret void
}

;; SETEQ
;; With fast-math flags, both oeq and ueq select the ordered xvfcmp.ceq.{s,d}.
define void @v8f32_fcmp_eq(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v8f32_fcmp_eq:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvld $xr1, $a2, 0
; CHECK-NEXT:    xvfcmp.ceq.s $xr0, $xr0, $xr1
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <8 x float>, ptr %a0
  %v1 = load <8 x float>, ptr %a1
  %cmp = fcmp fast oeq <8 x float> %v0, %v1
  %ext = sext <8 x i1> %cmp to <8 x i32>
  store <8 x i32> %ext, ptr %res
  ret void
}

define void @v4f64_fcmp_eq(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v4f64_fcmp_eq:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvld $xr1, $a2, 0
; CHECK-NEXT:    xvfcmp.ceq.d $xr0, $xr0, $xr1
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <4 x double>, ptr %a0
  %v1 = load <4 x double>, ptr %a1
  %cmp = fcmp fast ueq <4 x double> %v0, %v1
  %ext = sext <4 x i1> %cmp to <4 x i64>
  store <4 x i64> %ext, ptr %res
  ret void
}

;; SETOLE -> xvfcmp.cle.{s,d}
define void @v8f32_fcmp_ole(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v8f32_fcmp_ole:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvld $xr1, $a2, 0
; CHECK-NEXT:    xvfcmp.cle.s $xr0, $xr0, $xr1
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <8 x float>, ptr %a0
  %v1 = load <8 x float>, ptr %a1
  %cmp = fcmp ole <8 x float> %v0, %v1
  %ext = sext <8 x i1> %cmp to <8 x i32>
  store <8 x i32> %ext, ptr %res
  ret void
}

define void @v4f64_fcmp_ole(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v4f64_fcmp_ole:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvld $xr1, $a2, 0
; CHECK-NEXT:    xvfcmp.cle.d $xr0, $xr0, $xr1
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <4 x double>, ptr %a0
  %v1 = load <4 x double>, ptr %a1
  %cmp = fcmp ole <4 x double> %v0, %v1
  %ext = sext <4 x i1> %cmp to <4 x i64>
  store <4 x i64> %ext, ptr %res
  ret void
}

;; SETULE -> xvfcmp.cule.{s,d}
define void @v8f32_fcmp_ule(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v8f32_fcmp_ule:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvld $xr1, $a2, 0
; CHECK-NEXT:    xvfcmp.cule.s $xr0, $xr0, $xr1
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <8 x float>, ptr %a0
  %v1 = load <8 x float>, ptr %a1
  %cmp = fcmp ule <8 x float> %v0, %v1
  %ext = sext <8 x i1> %cmp to <8 x i32>
  store <8 x i32> %ext, ptr %res
  ret void
}

define void @v4f64_fcmp_ule(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v4f64_fcmp_ule:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvld $xr1, $a2, 0
; CHECK-NEXT:    xvfcmp.cule.d $xr0, $xr0, $xr1
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <4 x double>, ptr %a0
  %v1 = load <4 x double>, ptr %a1
  %cmp = fcmp ule <4 x double> %v0, %v1
  %ext = sext <4 x i1> %cmp to <4 x i64>
  store <4 x i64> %ext, ptr %res
  ret void
}

;; SETLE
;; With fast-math flags, both ole and ule select the ordered xvfcmp.cle.{s,d}.
define void @v8f32_fcmp_le(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v8f32_fcmp_le:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvld $xr1, $a2, 0
; CHECK-NEXT:    xvfcmp.cle.s $xr0, $xr0, $xr1
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <8 x float>, ptr %a0
  %v1 = load <8 x float>, ptr %a1
  %cmp = fcmp fast ole <8 x float> %v0, %v1
  %ext = sext <8 x i1> %cmp to <8 x i32>
  store <8 x i32> %ext, ptr %res
  ret void
}

define void @v4f64_fcmp_le(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v4f64_fcmp_le:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvld $xr1, $a2, 0
; CHECK-NEXT:    xvfcmp.cle.d $xr0, $xr0, $xr1
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <4 x double>, ptr %a0
  %v1 = load <4 x double>, ptr %a1
  %cmp = fcmp fast ule <4 x double> %v0, %v1
  %ext = sext <4 x i1> %cmp to <4 x i64>
  store <4 x i64> %ext, ptr %res
  ret void
}

;; SETOLT -> xvfcmp.clt.{s,d}
define void @v8f32_fcmp_olt(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v8f32_fcmp_olt:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvld $xr1, $a2, 0
; CHECK-NEXT:    xvfcmp.clt.s $xr0, $xr0, $xr1
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <8 x float>, ptr %a0
  %v1 = load <8 x float>, ptr %a1
  %cmp = fcmp olt <8 x float> %v0, %v1
  %ext = sext <8 x i1> %cmp to <8 x i32>
  store <8 x i32> %ext, ptr %res
  ret void
}

define void @v4f64_fcmp_olt(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v4f64_fcmp_olt:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvld $xr1, $a2, 0
; CHECK-NEXT:    xvfcmp.clt.d $xr0, $xr0, $xr1
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <4 x double>, ptr %a0
  %v1 = load <4 x double>, ptr %a1
  %cmp = fcmp olt <4 x double> %v0, %v1
  %ext = sext <4 x i1> %cmp to <4 x i64>
  store <4 x i64> %ext, ptr %res
  ret void
}

;; SETULT -> xvfcmp.cult.{s,d}
define void @v8f32_fcmp_ult(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v8f32_fcmp_ult:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvld $xr1, $a2, 0
; CHECK-NEXT:    xvfcmp.cult.s $xr0, $xr0, $xr1
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <8 x float>, ptr %a0
  %v1 = load <8 x float>, ptr %a1
  %cmp = fcmp ult <8 x float> %v0, %v1
  %ext = sext <8 x i1> %cmp to <8 x i32>
  store <8 x i32> %ext, ptr %res
  ret void
}

define void @v4f64_fcmp_ult(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v4f64_fcmp_ult:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvld $xr1, $a2, 0
; CHECK-NEXT:    xvfcmp.cult.d $xr0, $xr0, $xr1
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <4 x double>, ptr %a0
  %v1 = load <4 x double>, ptr %a1
  %cmp = fcmp ult <4 x double> %v0, %v1
  %ext = sext <4 x i1> %cmp to <4 x i64>
  store <4 x i64> %ext, ptr %res
  ret void
}

;; SETLT
;; With fast-math flags, both olt and ult select the ordered xvfcmp.clt.{s,d}.
define void @v8f32_fcmp_lt(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v8f32_fcmp_lt:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvld $xr1, $a2, 0
; CHECK-NEXT:    xvfcmp.clt.s $xr0, $xr0, $xr1
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <8 x float>, ptr %a0
  %v1 = load <8 x float>, ptr %a1
  %cmp = fcmp fast olt <8 x float> %v0, %v1
  %ext = sext <8 x i1> %cmp to <8 x i32>
  store <8 x i32> %ext, ptr %res
  ret void
}

define void @v4f64_fcmp_lt(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v4f64_fcmp_lt:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvld $xr1, $a2, 0
; CHECK-NEXT:    xvfcmp.clt.d $xr0, $xr0, $xr1
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <4 x double>, ptr %a0
  %v1 = load <4 x double>, ptr %a1
  %cmp = fcmp fast ult <4 x double> %v0, %v1
  %ext = sext <4 x i1> %cmp to <4 x i64>
  store <4 x i64> %ext, ptr %res
  ret void
}

;; SETONE -> xvfcmp.cne.{s,d}
define void @v8f32_fcmp_one(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v8f32_fcmp_one:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvld $xr1, $a2, 0
; CHECK-NEXT:    xvfcmp.cne.s $xr0, $xr0, $xr1
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <8 x float>, ptr %a0
  %v1 = load <8 x float>, ptr %a1
  %cmp = fcmp one <8 x float> %v0, %v1
  %ext = sext <8 x i1> %cmp to <8 x i32>
  store <8 x i32> %ext, ptr %res
  ret void
}

define void @v4f64_fcmp_one(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v4f64_fcmp_one:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvld $xr1, $a2, 0
; CHECK-NEXT:    xvfcmp.cne.d $xr0, $xr0, $xr1
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <4 x double>, ptr %a0
  %v1 = load <4 x double>, ptr %a1
  %cmp = fcmp one <4 x double> %v0, %v1
  %ext = sext <4 x i1> %cmp to <4 x i64>
  store <4 x i64> %ext, ptr %res
  ret void
}

;; SETUNE -> xvfcmp.cune.{s,d}
define void @v8f32_fcmp_une(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v8f32_fcmp_une:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvld $xr1, $a2, 0
; CHECK-NEXT:    xvfcmp.cune.s $xr0, $xr0, $xr1
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <8 x float>, ptr %a0
  %v1 = load <8 x float>, ptr %a1
  %cmp = fcmp une <8 x float> %v0, %v1
  %ext = sext <8 x i1> %cmp to <8 x i32>
  store <8 x i32> %ext, ptr %res
  ret void
}

define void @v4f64_fcmp_une(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v4f64_fcmp_une:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvld $xr1, $a2, 0
; CHECK-NEXT:    xvfcmp.cune.d $xr0, $xr0, $xr1
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <4 x double>, ptr %a0
  %v1 = load <4 x double>, ptr %a1
  %cmp = fcmp une <4 x double> %v0, %v1
  %ext = sext <4 x i1> %cmp to <4 x i64>
  store <4 x i64> %ext, ptr %res
  ret void
}

;; SETNE
;; With fast-math flags, both one and une select the ordered xvfcmp.cne.{s,d}.
define void @v8f32_fcmp_ne(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v8f32_fcmp_ne:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvld $xr1, $a2, 0
; CHECK-NEXT:    xvfcmp.cne.s $xr0, $xr0, $xr1
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <8 x float>, ptr %a0
  %v1 = load <8 x float>, ptr %a1
  %cmp = fcmp fast one <8 x float> %v0, %v1
  %ext = sext <8 x i1> %cmp to <8 x i32>
  store <8 x i32> %ext, ptr %res
  ret void
}

define void @v4f64_fcmp_ne(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v4f64_fcmp_ne:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvld $xr1, $a2, 0
; CHECK-NEXT:    xvfcmp.cne.d $xr0, $xr0, $xr1
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <4 x double>, ptr %a0
  %v1 = load <4 x double>, ptr %a1
  %cmp = fcmp fast une <4 x double> %v0, %v1
  %ext = sext <4 x i1> %cmp to <4 x i64>
  store <4 x i64> %ext, ptr %res
  ret void
}

;; SETO -> xvfcmp.cor.{s,d} (true iff neither operand is NaN)
define void @v8f32_fcmp_ord(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v8f32_fcmp_ord:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvld $xr1, $a2, 0
; CHECK-NEXT:    xvfcmp.cor.s $xr0, $xr0, $xr1
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <8 x float>, ptr %a0
  %v1 = load <8 x float>, ptr %a1
  %cmp = fcmp ord <8 x float> %v0, %v1
  %ext = sext <8 x i1> %cmp to <8 x i32>
  store <8 x i32> %ext, ptr %res
  ret void
}

define void @v4f64_fcmp_ord(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v4f64_fcmp_ord:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvld $xr1, $a2, 0
; CHECK-NEXT:    xvfcmp.cor.d $xr0, $xr0, $xr1
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <4 x double>, ptr %a0
  %v1 = load <4 x double>, ptr %a1
  %cmp = fcmp ord <4 x double> %v0, %v1
  %ext = sext <4 x i1> %cmp to <4 x i64>
  store <4 x i64> %ext, ptr %res
  ret void
}

;; SETUO -> xvfcmp.cun.{s,d} (true iff either operand is NaN)
define void @v8f32_fcmp_uno(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v8f32_fcmp_uno:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvld $xr1, $a2, 0
; CHECK-NEXT:    xvfcmp.cun.s $xr0, $xr0, $xr1
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <8 x float>, ptr %a0
  %v1 = load <8 x float>, ptr %a1
  %cmp = fcmp uno <8 x float> %v0, %v1
  %ext = sext <8 x i1> %cmp to <8 x i32>
  store <8 x i32> %ext, ptr %res
  ret void
}

define void @v4f64_fcmp_uno(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v4f64_fcmp_uno:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvld $xr1, $a2, 0
; CHECK-NEXT:    xvfcmp.cun.d $xr0, $xr0, $xr1
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <4 x double>, ptr %a0
  %v1 = load <4 x double>, ptr %a1
  %cmp = fcmp uno <4 x double> %v0, %v1
  %ext = sext <4 x i1> %cmp to <4 x i64>
  store <4 x i64> %ext, ptr %res
  ret void
}

;; Expand SETOGT: no "greater than" encoding, so clt with swapped operands.
define void @v8f32_fcmp_ogt(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v8f32_fcmp_ogt:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvld $xr1, $a2, 0
; CHECK-NEXT:    xvfcmp.clt.s $xr0, $xr1, $xr0
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <8 x float>, ptr %a0
  %v1 = load <8 x float>, ptr %a1
  %cmp = fcmp ogt <8 x float> %v0, %v1
  %ext = sext <8 x i1> %cmp to <8 x i32>
  store <8 x i32> %ext, ptr %res
  ret void
}

define void @v4f64_fcmp_ogt(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v4f64_fcmp_ogt:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvld $xr1, $a2, 0
; CHECK-NEXT:    xvfcmp.clt.d $xr0, $xr1, $xr0
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <4 x double>, ptr %a0
  %v1 = load <4 x double>, ptr %a1
  %cmp = fcmp ogt <4 x double> %v0, %v1
  %ext = sext <4 x i1> %cmp to <4 x i64>
  store <4 x i64> %ext, ptr %res
  ret void
}

;; Expand SETUGT: cult with swapped operands.
define void @v8f32_fcmp_ugt(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v8f32_fcmp_ugt:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvld $xr1, $a2, 0
; CHECK-NEXT:    xvfcmp.cult.s $xr0, $xr1, $xr0
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <8 x float>, ptr %a0
  %v1 = load <8 x float>, ptr %a1
  %cmp = fcmp ugt <8 x float> %v0, %v1
  %ext = sext <8 x i1> %cmp to <8 x i32>
  store <8 x i32> %ext, ptr %res
  ret void
}

define void @v4f64_fcmp_ugt(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v4f64_fcmp_ugt:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvld $xr1, $a2, 0
; CHECK-NEXT:    xvfcmp.cult.d $xr0, $xr1, $xr0
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <4 x double>, ptr %a0
  %v1 = load <4 x double>, ptr %a1
  %cmp = fcmp ugt <4 x double> %v0, %v1
  %ext = sext <4 x i1> %cmp to <4 x i64>
  store <4 x i64> %ext, ptr %res
  ret void
}

;; Expand SETGT: with fast-math flags, ogt/ugt both become swapped-operand clt.
define void @v8f32_fcmp_gt(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v8f32_fcmp_gt:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvld $xr1, $a2, 0
; CHECK-NEXT:    xvfcmp.clt.s $xr0, $xr1, $xr0
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <8 x float>, ptr %a0
  %v1 = load <8 x float>, ptr %a1
  %cmp = fcmp fast ogt <8 x float> %v0, %v1
  %ext = sext <8 x i1> %cmp to <8 x i32>
  store <8 x i32> %ext, ptr %res
  ret void
}

define void @v4f64_fcmp_gt(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v4f64_fcmp_gt:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvld $xr1, $a2, 0
; CHECK-NEXT:    xvfcmp.clt.d $xr0, $xr1, $xr0
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <4 x double>, ptr %a0
  %v1 = load <4 x double>, ptr %a1
  %cmp = fcmp fast ugt <4 x double> %v0, %v1
  %ext = sext <4 x i1> %cmp to <4 x i64>
  store <4 x i64> %ext, ptr %res
  ret void
}

;; Expand SETOGE: cle with swapped operands.
define void @v8f32_fcmp_oge(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v8f32_fcmp_oge:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvld $xr1, $a2, 0
; CHECK-NEXT:    xvfcmp.cle.s $xr0, $xr1, $xr0
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <8 x float>, ptr %a0
  %v1 = load <8 x float>, ptr %a1
  %cmp = fcmp oge <8 x float> %v0, %v1
  %ext = sext <8 x i1> %cmp to <8 x i32>
  store <8 x i32> %ext, ptr %res
  ret void
}

define void @v4f64_fcmp_oge(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v4f64_fcmp_oge:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvld $xr1, $a2, 0
; CHECK-NEXT:    xvfcmp.cle.d $xr0, $xr1, $xr0
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <4 x double>, ptr %a0
  %v1 = load <4 x double>, ptr %a1
  %cmp = fcmp oge <4 x double> %v0, %v1
  %ext = sext <4 x i1> %cmp to <4 x i64>
  store <4 x i64> %ext, ptr %res
  ret void
}

;; Expand SETUGE: cule with swapped operands.
define void @v8f32_fcmp_uge(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v8f32_fcmp_uge:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvld $xr1, $a2, 0
; CHECK-NEXT:    xvfcmp.cule.s $xr0, $xr1, $xr0
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <8 x float>, ptr %a0
  %v1 = load <8 x float>, ptr %a1
  %cmp = fcmp uge <8 x float> %v0, %v1
  %ext = sext <8 x i1> %cmp to <8 x i32>
  store <8 x i32> %ext, ptr %res
  ret void
}

define void @v4f64_fcmp_uge(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v4f64_fcmp_uge:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvld $xr1, $a2, 0
; CHECK-NEXT:    xvfcmp.cule.d $xr0, $xr1, $xr0
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <4 x double>, ptr %a0
  %v1 = load <4 x double>, ptr %a1
  %cmp = fcmp uge <4 x double> %v0, %v1
  %ext = sext <4 x i1> %cmp to <4 x i64>
  store <4 x i64> %ext, ptr %res
  ret void
}

;; Expand SETGE: with fast-math flags, oge/uge both become swapped-operand cle.
define void @v8f32_fcmp_ge(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v8f32_fcmp_ge:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvld $xr1, $a2, 0
; CHECK-NEXT:    xvfcmp.cle.s $xr0, $xr1, $xr0
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <8 x float>, ptr %a0
  %v1 = load <8 x float>, ptr %a1
  %cmp = fcmp fast oge <8 x float> %v0, %v1
  %ext = sext <8 x i1> %cmp to <8 x i32>
  store <8 x i32> %ext, ptr %res
  ret void
}

define void @v4f64_fcmp_ge(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v4f64_fcmp_ge:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvld $xr1, $a2, 0
; CHECK-NEXT:    xvfcmp.cle.d $xr0, $xr1, $xr0
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <4 x double>, ptr %a0
  %v1 = load <4 x double>, ptr %a1
  %cmp = fcmp fast uge <4 x double> %v0, %v1
  %ext = sext <4 x i1> %cmp to <4 x i64>
  store <4 x i64> %ext, ptr %res
  ret void
}
