; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=armv8a--none-eabi -mattr=+fullfp16 -float-abi=hard | FileCheck %s
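
; This file checks that selects of half-precision (f16) values are lowered to
; the Armv8 VSEL instructions (vseleq/vselge/vselgt/vselvs) when +fullfp16 is
; available, for integer (icmp) conditions, floating-point (fcmp) conditions,
; and fcmp conditions carrying the nnan fast-math flag.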
@varhalf = global half 0.0
@vardouble = global double 0.0
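
; Integer conditions: the GPR compare sets the flags and VSEL selects between
; the two loaded f16 values; slt/sle are handled by swapping the VSEL operands.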
define void @test_vsel32sgt(i32 %lhs, i32 %rhs, ptr %a_ptr, ptr %b_ptr) {
; CHECK-LABEL: test_vsel32sgt:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr.16 s0, [r2]
; CHECK-NEXT:    vldr.16 s2, [r3]
; CHECK-NEXT:    cmp r0, r1
; CHECK-NEXT:    movw r0, :lower16:varhalf
; CHECK-NEXT:    vselgt.f16 s0, s0, s2
; CHECK-NEXT:    movt r0, :upper16:varhalf
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load volatile half, ptr %a_ptr
  %b = load volatile half, ptr %b_ptr
  %tst1 = icmp sgt i32 %lhs, %rhs
  %val1 = select i1 %tst1, half %a, half %b
  store half %val1, ptr @varhalf
  ret void
}

define void @test_vsel32sge(i32 %lhs, i32 %rhs, ptr %a_ptr, ptr %b_ptr) {
; CHECK-LABEL: test_vsel32sge:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr.16 s0, [r2]
; CHECK-NEXT:    vldr.16 s2, [r3]
; CHECK-NEXT:    cmp r0, r1
; CHECK-NEXT:    movw r0, :lower16:varhalf
; CHECK-NEXT:    vselge.f16 s0, s0, s2
; CHECK-NEXT:    movt r0, :upper16:varhalf
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load volatile half, ptr %a_ptr
  %b = load volatile half, ptr %b_ptr
  %tst1 = icmp sge i32 %lhs, %rhs
  %val1 = select i1 %tst1, half %a, half %b
  store half %val1, ptr @varhalf
  ret void
}

define void @test_vsel32eq(i32 %lhs, i32 %rhs, ptr %a_ptr, ptr %b_ptr) {
; CHECK-LABEL: test_vsel32eq:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr.16 s0, [r2]
; CHECK-NEXT:    vldr.16 s2, [r3]
; CHECK-NEXT:    cmp r0, r1
; CHECK-NEXT:    movw r0, :lower16:varhalf
; CHECK-NEXT:    vseleq.f16 s0, s0, s2
; CHECK-NEXT:    movt r0, :upper16:varhalf
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load volatile half, ptr %a_ptr
  %b = load volatile half, ptr %b_ptr
  %tst1 = icmp eq i32 %lhs, %rhs
  %val1 = select i1 %tst1, half %a, half %b
  store half %val1, ptr @varhalf
  ret void
}

define void @test_vsel32slt(i32 %lhs, i32 %rhs, ptr %a_ptr, ptr %b_ptr) {
; CHECK-LABEL: test_vsel32slt:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr.16 s0, [r2]
; CHECK-NEXT:    vldr.16 s2, [r3]
; CHECK-NEXT:    cmp r0, r1
; CHECK-NEXT:    movw r0, :lower16:varhalf
; CHECK-NEXT:    vselge.f16 s0, s2, s0
; CHECK-NEXT:    movt r0, :upper16:varhalf
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load volatile half, ptr %a_ptr
  %b = load volatile half, ptr %b_ptr
  %tst1 = icmp slt i32 %lhs, %rhs
  %val1 = select i1 %tst1, half %a, half %b
  store half %val1, ptr @varhalf
  ret void
}

define void @test_vsel32sle(i32 %lhs, i32 %rhs, ptr %a_ptr, ptr %b_ptr) {
; CHECK-LABEL: test_vsel32sle:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr.16 s0, [r2]
; CHECK-NEXT:    vldr.16 s2, [r3]
; CHECK-NEXT:    cmp r0, r1
; CHECK-NEXT:    movw r0, :lower16:varhalf
; CHECK-NEXT:    vselgt.f16 s0, s2, s0
; CHECK-NEXT:    movt r0, :upper16:varhalf
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load volatile half, ptr %a_ptr
  %b = load volatile half, ptr %b_ptr
  %tst1 = icmp sle i32 %lhs, %rhs
  %val1 = select i1 %tst1, half %a, half %b
  store half %val1, ptr @varhalf
  ret void
}
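
; Floating-point conditions: vcmp.f16 followed by vmrs transfers the FP flags
; to APSR; predicates with no direct VSEL condition are formed by swapping the
; compare and/or select operands, and vselvs handles ord/uno.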
define void @test_vsel32ogt(ptr %lhs_ptr, ptr %rhs_ptr, ptr %a_ptr, ptr %b_ptr) {
; CHECK-LABEL: test_vsel32ogt:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr.16 s0, [r2]
; CHECK-NEXT:    vldr.16 s2, [r3]
; CHECK-NEXT:    vldr.16 s4, [r0]
; CHECK-NEXT:    vldr.16 s6, [r1]
; CHECK-NEXT:    movw r0, :lower16:varhalf
; CHECK-NEXT:    vcmp.f16 s4, s6
; CHECK-NEXT:    movt r0, :upper16:varhalf
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vselgt.f16 s0, s0, s2
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load volatile half, ptr %a_ptr
  %b = load volatile half, ptr %b_ptr
  %lhs = load volatile half, ptr %lhs_ptr
  %rhs = load volatile half, ptr %rhs_ptr
  %tst1 = fcmp ogt half %lhs, %rhs
  %val1 = select i1 %tst1, half %a, half %b
  store half %val1, ptr @varhalf
  ret void
}

define void @test_vsel32oge(ptr %lhs_ptr, ptr %rhs_ptr, ptr %a_ptr, ptr %b_ptr) {
; CHECK-LABEL: test_vsel32oge:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr.16 s0, [r2]
; CHECK-NEXT:    vldr.16 s2, [r3]
; CHECK-NEXT:    vldr.16 s4, [r0]
; CHECK-NEXT:    vldr.16 s6, [r1]
; CHECK-NEXT:    movw r0, :lower16:varhalf
; CHECK-NEXT:    vcmp.f16 s4, s6
; CHECK-NEXT:    movt r0, :upper16:varhalf
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vselge.f16 s0, s0, s2
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load volatile half, ptr %a_ptr
  %b = load volatile half, ptr %b_ptr
  %lhs = load volatile half, ptr %lhs_ptr
  %rhs = load volatile half, ptr %rhs_ptr
  %tst1 = fcmp oge half %lhs, %rhs
  %val1 = select i1 %tst1, half %a, half %b
  store half %val1, ptr @varhalf
  ret void
}

define void @test_vsel32oeq(ptr %lhs_ptr, ptr %rhs_ptr, ptr %a_ptr, ptr %b_ptr) {
; CHECK-LABEL: test_vsel32oeq:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr.16 s0, [r2]
; CHECK-NEXT:    vldr.16 s2, [r3]
; CHECK-NEXT:    vldr.16 s4, [r0]
; CHECK-NEXT:    vldr.16 s6, [r1]
; CHECK-NEXT:    movw r0, :lower16:varhalf
; CHECK-NEXT:    vcmp.f16 s4, s6
; CHECK-NEXT:    movt r0, :upper16:varhalf
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vseleq.f16 s0, s0, s2
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load volatile half, ptr %a_ptr
  %b = load volatile half, ptr %b_ptr
  %lhs = load volatile half, ptr %lhs_ptr
  %rhs = load volatile half, ptr %rhs_ptr
  %tst1 = fcmp oeq half %lhs, %rhs
  %val1 = select i1 %tst1, half %a, half %b
  store half %val1, ptr @varhalf
  ret void
}

define void @test_vsel32ugt(ptr %lhs_ptr, ptr %rhs_ptr, ptr %a_ptr, ptr %b_ptr) {
; CHECK-LABEL: test_vsel32ugt:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr.16 s0, [r2]
; CHECK-NEXT:    vldr.16 s2, [r3]
; CHECK-NEXT:    vldr.16 s4, [r0]
; CHECK-NEXT:    vldr.16 s6, [r1]
; CHECK-NEXT:    movw r0, :lower16:varhalf
; CHECK-NEXT:    vcmp.f16 s6, s4
; CHECK-NEXT:    movt r0, :upper16:varhalf
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vselge.f16 s0, s2, s0
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load volatile half, ptr %a_ptr
  %b = load volatile half, ptr %b_ptr
  %lhs = load volatile half, ptr %lhs_ptr
  %rhs = load volatile half, ptr %rhs_ptr
  %tst1 = fcmp ugt half %lhs, %rhs
  %val1 = select i1 %tst1, half %a, half %b
  store half %val1, ptr @varhalf
  ret void
}

define void @test_vsel32uge(ptr %lhs_ptr, ptr %rhs_ptr, ptr %a_ptr, ptr %b_ptr) {
; CHECK-LABEL: test_vsel32uge:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr.16 s0, [r2]
; CHECK-NEXT:    vldr.16 s2, [r3]
; CHECK-NEXT:    vldr.16 s4, [r0]
; CHECK-NEXT:    vldr.16 s6, [r1]
; CHECK-NEXT:    movw r0, :lower16:varhalf
; CHECK-NEXT:    vcmp.f16 s6, s4
; CHECK-NEXT:    movt r0, :upper16:varhalf
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vselgt.f16 s0, s2, s0
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load volatile half, ptr %a_ptr
  %b = load volatile half, ptr %b_ptr
  %lhs = load volatile half, ptr %lhs_ptr
  %rhs = load volatile half, ptr %rhs_ptr
  %tst1 = fcmp uge half %lhs, %rhs
  %val1 = select i1 %tst1, half %a, half %b
  store half %val1, ptr @varhalf
  ret void
}

define void @test_vsel32olt(ptr %lhs_ptr, ptr %rhs_ptr, ptr %a_ptr, ptr %b_ptr) {
; CHECK-LABEL: test_vsel32olt:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr.16 s0, [r2]
; CHECK-NEXT:    vldr.16 s2, [r3]
; CHECK-NEXT:    vldr.16 s4, [r0]
; CHECK-NEXT:    vldr.16 s6, [r1]
; CHECK-NEXT:    movw r0, :lower16:varhalf
; CHECK-NEXT:    vcmp.f16 s6, s4
; CHECK-NEXT:    movt r0, :upper16:varhalf
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vselgt.f16 s0, s0, s2
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load volatile half, ptr %a_ptr
  %b = load volatile half, ptr %b_ptr
  %lhs = load volatile half, ptr %lhs_ptr
  %rhs = load volatile half, ptr %rhs_ptr
  %tst1 = fcmp olt half %lhs, %rhs
  %val1 = select i1 %tst1, half %a, half %b
  store half %val1, ptr @varhalf
  ret void
}

define void @test_vsel32ult(ptr %lhs_ptr, ptr %rhs_ptr, ptr %a_ptr, ptr %b_ptr) {
; CHECK-LABEL: test_vsel32ult:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr.16 s0, [r2]
; CHECK-NEXT:    vldr.16 s2, [r3]
; CHECK-NEXT:    vldr.16 s4, [r0]
; CHECK-NEXT:    vldr.16 s6, [r1]
; CHECK-NEXT:    movw r0, :lower16:varhalf
; CHECK-NEXT:    vcmp.f16 s4, s6
; CHECK-NEXT:    movt r0, :upper16:varhalf
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vselge.f16 s0, s2, s0
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load volatile half, ptr %a_ptr
  %b = load volatile half, ptr %b_ptr
  %lhs = load volatile half, ptr %lhs_ptr
  %rhs = load volatile half, ptr %rhs_ptr
  %tst1 = fcmp ult half %lhs, %rhs
  %val1 = select i1 %tst1, half %a, half %b
  store half %val1, ptr @varhalf
  ret void
}

define void @test_vsel32ole(ptr %lhs_ptr, ptr %rhs_ptr, ptr %a_ptr, ptr %b_ptr) {
; CHECK-LABEL: test_vsel32ole:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr.16 s0, [r2]
; CHECK-NEXT:    vldr.16 s2, [r3]
; CHECK-NEXT:    vldr.16 s4, [r0]
; CHECK-NEXT:    vldr.16 s6, [r1]
; CHECK-NEXT:    movw r0, :lower16:varhalf
; CHECK-NEXT:    vcmp.f16 s6, s4
; CHECK-NEXT:    movt r0, :upper16:varhalf
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vselge.f16 s0, s0, s2
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load volatile half, ptr %a_ptr
  %b = load volatile half, ptr %b_ptr
  %lhs = load volatile half, ptr %lhs_ptr
  %rhs = load volatile half, ptr %rhs_ptr
  %tst1 = fcmp ole half %lhs, %rhs
  %val1 = select i1 %tst1, half %a, half %b
  store half %val1, ptr @varhalf
  ret void
}

define void @test_vsel32ule(ptr %lhs_ptr, ptr %rhs_ptr, ptr %a_ptr, ptr %b_ptr) {
; CHECK-LABEL: test_vsel32ule:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr.16 s0, [r2]
; CHECK-NEXT:    vldr.16 s2, [r3]
; CHECK-NEXT:    vldr.16 s4, [r0]
; CHECK-NEXT:    vldr.16 s6, [r1]
; CHECK-NEXT:    movw r0, :lower16:varhalf
; CHECK-NEXT:    vcmp.f16 s4, s6
; CHECK-NEXT:    movt r0, :upper16:varhalf
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vselgt.f16 s0, s2, s0
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load volatile half, ptr %a_ptr
  %b = load volatile half, ptr %b_ptr
  %lhs = load volatile half, ptr %lhs_ptr
  %rhs = load volatile half, ptr %rhs_ptr
  %tst1 = fcmp ule half %lhs, %rhs
  %val1 = select i1 %tst1, half %a, half %b
  store half %val1, ptr @varhalf
  ret void
}

define void @test_vsel32ord(ptr %lhs_ptr, ptr %rhs_ptr, ptr %a_ptr, ptr %b_ptr) {
; CHECK-LABEL: test_vsel32ord:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr.16 s0, [r2]
; CHECK-NEXT:    vldr.16 s2, [r3]
; CHECK-NEXT:    vldr.16 s4, [r0]
; CHECK-NEXT:    vldr.16 s6, [r1]
; CHECK-NEXT:    movw r0, :lower16:varhalf
; CHECK-NEXT:    vcmp.f16 s4, s6
; CHECK-NEXT:    movt r0, :upper16:varhalf
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vselvs.f16 s0, s2, s0
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load volatile half, ptr %a_ptr
  %b = load volatile half, ptr %b_ptr
  %lhs = load volatile half, ptr %lhs_ptr
  %rhs = load volatile half, ptr %rhs_ptr
  %tst1 = fcmp ord half %lhs, %rhs
  %val1 = select i1 %tst1, half %a, half %b
  store half %val1, ptr @varhalf
  ret void
}

define void @test_vsel32une(ptr %lhs_ptr, ptr %rhs_ptr, ptr %a_ptr, ptr %b_ptr) {
; CHECK-LABEL: test_vsel32une:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr.16 s0, [r2]
; CHECK-NEXT:    vldr.16 s2, [r3]
; CHECK-NEXT:    vldr.16 s4, [r0]
; CHECK-NEXT:    vldr.16 s6, [r1]
; CHECK-NEXT:    movw r0, :lower16:varhalf
; CHECK-NEXT:    vcmp.f16 s4, s6
; CHECK-NEXT:    movt r0, :upper16:varhalf
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vseleq.f16 s0, s2, s0
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load volatile half, ptr %a_ptr
  %b = load volatile half, ptr %b_ptr
  %lhs = load volatile half, ptr %lhs_ptr
  %rhs = load volatile half, ptr %rhs_ptr
  %tst1 = fcmp une half %lhs, %rhs
  %val1 = select i1 %tst1, half %a, half %b
  store half %val1, ptr @varhalf
  ret void
}

define void @test_vsel32uno(ptr %lhs_ptr, ptr %rhs_ptr, ptr %a_ptr, ptr %b_ptr) {
; CHECK-LABEL: test_vsel32uno:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr.16 s0, [r2]
; CHECK-NEXT:    vldr.16 s2, [r3]
; CHECK-NEXT:    vldr.16 s4, [r0]
; CHECK-NEXT:    vldr.16 s6, [r1]
; CHECK-NEXT:    movw r0, :lower16:varhalf
; CHECK-NEXT:    vcmp.f16 s4, s6
; CHECK-NEXT:    movt r0, :upper16:varhalf
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vselvs.f16 s0, s0, s2
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load volatile half, ptr %a_ptr
  %b = load volatile half, ptr %b_ptr
  %lhs = load volatile half, ptr %lhs_ptr
  %rhs = load volatile half, ptr %rhs_ptr
  %tst1 = fcmp uno half %lhs, %rhs
  %val1 = select i1 %tst1, half %a, half %b
  store half %val1, ptr @varhalf
  ret void
}
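
; The same fcmp conditions with the nnan flag: without NaNs the unordered
; predicates can reuse the ordered code sequences (e.g. ugt matches ogt).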
define void @test_vsel32ogt_nnan(ptr %lhs_ptr, ptr %rhs_ptr, ptr %a_ptr, ptr %b_ptr) {
; CHECK-LABEL: test_vsel32ogt_nnan:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr.16 s0, [r2]
; CHECK-NEXT:    vldr.16 s2, [r3]
; CHECK-NEXT:    vldr.16 s4, [r0]
; CHECK-NEXT:    vldr.16 s6, [r1]
; CHECK-NEXT:    movw r0, :lower16:varhalf
; CHECK-NEXT:    vcmp.f16 s4, s6
; CHECK-NEXT:    movt r0, :upper16:varhalf
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vselgt.f16 s0, s0, s2
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load volatile half, ptr %a_ptr
  %b = load volatile half, ptr %b_ptr
  %lhs = load volatile half, ptr %lhs_ptr
  %rhs = load volatile half, ptr %rhs_ptr
  %tst1 = fcmp nnan ogt half %lhs, %rhs
  %val1 = select i1 %tst1, half %a, half %b
  store half %val1, ptr @varhalf
  ret void
}

define void @test_vsel32oge_nnan(ptr %lhs_ptr, ptr %rhs_ptr, ptr %a_ptr, ptr %b_ptr) {
; CHECK-LABEL: test_vsel32oge_nnan:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr.16 s0, [r2]
; CHECK-NEXT:    vldr.16 s2, [r3]
; CHECK-NEXT:    vldr.16 s4, [r0]
; CHECK-NEXT:    vldr.16 s6, [r1]
; CHECK-NEXT:    movw r0, :lower16:varhalf
; CHECK-NEXT:    vcmp.f16 s4, s6
; CHECK-NEXT:    movt r0, :upper16:varhalf
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vselge.f16 s0, s0, s2
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load volatile half, ptr %a_ptr
  %b = load volatile half, ptr %b_ptr
  %lhs = load volatile half, ptr %lhs_ptr
  %rhs = load volatile half, ptr %rhs_ptr
  %tst1 = fcmp nnan oge half %lhs, %rhs
  %val1 = select i1 %tst1, half %a, half %b
  store half %val1, ptr @varhalf
  ret void
}

define void @test_vsel32oeq_nnan(ptr %lhs_ptr, ptr %rhs_ptr, ptr %a_ptr, ptr %b_ptr) {
; CHECK-LABEL: test_vsel32oeq_nnan:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr.16 s0, [r2]
; CHECK-NEXT:    vldr.16 s2, [r3]
; CHECK-NEXT:    vldr.16 s4, [r0]
; CHECK-NEXT:    vldr.16 s6, [r1]
; CHECK-NEXT:    movw r0, :lower16:varhalf
; CHECK-NEXT:    vcmp.f16 s4, s6
; CHECK-NEXT:    movt r0, :upper16:varhalf
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vseleq.f16 s0, s0, s2
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load volatile half, ptr %a_ptr
  %b = load volatile half, ptr %b_ptr
  %lhs = load volatile half, ptr %lhs_ptr
  %rhs = load volatile half, ptr %rhs_ptr
  %tst1 = fcmp nnan oeq half %lhs, %rhs
  %val1 = select i1 %tst1, half %a, half %b
  store half %val1, ptr @varhalf
  ret void
}

define void @test_vsel32ugt_nnan(ptr %lhs_ptr, ptr %rhs_ptr, ptr %a_ptr, ptr %b_ptr) {
; CHECK-LABEL: test_vsel32ugt_nnan:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr.16 s0, [r2]
; CHECK-NEXT:    vldr.16 s2, [r3]
; CHECK-NEXT:    vldr.16 s4, [r0]
; CHECK-NEXT:    vldr.16 s6, [r1]
; CHECK-NEXT:    movw r0, :lower16:varhalf
; CHECK-NEXT:    vcmp.f16 s4, s6
; CHECK-NEXT:    movt r0, :upper16:varhalf
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vselgt.f16 s0, s0, s2
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load volatile half, ptr %a_ptr
  %b = load volatile half, ptr %b_ptr
  %lhs = load volatile half, ptr %lhs_ptr
  %rhs = load volatile half, ptr %rhs_ptr
  %tst1 = fcmp nnan ugt half %lhs, %rhs
  %val1 = select i1 %tst1, half %a, half %b
  store half %val1, ptr @varhalf
  ret void
}

define void @test_vsel32uge_nnan(ptr %lhs_ptr, ptr %rhs_ptr, ptr %a_ptr, ptr %b_ptr) {
; CHECK-LABEL: test_vsel32uge_nnan:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr.16 s0, [r2]
; CHECK-NEXT:    vldr.16 s2, [r3]
; CHECK-NEXT:    vldr.16 s4, [r0]
; CHECK-NEXT:    vldr.16 s6, [r1]
; CHECK-NEXT:    movw r0, :lower16:varhalf
; CHECK-NEXT:    vcmp.f16 s4, s6
; CHECK-NEXT:    movt r0, :upper16:varhalf
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vselge.f16 s0, s0, s2
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load volatile half, ptr %a_ptr
  %b = load volatile half, ptr %b_ptr
  %lhs = load volatile half, ptr %lhs_ptr
  %rhs = load volatile half, ptr %rhs_ptr
  %tst1 = fcmp nnan uge half %lhs, %rhs
  %val1 = select i1 %tst1, half %a, half %b
  store half %val1, ptr @varhalf
  ret void
}

define void @test_vsel32olt_nnan(ptr %lhs_ptr, ptr %rhs_ptr, ptr %a_ptr, ptr %b_ptr) {
; CHECK-LABEL: test_vsel32olt_nnan:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr.16 s0, [r2]
; CHECK-NEXT:    vldr.16 s2, [r3]
; CHECK-NEXT:    vldr.16 s4, [r0]
; CHECK-NEXT:    vldr.16 s6, [r1]
; CHECK-NEXT:    movw r0, :lower16:varhalf
; CHECK-NEXT:    vcmp.f16 s6, s4
; CHECK-NEXT:    movt r0, :upper16:varhalf
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vselgt.f16 s0, s0, s2
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load volatile half, ptr %a_ptr
  %b = load volatile half, ptr %b_ptr
  %lhs = load volatile half, ptr %lhs_ptr
  %rhs = load volatile half, ptr %rhs_ptr
  %tst1 = fcmp nnan olt half %lhs, %rhs
  %val1 = select i1 %tst1, half %a, half %b
  store half %val1, ptr @varhalf
  ret void
}

define void @test_vsel32ult_nnan(ptr %lhs_ptr, ptr %rhs_ptr, ptr %a_ptr, ptr %b_ptr) {
; CHECK-LABEL: test_vsel32ult_nnan:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr.16 s0, [r2]
; CHECK-NEXT:    vldr.16 s2, [r3]
; CHECK-NEXT:    vldr.16 s4, [r0]
; CHECK-NEXT:    vldr.16 s6, [r1]
; CHECK-NEXT:    movw r0, :lower16:varhalf
; CHECK-NEXT:    vcmp.f16 s6, s4
; CHECK-NEXT:    movt r0, :upper16:varhalf
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vselgt.f16 s0, s0, s2
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load volatile half, ptr %a_ptr
  %b = load volatile half, ptr %b_ptr
  %lhs = load volatile half, ptr %lhs_ptr
  %rhs = load volatile half, ptr %rhs_ptr
  %tst1 = fcmp nnan ult half %lhs, %rhs
  %val1 = select i1 %tst1, half %a, half %b
  store half %val1, ptr @varhalf
  ret void
}

define void @test_vsel32ole_nnan(ptr %lhs_ptr, ptr %rhs_ptr, ptr %a_ptr, ptr %b_ptr) {
; CHECK-LABEL: test_vsel32ole_nnan:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr.16 s0, [r2]
; CHECK-NEXT:    vldr.16 s2, [r3]
; CHECK-NEXT:    vldr.16 s4, [r0]
; CHECK-NEXT:    vldr.16 s6, [r1]
; CHECK-NEXT:    movw r0, :lower16:varhalf
; CHECK-NEXT:    vcmp.f16 s6, s4
; CHECK-NEXT:    movt r0, :upper16:varhalf
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vselge.f16 s0, s0, s2
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load volatile half, ptr %a_ptr
  %b = load volatile half, ptr %b_ptr
  %lhs = load volatile half, ptr %lhs_ptr
  %rhs = load volatile half, ptr %rhs_ptr
  %tst1 = fcmp nnan ole half %lhs, %rhs
  %val1 = select i1 %tst1, half %a, half %b
  store half %val1, ptr @varhalf
  ret void
}

define void @test_vsel32ule_nnan(ptr %lhs_ptr, ptr %rhs_ptr, ptr %a_ptr, ptr %b_ptr) {
; CHECK-LABEL: test_vsel32ule_nnan:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr.16 s0, [r2]
; CHECK-NEXT:    vldr.16 s2, [r3]
; CHECK-NEXT:    vldr.16 s4, [r0]
; CHECK-NEXT:    vldr.16 s6, [r1]
; CHECK-NEXT:    movw r0, :lower16:varhalf
; CHECK-NEXT:    vcmp.f16 s6, s4
; CHECK-NEXT:    movt r0, :upper16:varhalf
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vselge.f16 s0, s0, s2
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load volatile half, ptr %a_ptr
  %b = load volatile half, ptr %b_ptr
  %lhs = load volatile half, ptr %lhs_ptr
  %rhs = load volatile half, ptr %rhs_ptr
  %tst1 = fcmp nnan ule half %lhs, %rhs
  %val1 = select i1 %tst1, half %a, half %b
  store half %val1, ptr @varhalf
  ret void
}

define void @test_vsel32ord_nnan(ptr %lhs_ptr, ptr %rhs_ptr, ptr %a_ptr, ptr %b_ptr) {
; CHECK-LABEL: test_vsel32ord_nnan:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr.16 s0, [r2]
; CHECK-NEXT:    vldr.16 s2, [r3]
; CHECK-NEXT:    vldr.16 s4, [r0]
; CHECK-NEXT:    vldr.16 s6, [r1]
; CHECK-NEXT:    movw r0, :lower16:varhalf
; CHECK-NEXT:    vcmp.f16 s4, s6
; CHECK-NEXT:    movt r0, :upper16:varhalf
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vselvs.f16 s0, s2, s0
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load volatile half, ptr %a_ptr
  %b = load volatile half, ptr %b_ptr
  %lhs = load volatile half, ptr %lhs_ptr
  %rhs = load volatile half, ptr %rhs_ptr
  %tst1 = fcmp nnan ord half %lhs, %rhs
  %val1 = select i1 %tst1, half %a, half %b
  store half %val1, ptr @varhalf
  ret void
}

define void @test_vsel32une_nnan(ptr %lhs_ptr, ptr %rhs_ptr, ptr %a_ptr, ptr %b_ptr) {
; CHECK-LABEL: test_vsel32une_nnan:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr.16 s0, [r2]
; CHECK-NEXT:    vldr.16 s2, [r3]
; CHECK-NEXT:    vldr.16 s4, [r0]
; CHECK-NEXT:    vldr.16 s6, [r1]
; CHECK-NEXT:    movw r0, :lower16:varhalf
; CHECK-NEXT:    vcmp.f16 s4, s6
; CHECK-NEXT:    movt r0, :upper16:varhalf
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vseleq.f16 s0, s2, s0
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load volatile half, ptr %a_ptr
  %b = load volatile half, ptr %b_ptr
  %lhs = load volatile half, ptr %lhs_ptr
  %rhs = load volatile half, ptr %rhs_ptr
  %tst1 = fcmp nnan une half %lhs, %rhs
  %val1 = select i1 %tst1, half %a, half %b
  store half %val1, ptr @varhalf
  ret void
}

define void @test_vsel32uno_nnan(ptr %lhs_ptr, ptr %rhs_ptr, ptr %a_ptr, ptr %b_ptr) {
; CHECK-LABEL: test_vsel32uno_nnan:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr.16 s0, [r2]
; CHECK-NEXT:    vldr.16 s2, [r3]
; CHECK-NEXT:    vldr.16 s4, [r0]
; CHECK-NEXT:    vldr.16 s6, [r1]
; CHECK-NEXT:    movw r0, :lower16:varhalf
; CHECK-NEXT:    vcmp.f16 s4, s6
; CHECK-NEXT:    movt r0, :upper16:varhalf
; CHECK-NEXT:    vmrs APSR_nzcv, fpscr
; CHECK-NEXT:    vselvs.f16 s0, s0, s2
; CHECK-NEXT:    vstr.16 s0, [r0]
; CHECK-NEXT:    bx lr
  %a = load volatile half, ptr %a_ptr
  %b = load volatile half, ptr %b_ptr
  %lhs = load volatile half, ptr %lhs_ptr
  %rhs = load volatile half, ptr %rhs_ptr
  %tst1 = fcmp nnan uno half %lhs, %rhs
  %val1 = select i1 %tst1, half %a, half %b
  store half %val1, ptr @varhalf
  ret void
}