; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+f,+d,+v -verify-machineinstrs < %s \
; RUN:   -target-abi=lp64d -riscv-v-vector-bits-min=0 | FileCheck --check-prefix=CHECK-NOV %s
; RUN: llc -mtriple=riscv64 -mattr=+f,+d,+v -verify-machineinstrs < %s \
; RUN:   -target-abi=lp64d -riscv-v-vector-bits-min=-1 | FileCheck --check-prefix=CHECK-V %s

; i32 saturate

define <2 x i32> @stest_f64i32(<2 x double> %x) {
; CHECK-NOV-LABEL: stest_f64i32:
; CHECK-NOV:       # %bb.0: # %entry
; CHECK-NOV-NEXT:    fcvt.l.d a1, fa1, rtz
; CHECK-NOV-NEXT:    lui a2, 524288
; CHECK-NOV-NEXT:    addiw a3, a2, -1
; CHECK-NOV-NEXT:    fcvt.l.d a0, fa0, rtz
; CHECK-NOV-NEXT:    bge a1, a3, .LBB0_5
; CHECK-NOV-NEXT:  # %bb.1: # %entry
; CHECK-NOV-NEXT:    bge a0, a3, .LBB0_6
; CHECK-NOV-NEXT:  .LBB0_2: # %entry
; CHECK-NOV-NEXT:    bge a2, a0, .LBB0_7
; CHECK-NOV-NEXT:  .LBB0_3: # %entry
; CHECK-NOV-NEXT:    bge a2, a1, .LBB0_8
; CHECK-NOV-NEXT:  .LBB0_4: # %entry
; CHECK-NOV-NEXT:    ret
; CHECK-NOV-NEXT:  .LBB0_5: # %entry
; CHECK-NOV-NEXT:    mv a1, a3
; CHECK-NOV-NEXT:    blt a0, a3, .LBB0_2
; CHECK-NOV-NEXT:  .LBB0_6: # %entry
; CHECK-NOV-NEXT:    mv a0, a3
; CHECK-NOV-NEXT:    blt a2, a3, .LBB0_3
; CHECK-NOV-NEXT:  .LBB0_7: # %entry
; CHECK-NOV-NEXT:    lui a0, 524288
; CHECK-NOV-NEXT:    blt a2, a1, .LBB0_4
; CHECK-NOV-NEXT:  .LBB0_8: # %entry
; CHECK-NOV-NEXT:    lui a1, 524288
; CHECK-NOV-NEXT:    ret
;
; CHECK-V-LABEL: stest_f64i32:
; CHECK-V:       # %bb.0: # %entry
; CHECK-V-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; CHECK-V-NEXT:    vfcvt.rtz.x.f.v v8, v8
; CHECK-V-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; CHECK-V-NEXT:    vnclip.wi v8, v8, 0
; CHECK-V-NEXT:    ret
entry:
  %conv = fptosi <2 x double> %x to <2 x i64>
  %0 = icmp slt <2 x i64> %conv, <i64 2147483647, i64 2147483647>
  %spec.store.select = select <2 x i1> %0, <2 x i64> %conv, <2 x i64> <i64 2147483647, i64 2147483647>
  %1 = icmp sgt <2 x i64> %spec.store.select, <i64 -2147483648, i64 -2147483648>
  %spec.store.select7 = select <2 x i1> %1, <2 x i64> %spec.store.select, <2 x i64> <i64 -2147483648, i64 -2147483648>
  %conv6 = trunc <2 x i64> %spec.store.select7 to <2 x i32>
  ret <2 x i32> %conv6
}

define <2 x i32> @utest_f64i32(<2 x double> %x) {
; CHECK-NOV-LABEL: utest_f64i32:
; CHECK-NOV:       # %bb.0: # %entry
; CHECK-NOV-NEXT:    fcvt.lu.d a0, fa0, rtz
; CHECK-NOV-NEXT:    li a2, -1
; CHECK-NOV-NEXT:    srli a2, a2, 32
; CHECK-NOV-NEXT:    fcvt.lu.d a1, fa1, rtz
; CHECK-NOV-NEXT:    bgeu a0, a2, .LBB1_3
; CHECK-NOV-NEXT:  # %bb.1: # %entry
; CHECK-NOV-NEXT:    bgeu a1, a2, .LBB1_4
; CHECK-NOV-NEXT:  .LBB1_2: # %entry
; CHECK-NOV-NEXT:    ret
; CHECK-NOV-NEXT:  .LBB1_3: # %entry
; CHECK-NOV-NEXT:    mv a0, a2
; CHECK-NOV-NEXT:    bltu a1, a2, .LBB1_2
; CHECK-NOV-NEXT:  .LBB1_4: # %entry
; CHECK-NOV-NEXT:    mv a1, a2
; CHECK-NOV-NEXT:    ret
;
; CHECK-V-LABEL: utest_f64i32:
; CHECK-V:       # %bb.0: # %entry
; CHECK-V-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; CHECK-V-NEXT:    vfcvt.rtz.xu.f.v v8, v8
; CHECK-V-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; CHECK-V-NEXT:    vnclipu.wi v8, v8, 0
; CHECK-V-NEXT:    ret
entry:
  %conv = fptoui <2 x double> %x to <2 x i64>
  %0 = icmp ult <2 x i64> %conv, <i64 4294967295, i64 4294967295>
  %spec.store.select = select <2 x i1> %0, <2 x i64> %conv, <2 x i64> <i64 4294967295, i64 4294967295>
  %conv6 = trunc <2 x i64> %spec.store.select to <2 x i32>
  ret <2 x i32> %conv6
}

define <2 x i32> @ustest_f64i32(<2 x double> %x) {
; CHECK-NOV-LABEL: ustest_f64i32:
; CHECK-NOV:       # %bb.0: # %entry
; CHECK-NOV-NEXT:    fcvt.l.d a1, fa1, rtz
; CHECK-NOV-NEXT:    li a2, -1
; CHECK-NOV-NEXT:    srli a2, a2, 32
; CHECK-NOV-NEXT:    fcvt.l.d a0, fa0, rtz
; CHECK-NOV-NEXT:    blt a1, a2, .LBB2_2
; CHECK-NOV-NEXT:  # %bb.1: # %entry
; CHECK-NOV-NEXT:    mv a1, a2
; CHECK-NOV-NEXT:  .LBB2_2: # %entry
; CHECK-NOV-NEXT:    blt a0, a2, .LBB2_4
; CHECK-NOV-NEXT:  # %bb.3: # %entry
; CHECK-NOV-NEXT:    mv a0, a2
; CHECK-NOV-NEXT:  .LBB2_4: # %entry
; CHECK-NOV-NEXT:    sgtz a2, a1
; CHECK-NOV-NEXT:    sgtz a3, a0
; CHECK-NOV-NEXT:    neg a3, a3
; CHECK-NOV-NEXT:    neg a2, a2
; CHECK-NOV-NEXT:    and a0, a3, a0
; CHECK-NOV-NEXT:    and a1, a2, a1
; CHECK-NOV-NEXT:    ret
;
; CHECK-V-LABEL: ustest_f64i32:
; CHECK-V:       # %bb.0: # %entry
; CHECK-V-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; CHECK-V-NEXT:    vfcvt.rtz.x.f.v v8, v8
; CHECK-V-NEXT:    vmax.vx v8, v8, zero
; CHECK-V-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; CHECK-V-NEXT:    vnclipu.wi v8, v8, 0
; CHECK-V-NEXT:    ret
entry:
  %conv = fptosi <2 x double> %x to <2 x i64>
  %0 = icmp slt <2 x i64> %conv, <i64 4294967295, i64 4294967295>
  %spec.store.select = select <2 x i1> %0, <2 x i64> %conv, <2 x i64> <i64 4294967295, i64 4294967295>
  %1 = icmp sgt <2 x i64> %spec.store.select, zeroinitializer
  %spec.store.select7 = select <2 x i1> %1, <2 x i64> %spec.store.select, <2 x i64> zeroinitializer
  %conv6 = trunc <2 x i64> %spec.store.select7 to <2 x i32>
  ret <2 x i32> %conv6
}

define <4 x i32> @stest_f32i32(<4 x float> %x) {
; CHECK-NOV-LABEL: stest_f32i32:
; CHECK-NOV:       # %bb.0: # %entry
; CHECK-NOV-NEXT:    fcvt.l.s a1, fa3, rtz
; CHECK-NOV-NEXT:    lui a3, 524288
; CHECK-NOV-NEXT:    addiw a6, a3, -1
; CHECK-NOV-NEXT:    fcvt.l.s a2, fa2, rtz
; CHECK-NOV-NEXT:    bge a1, a6, .LBB3_10
; CHECK-NOV-NEXT:  # %bb.1: # %entry
; CHECK-NOV-NEXT:    fcvt.l.s a4, fa1, rtz
; CHECK-NOV-NEXT:    bge a2, a6, .LBB3_11
; CHECK-NOV-NEXT:  .LBB3_2: # %entry
; CHECK-NOV-NEXT:    fcvt.l.s a5, fa0, rtz
; CHECK-NOV-NEXT:    bge a4, a6, .LBB3_12
; CHECK-NOV-NEXT:  .LBB3_3: # %entry
; CHECK-NOV-NEXT:    bge a5, a6, .LBB3_13
; CHECK-NOV-NEXT:  .LBB3_4: # %entry
; CHECK-NOV-NEXT:    bge a3, a5, .LBB3_14
; CHECK-NOV-NEXT:  .LBB3_5: # %entry
; CHECK-NOV-NEXT:    bge a3, a4, .LBB3_15
; CHECK-NOV-NEXT:  .LBB3_6: # %entry
; CHECK-NOV-NEXT:    bge a3, a2, .LBB3_16
; CHECK-NOV-NEXT:  .LBB3_7: # %entry
; CHECK-NOV-NEXT:    blt a3, a1, .LBB3_9
; CHECK-NOV-NEXT:  .LBB3_8: # %entry
; CHECK-NOV-NEXT:    lui a1, 524288
; CHECK-NOV-NEXT:  .LBB3_9: # %entry
; CHECK-NOV-NEXT:    sw a5, 0(a0)
; CHECK-NOV-NEXT:    sw a4, 4(a0)
; CHECK-NOV-NEXT:    sw a2, 8(a0)
; CHECK-NOV-NEXT:    sw a1, 12(a0)
; CHECK-NOV-NEXT:    ret
; CHECK-NOV-NEXT:  .LBB3_10: # %entry
; CHECK-NOV-NEXT:    mv a1, a6
; CHECK-NOV-NEXT:    fcvt.l.s a4, fa1, rtz
; CHECK-NOV-NEXT:    blt a2, a6, .LBB3_2
; CHECK-NOV-NEXT:  .LBB3_11: # %entry
; CHECK-NOV-NEXT:    mv a2, a6
; CHECK-NOV-NEXT:    fcvt.l.s a5, fa0, rtz
; CHECK-NOV-NEXT:    blt a4, a6, .LBB3_3
; CHECK-NOV-NEXT:  .LBB3_12: # %entry
; CHECK-NOV-NEXT:    mv a4, a6
; CHECK-NOV-NEXT:    blt a5, a6, .LBB3_4
; CHECK-NOV-NEXT:  .LBB3_13: # %entry
; CHECK-NOV-NEXT:    mv a5, a6
; CHECK-NOV-NEXT:    blt a3, a6, .LBB3_5
; CHECK-NOV-NEXT:  .LBB3_14: # %entry
; CHECK-NOV-NEXT:    lui a5, 524288
; CHECK-NOV-NEXT:    blt a3, a4, .LBB3_6
; CHECK-NOV-NEXT:  .LBB3_15: # %entry
; CHECK-NOV-NEXT:    lui a4, 524288
; CHECK-NOV-NEXT:    blt a3, a2, .LBB3_7
; CHECK-NOV-NEXT:  .LBB3_16: # %entry
; CHECK-NOV-NEXT:    lui a2, 524288
; CHECK-NOV-NEXT:    bge a3, a1, .LBB3_8
; CHECK-NOV-NEXT:    j .LBB3_9
;
; CHECK-V-LABEL: stest_f32i32:
; CHECK-V:       # %bb.0: # %entry
; CHECK-V-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-V-NEXT:    vfwcvt.rtz.x.f.v v10, v8
; CHECK-V-NEXT:    vnclip.wi v8, v10, 0
; CHECK-V-NEXT:    ret
entry:
  %conv = fptosi <4 x float> %x to <4 x i64>
  %0 = icmp slt <4 x i64> %conv, <i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647>
  %spec.store.select = select <4 x i1> %0, <4 x i64> %conv, <4 x i64> <i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647>
  %1 = icmp sgt <4 x i64> %spec.store.select, <i64 -2147483648, i64 -2147483648, i64 -2147483648, i64 -2147483648>
  %spec.store.select7 = select <4 x i1> %1, <4 x i64> %spec.store.select, <4 x i64> <i64 -2147483648, i64 -2147483648, i64 -2147483648, i64 -2147483648>
  %conv6 = trunc <4 x i64> %spec.store.select7 to <4 x i32>
  ret <4 x i32> %conv6
}

define <4 x i32> @utest_f32i32(<4 x float> %x) {
; CHECK-NOV-LABEL: utest_f32i32:
; CHECK-NOV:       # %bb.0: # %entry
; CHECK-NOV-NEXT:    fcvt.lu.s a1, fa0, rtz
; CHECK-NOV-NEXT:    li a3, -1
; CHECK-NOV-NEXT:    srli a3, a3, 32
; CHECK-NOV-NEXT:    fcvt.lu.s a2, fa1, rtz
; CHECK-NOV-NEXT:    bgeu a1, a3, .LBB4_6
; CHECK-NOV-NEXT:  # %bb.1: # %entry
; CHECK-NOV-NEXT:    fcvt.lu.s a4, fa2, rtz
; CHECK-NOV-NEXT:    bgeu a2, a3, .LBB4_7
; CHECK-NOV-NEXT:  .LBB4_2: # %entry
; CHECK-NOV-NEXT:    fcvt.lu.s a5, fa3, rtz
; CHECK-NOV-NEXT:    bgeu a4, a3, .LBB4_8
; CHECK-NOV-NEXT:  .LBB4_3: # %entry
; CHECK-NOV-NEXT:    bltu a5, a3, .LBB4_5
; CHECK-NOV-NEXT:  .LBB4_4: # %entry
; CHECK-NOV-NEXT:    mv a5, a3
; CHECK-NOV-NEXT:  .LBB4_5: # %entry
; CHECK-NOV-NEXT:    sw a1, 0(a0)
; CHECK-NOV-NEXT:    sw a2, 4(a0)
; CHECK-NOV-NEXT:    sw a4, 8(a0)
; CHECK-NOV-NEXT:    sw a5, 12(a0)
; CHECK-NOV-NEXT:    ret
; CHECK-NOV-NEXT:  .LBB4_6: # %entry
; CHECK-NOV-NEXT:    mv a1, a3
; CHECK-NOV-NEXT:    fcvt.lu.s a4, fa2, rtz
; CHECK-NOV-NEXT:    bltu a2, a3, .LBB4_2
; CHECK-NOV-NEXT:  .LBB4_7: # %entry
; CHECK-NOV-NEXT:    mv a2, a3
; CHECK-NOV-NEXT:    fcvt.lu.s a5, fa3, rtz
; CHECK-NOV-NEXT:    bltu a4, a3, .LBB4_3
; CHECK-NOV-NEXT:  .LBB4_8: # %entry
; CHECK-NOV-NEXT:    mv a4, a3
; CHECK-NOV-NEXT:    bgeu a5, a3, .LBB4_4
; CHECK-NOV-NEXT:    j .LBB4_5
;
; CHECK-V-LABEL: utest_f32i32:
; CHECK-V:       # %bb.0: # %entry
; CHECK-V-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-V-NEXT:    vfwcvt.rtz.xu.f.v v10, v8
; CHECK-V-NEXT:    vnclipu.wi v8, v10, 0
; CHECK-V-NEXT:    ret
entry:
  %conv = fptoui <4 x float> %x to <4 x i64>
  %0 = icmp ult <4 x i64> %conv, <i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295>
  %spec.store.select = select <4 x i1> %0, <4 x i64> %conv, <4 x i64> <i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295>
  %conv6 = trunc <4 x i64> %spec.store.select to <4 x i32>
  ret <4 x i32> %conv6
}

define <4 x i32> @ustest_f32i32(<4 x float> %x) {
; CHECK-NOV-LABEL: ustest_f32i32:
; CHECK-NOV:       # %bb.0: # %entry
; CHECK-NOV-NEXT:    fcvt.l.s a1, fa3, rtz
; CHECK-NOV-NEXT:    li a4, -1
; CHECK-NOV-NEXT:    srli a4, a4, 32
; CHECK-NOV-NEXT:    fcvt.l.s a2, fa2, rtz
; CHECK-NOV-NEXT:    bge a1, a4, .LBB5_6
; CHECK-NOV-NEXT:  # %bb.1: # %entry
; CHECK-NOV-NEXT:    fcvt.l.s a3, fa1, rtz
; CHECK-NOV-NEXT:    bge a2, a4, .LBB5_7
; CHECK-NOV-NEXT:  .LBB5_2: # %entry
; CHECK-NOV-NEXT:    fcvt.l.s a5, fa0, rtz
; CHECK-NOV-NEXT:    bge a3, a4, .LBB5_8
; CHECK-NOV-NEXT:  .LBB5_3: # %entry
; CHECK-NOV-NEXT:    blt a5, a4, .LBB5_5
; CHECK-NOV-NEXT:  .LBB5_4: # %entry
; CHECK-NOV-NEXT:    mv a5, a4
; CHECK-NOV-NEXT:  .LBB5_5: # %entry
; CHECK-NOV-NEXT:    sgtz a4, a1
; CHECK-NOV-NEXT:    sgtz a6, a2
; CHECK-NOV-NEXT:    sgtz a7, a3
; CHECK-NOV-NEXT:    sgtz t0, a5
; CHECK-NOV-NEXT:    negw t0, t0
; CHECK-NOV-NEXT:    negw a7, a7
; CHECK-NOV-NEXT:    negw a6, a6
; CHECK-NOV-NEXT:    negw a4, a4
; CHECK-NOV-NEXT:    and a5, t0, a5
; CHECK-NOV-NEXT:    and a3, a7, a3
; CHECK-NOV-NEXT:    and a2, a6, a2
; CHECK-NOV-NEXT:    and a1, a4, a1
; CHECK-NOV-NEXT:    sw a5, 0(a0)
; CHECK-NOV-NEXT:    sw a3, 4(a0)
; CHECK-NOV-NEXT:    sw a2, 8(a0)
; CHECK-NOV-NEXT:    sw a1, 12(a0)
; CHECK-NOV-NEXT:    ret
; CHECK-NOV-NEXT:  .LBB5_6: # %entry
; CHECK-NOV-NEXT:    mv a1, a4
; CHECK-NOV-NEXT:    fcvt.l.s a3, fa1, rtz
; CHECK-NOV-NEXT:    blt a2, a4, .LBB5_2
; CHECK-NOV-NEXT:  .LBB5_7: # %entry
; CHECK-NOV-NEXT:    mv a2, a4
; CHECK-NOV-NEXT:    fcvt.l.s a5, fa0, rtz
; CHECK-NOV-NEXT:    blt a3, a4, .LBB5_3
; CHECK-NOV-NEXT:  .LBB5_8: # %entry
; CHECK-NOV-NEXT:    mv a3, a4
; CHECK-NOV-NEXT:    bge a5, a4, .LBB5_4
; CHECK-NOV-NEXT:    j .LBB5_5
;
; CHECK-V-LABEL: ustest_f32i32:
; CHECK-V:       # %bb.0: # %entry
; CHECK-V-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-V-NEXT:    vfwcvt.rtz.x.f.v v10, v8
; CHECK-V-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
; CHECK-V-NEXT:    vmax.vx v10, v10, zero
; CHECK-V-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
; CHECK-V-NEXT:    vnclipu.wi v8, v10, 0
; CHECK-V-NEXT:    ret
entry:
  %conv = fptosi <4 x float> %x to <4 x i64>
  %0 = icmp slt <4 x i64> %conv, <i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295>
  %spec.store.select = select <4 x i1> %0, <4 x i64> %conv, <4 x i64> <i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295>
  %1 = icmp sgt <4 x i64> %spec.store.select, zeroinitializer
  %spec.store.select7 = select <4 x i1> %1, <4 x i64> %spec.store.select, <4 x i64> zeroinitializer
  %conv6 = trunc <4 x i64> %spec.store.select7 to <4 x i32>
  ret <4 x i32> %conv6
}

define <4 x i32> @stest_f16i32(<4 x half> %x) {
; CHECK-NOV-LABEL: stest_f16i32:
; CHECK-NOV:       # %bb.0: # %entry
; CHECK-NOV-NEXT:    addi sp, sp, -64
; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 64
; CHECK-NOV-NEXT:    sd ra, 56(sp) # 8-byte Folded Spill
; CHECK-NOV-NEXT:    sd s0, 48(sp) # 8-byte Folded Spill
; CHECK-NOV-NEXT:    sd s1, 40(sp) # 8-byte Folded Spill
; CHECK-NOV-NEXT:    sd s2, 32(sp) # 8-byte Folded Spill
; CHECK-NOV-NEXT:    sd s3, 24(sp) # 8-byte Folded Spill
; CHECK-NOV-NEXT:    fsd fs0, 16(sp) # 8-byte Folded Spill
; CHECK-NOV-NEXT:    fsd fs1, 8(sp) # 8-byte Folded Spill
; CHECK-NOV-NEXT:    fsd fs2, 0(sp) # 8-byte Folded Spill
; CHECK-NOV-NEXT:    .cfi_offset ra, -8
; CHECK-NOV-NEXT:    .cfi_offset s0, -16
; CHECK-NOV-NEXT:    .cfi_offset s1, -24
; CHECK-NOV-NEXT:    .cfi_offset s2, -32
; CHECK-NOV-NEXT:    .cfi_offset s3, -40
; CHECK-NOV-NEXT:    .cfi_offset fs0, -48
; CHECK-NOV-NEXT:    .cfi_offset fs1, -56
; CHECK-NOV-NEXT:    .cfi_offset fs2, -64
; CHECK-NOV-NEXT:    .cfi_remember_state
; CHECK-NOV-NEXT:    lhu s1, 0(a1)
; CHECK-NOV-NEXT:    lhu s2, 8(a1)
; CHECK-NOV-NEXT:    lhu a2, 16(a1)
; CHECK-NOV-NEXT:    lhu s3, 24(a1)
; CHECK-NOV-NEXT:    mv s0, a0
; CHECK-NOV-NEXT:    fmv.w.x fa0, a2
; CHECK-NOV-NEXT:    call __extendhfsf2
; CHECK-NOV-NEXT:    fmv.s fs2, fa0
; CHECK-NOV-NEXT:    fmv.w.x fa0, s2
; CHECK-NOV-NEXT:    call __extendhfsf2
; CHECK-NOV-NEXT:    fmv.s fs1, fa0
; CHECK-NOV-NEXT:    fmv.w.x fa0, s1
; CHECK-NOV-NEXT:    call __extendhfsf2
; CHECK-NOV-NEXT:    fmv.s fs0, fa0
; CHECK-NOV-NEXT:    fmv.w.x fa0, s3
; CHECK-NOV-NEXT:    fcvt.l.s s1, fs2, rtz
; CHECK-NOV-NEXT:    call __extendhfsf2
; CHECK-NOV-NEXT:    fcvt.l.s a0, fa0, rtz
; CHECK-NOV-NEXT:    lui a1, 524288
; CHECK-NOV-NEXT:    addiw a4, a1, -1
; CHECK-NOV-NEXT:    bge a0, a4, .LBB6_10
; CHECK-NOV-NEXT:  # %bb.1: # %entry
; CHECK-NOV-NEXT:    fcvt.l.s a2, fs1, rtz
; CHECK-NOV-NEXT:    bge s1, a4, .LBB6_11
; CHECK-NOV-NEXT:  .LBB6_2: # %entry
; CHECK-NOV-NEXT:    fcvt.l.s a3, fs0, rtz
; CHECK-NOV-NEXT:    bge a2, a4, .LBB6_12
; CHECK-NOV-NEXT:  .LBB6_3: # %entry
; CHECK-NOV-NEXT:    bge a3, a4, .LBB6_13
; CHECK-NOV-NEXT:  .LBB6_4: # %entry
; CHECK-NOV-NEXT:    bge a1, a3, .LBB6_14
; CHECK-NOV-NEXT:  .LBB6_5: # %entry
; CHECK-NOV-NEXT:    bge a1, a2, .LBB6_15
; CHECK-NOV-NEXT:  .LBB6_6: # %entry
; CHECK-NOV-NEXT:    bge a1, s1, .LBB6_16
; CHECK-NOV-NEXT:  .LBB6_7: # %entry
; CHECK-NOV-NEXT:    blt a1, a0, .LBB6_9
; CHECK-NOV-NEXT:  .LBB6_8: # %entry
; CHECK-NOV-NEXT:    lui a0, 524288
; CHECK-NOV-NEXT:  .LBB6_9: # %entry
; CHECK-NOV-NEXT:    sw a3, 0(s0)
; CHECK-NOV-NEXT:    sw a2, 4(s0)
; CHECK-NOV-NEXT:    sw s1, 8(s0)
; CHECK-NOV-NEXT:    sw a0, 12(s0)
; CHECK-NOV-NEXT:    ld ra, 56(sp) # 8-byte Folded Reload
; CHECK-NOV-NEXT:    ld s0, 48(sp) # 8-byte Folded Reload
; CHECK-NOV-NEXT:    ld s1, 40(sp) # 8-byte Folded Reload
; CHECK-NOV-NEXT:    ld s2, 32(sp) # 8-byte Folded Reload
; CHECK-NOV-NEXT:    ld s3, 24(sp) # 8-byte Folded Reload
; CHECK-NOV-NEXT:    fld fs0, 16(sp) # 8-byte Folded Reload
; CHECK-NOV-NEXT:    fld fs1, 8(sp) # 8-byte Folded Reload
; CHECK-NOV-NEXT:    fld fs2, 0(sp) # 8-byte Folded Reload
; CHECK-NOV-NEXT:    .cfi_restore ra
; CHECK-NOV-NEXT:    .cfi_restore s0
; CHECK-NOV-NEXT:    .cfi_restore s1
; CHECK-NOV-NEXT:    .cfi_restore s2
; CHECK-NOV-NEXT:    .cfi_restore s3
; CHECK-NOV-NEXT:    .cfi_restore fs0
; CHECK-NOV-NEXT:    .cfi_restore fs1
; CHECK-NOV-NEXT:    .cfi_restore fs2
; CHECK-NOV-NEXT:    addi sp, sp, 64
; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 0
; CHECK-NOV-NEXT:    ret
; CHECK-NOV-NEXT:  .LBB6_10: # %entry
; CHECK-NOV-NEXT:    .cfi_restore_state
; CHECK-NOV-NEXT:    mv a0, a4
; CHECK-NOV-NEXT:    fcvt.l.s a2, fs1, rtz
; CHECK-NOV-NEXT:    blt s1, a4, .LBB6_2
; CHECK-NOV-NEXT:  .LBB6_11: # %entry
; CHECK-NOV-NEXT:    mv s1, a4
; CHECK-NOV-NEXT:    fcvt.l.s a3, fs0, rtz
; CHECK-NOV-NEXT:    blt a2, a4, .LBB6_3
; CHECK-NOV-NEXT:  .LBB6_12: # %entry
; CHECK-NOV-NEXT:    mv a2, a4
; CHECK-NOV-NEXT:    blt a3, a4, .LBB6_4
; CHECK-NOV-NEXT:  .LBB6_13: # %entry
; CHECK-NOV-NEXT:    mv a3, a4
; CHECK-NOV-NEXT:    blt a1, a4, .LBB6_5
; CHECK-NOV-NEXT:  .LBB6_14: # %entry
; CHECK-NOV-NEXT:    lui a3, 524288
; CHECK-NOV-NEXT:    blt a1, a2, .LBB6_6
; CHECK-NOV-NEXT:  .LBB6_15: # %entry
; CHECK-NOV-NEXT:    lui a2, 524288
; CHECK-NOV-NEXT:    blt a1, s1, .LBB6_7
; CHECK-NOV-NEXT:  .LBB6_16: # %entry
; CHECK-NOV-NEXT:    lui s1, 524288
; CHECK-NOV-NEXT:    bge a1, a0, .LBB6_8
; CHECK-NOV-NEXT:    j .LBB6_9
;
; CHECK-V-LABEL: stest_f16i32:
; CHECK-V:       # %bb.0: # %entry
; CHECK-V-NEXT:    addi sp, sp, -48
; CHECK-V-NEXT:    .cfi_def_cfa_offset 48
; CHECK-V-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
; CHECK-V-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
; CHECK-V-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
; CHECK-V-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
; CHECK-V-NEXT:    .cfi_offset ra, -8
; CHECK-V-NEXT:    .cfi_offset s0, -16
; CHECK-V-NEXT:    .cfi_offset s1, -24
; CHECK-V-NEXT:    .cfi_offset s2, -32
; CHECK-V-NEXT:    csrr a1, vlenb
; CHECK-V-NEXT:    slli a2, a1, 1
; CHECK-V-NEXT:    add a1, a2, a1
; CHECK-V-NEXT:    sub sp, sp, a1
; CHECK-V-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 3 * vlenb
; CHECK-V-NEXT:    lhu s0, 0(a0)
; CHECK-V-NEXT:    lhu s1, 8(a0)
; CHECK-V-NEXT:    lhu s2, 16(a0)
; CHECK-V-NEXT:    lhu a0, 24(a0)
; CHECK-V-NEXT:    fmv.w.x fa0, a0
; CHECK-V-NEXT:    call __extendhfsf2
; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
; CHECK-V-NEXT:    fmv.w.x fa0, s2
; CHECK-V-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT:    vmv.s.x v8, a0
; CHECK-V-NEXT:    addi a0, sp, 16
; CHECK-V-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT:    call __extendhfsf2
; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
; CHECK-V-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; CHECK-V-NEXT:    vmv.s.x v8, a0
; CHECK-V-NEXT:    addi a0, sp, 16
; CHECK-V-NEXT:    vl1r.v v9, (a0) # Unknown-size Folded Reload
; CHECK-V-NEXT:    vslideup.vi v8, v9, 1
; CHECK-V-NEXT:    csrr a0, vlenb
; CHECK-V-NEXT:    add a0, sp, a0
; CHECK-V-NEXT:    addi a0, a0, 16
; CHECK-V-NEXT:    vs2r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT:    fmv.w.x fa0, s1
; CHECK-V-NEXT:    call __extendhfsf2
; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
; CHECK-V-NEXT:    fmv.w.x fa0, s0
; CHECK-V-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT:    vmv.s.x v8, a0
; CHECK-V-NEXT:    addi a0, sp, 16
; CHECK-V-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT:    call __extendhfsf2
; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
; CHECK-V-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; CHECK-V-NEXT:    vmv.s.x v10, a0
; CHECK-V-NEXT:    addi a0, sp, 16
; CHECK-V-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-V-NEXT:    vslideup.vi v10, v8, 1
; CHECK-V-NEXT:    csrr a0, vlenb
; CHECK-V-NEXT:    add a0, sp, a0
; CHECK-V-NEXT:    addi a0, a0, 16
; CHECK-V-NEXT:    vl2r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-V-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
; CHECK-V-NEXT:    vslideup.vi v10, v8, 2
; CHECK-V-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
; CHECK-V-NEXT:    vnclip.wi v8, v10, 0
; CHECK-V-NEXT:    csrr a0, vlenb
; CHECK-V-NEXT:    slli a1, a0, 1
; CHECK-V-NEXT:    add a0, a1, a0
; CHECK-V-NEXT:    add sp, sp, a0
; CHECK-V-NEXT:    .cfi_def_cfa sp, 48
; CHECK-V-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
; CHECK-V-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
; CHECK-V-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
; CHECK-V-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
; CHECK-V-NEXT:    .cfi_restore ra
; CHECK-V-NEXT:    .cfi_restore s0
; CHECK-V-NEXT:    .cfi_restore s1
; CHECK-V-NEXT:    .cfi_restore s2
; CHECK-V-NEXT:    addi sp, sp, 48
; CHECK-V-NEXT:    .cfi_def_cfa_offset 0
; CHECK-V-NEXT:    ret
entry:
  %conv = fptosi <4 x half> %x to <4 x i64>
  %0 = icmp slt <4 x i64> %conv, <i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647>
  %spec.store.select = select <4 x i1> %0, <4 x i64> %conv, <4 x i64> <i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647>
  %1 = icmp sgt <4 x i64> %spec.store.select, <i64 -2147483648, i64 -2147483648, i64 -2147483648, i64 -2147483648>
  %spec.store.select7 = select <4 x i1> %1, <4 x i64> %spec.store.select, <4 x i64> <i64 -2147483648, i64 -2147483648, i64 -2147483648, i64 -2147483648>
  %conv6 = trunc <4 x i64> %spec.store.select7 to <4 x i32>
  ret <4 x i32> %conv6
}

define <4 x i32> @utesth_f16i32(<4 x half> %x) {
; CHECK-NOV-LABEL: utesth_f16i32:
; CHECK-NOV:       # %bb.0: # %entry
; CHECK-NOV-NEXT:    addi sp, sp, -64
; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 64
; CHECK-NOV-NEXT:    sd ra, 56(sp) # 8-byte Folded Spill
; CHECK-NOV-NEXT:    sd s0, 48(sp) # 8-byte Folded Spill
; CHECK-NOV-NEXT:    sd s1, 40(sp) # 8-byte Folded Spill
; CHECK-NOV-NEXT:    sd s2, 32(sp) # 8-byte Folded Spill
; CHECK-NOV-NEXT:    sd s3, 24(sp) # 8-byte Folded Spill
; CHECK-NOV-NEXT:    fsd fs0, 16(sp) # 8-byte Folded Spill
; CHECK-NOV-NEXT:    fsd fs1, 8(sp) # 8-byte Folded Spill
; CHECK-NOV-NEXT:    fsd fs2, 0(sp) # 8-byte Folded Spill
; CHECK-NOV-NEXT:    .cfi_offset ra, -8
; CHECK-NOV-NEXT:    .cfi_offset s0, -16
; CHECK-NOV-NEXT:    .cfi_offset s1, -24
; CHECK-NOV-NEXT:    .cfi_offset s2, -32
; CHECK-NOV-NEXT:    .cfi_offset s3, -40
; CHECK-NOV-NEXT:    .cfi_offset fs0, -48
; CHECK-NOV-NEXT:    .cfi_offset fs1, -56
; CHECK-NOV-NEXT:    .cfi_offset fs2, -64
; CHECK-NOV-NEXT:    .cfi_remember_state
; CHECK-NOV-NEXT:    lhu s1, 0(a1)
; CHECK-NOV-NEXT:    lhu a2, 8(a1)
; CHECK-NOV-NEXT:    lhu s2, 16(a1)
; CHECK-NOV-NEXT:    lhu s3, 24(a1)
; CHECK-NOV-NEXT:    mv s0, a0
; CHECK-NOV-NEXT:    fmv.w.x fa0, a2
; CHECK-NOV-NEXT:    call __extendhfsf2
; CHECK-NOV-NEXT:    fmv.s fs2, fa0
; CHECK-NOV-NEXT:    fmv.w.x fa0, s2
; CHECK-NOV-NEXT:    call __extendhfsf2
; CHECK-NOV-NEXT:    fmv.s fs1, fa0
; CHECK-NOV-NEXT:    fmv.w.x fa0, s3
; CHECK-NOV-NEXT:    call __extendhfsf2
; CHECK-NOV-NEXT:    fmv.s fs0, fa0
; CHECK-NOV-NEXT:    fmv.w.x fa0, s1
; CHECK-NOV-NEXT:    fcvt.lu.s s1, fs2, rtz
; CHECK-NOV-NEXT:    call __extendhfsf2
; CHECK-NOV-NEXT:    fcvt.lu.s a0, fa0, rtz
; CHECK-NOV-NEXT:    li a1, -1
; CHECK-NOV-NEXT:    srli a1, a1, 32
; CHECK-NOV-NEXT:    bgeu a0, a1, .LBB7_6
; CHECK-NOV-NEXT:  # %bb.1: # %entry
; CHECK-NOV-NEXT:    fcvt.lu.s a2, fs1, rtz
; CHECK-NOV-NEXT:    bgeu s1, a1, .LBB7_7
; CHECK-NOV-NEXT:  .LBB7_2: # %entry
; CHECK-NOV-NEXT:    fcvt.lu.s a3, fs0, rtz
; CHECK-NOV-NEXT:    bgeu a2, a1, .LBB7_8
; CHECK-NOV-NEXT:  .LBB7_3: # %entry
; CHECK-NOV-NEXT:    bltu a3, a1, .LBB7_5
; CHECK-NOV-NEXT:  .LBB7_4: # %entry
; CHECK-NOV-NEXT:    mv a3, a1
; CHECK-NOV-NEXT:  .LBB7_5: # %entry
; CHECK-NOV-NEXT:    sw a0, 0(s0)
; CHECK-NOV-NEXT:    sw s1, 4(s0)
; CHECK-NOV-NEXT:    sw a2, 8(s0)
; CHECK-NOV-NEXT:    sw a3, 12(s0)
; CHECK-NOV-NEXT:    ld ra, 56(sp) # 8-byte Folded Reload
; CHECK-NOV-NEXT:    ld s0, 48(sp) # 8-byte Folded Reload
; CHECK-NOV-NEXT:    ld s1, 40(sp) # 8-byte Folded Reload
; CHECK-NOV-NEXT:    ld s2, 32(sp) # 8-byte Folded Reload
; CHECK-NOV-NEXT:    ld s3, 24(sp) # 8-byte Folded Reload
; CHECK-NOV-NEXT:    fld fs0, 16(sp) # 8-byte Folded Reload
; CHECK-NOV-NEXT:    fld fs1, 8(sp) # 8-byte Folded Reload
; CHECK-NOV-NEXT:    fld fs2, 0(sp) # 8-byte Folded Reload
; CHECK-NOV-NEXT:    .cfi_restore ra
; CHECK-NOV-NEXT:    .cfi_restore s0
; CHECK-NOV-NEXT:    .cfi_restore s1
; CHECK-NOV-NEXT:    .cfi_restore s2
; CHECK-NOV-NEXT:    .cfi_restore s3
; CHECK-NOV-NEXT:    .cfi_restore fs0
; CHECK-NOV-NEXT:    .cfi_restore fs1
; CHECK-NOV-NEXT:    .cfi_restore fs2
; CHECK-NOV-NEXT:    addi sp, sp, 64
; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 0
; CHECK-NOV-NEXT:    ret
; CHECK-NOV-NEXT:  .LBB7_6: # %entry
; CHECK-NOV-NEXT:    .cfi_restore_state
; CHECK-NOV-NEXT:    mv a0, a1
; CHECK-NOV-NEXT:    fcvt.lu.s a2, fs1, rtz
; CHECK-NOV-NEXT:    bltu s1, a1, .LBB7_2
; CHECK-NOV-NEXT:  .LBB7_7: # %entry
; CHECK-NOV-NEXT:    mv s1, a1
; CHECK-NOV-NEXT:    fcvt.lu.s a3, fs0, rtz
; CHECK-NOV-NEXT:    bltu a2, a1, .LBB7_3
; CHECK-NOV-NEXT:  .LBB7_8: # %entry
; CHECK-NOV-NEXT:    mv a2, a1
; CHECK-NOV-NEXT:    bgeu a3, a1, .LBB7_4
; CHECK-NOV-NEXT:    j .LBB7_5
;
; CHECK-V-LABEL: utesth_f16i32:
; CHECK-V:       # %bb.0: # %entry
; CHECK-V-NEXT:    addi sp, sp, -48
; CHECK-V-NEXT:    .cfi_def_cfa_offset 48
; CHECK-V-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
; CHECK-V-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
; CHECK-V-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
; CHECK-V-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
; CHECK-V-NEXT:    .cfi_offset ra, -8
; CHECK-V-NEXT:    .cfi_offset s0, -16
; CHECK-V-NEXT:    .cfi_offset s1, -24
; CHECK-V-NEXT:    .cfi_offset s2, -32
; CHECK-V-NEXT:    csrr a1, vlenb
; CHECK-V-NEXT:    slli a2, a1, 1
; CHECK-V-NEXT:    add a1, a2, a1
; CHECK-V-NEXT:    sub sp, sp, a1
; CHECK-V-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 3 * vlenb
; CHECK-V-NEXT:    lhu s0, 0(a0)
; CHECK-V-NEXT:    lhu s1, 8(a0)
; CHECK-V-NEXT:    lhu s2, 16(a0)
; CHECK-V-NEXT:    lhu a0, 24(a0)
; CHECK-V-NEXT:    fmv.w.x fa0, a0
; CHECK-V-NEXT:    call __extendhfsf2
; CHECK-V-NEXT:    fcvt.lu.s a0, fa0, rtz
; CHECK-V-NEXT:    fmv.w.x fa0, s2
; CHECK-V-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT:    vmv.s.x v8, a0
; CHECK-V-NEXT:    addi a0, sp, 16
; CHECK-V-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT:    call __extendhfsf2
; CHECK-V-NEXT:    fcvt.lu.s a0, fa0, rtz
; CHECK-V-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; CHECK-V-NEXT:    vmv.s.x v8, a0
; CHECK-V-NEXT:    addi a0, sp, 16
; CHECK-V-NEXT:    vl1r.v v9, (a0) # Unknown-size Folded Reload
; CHECK-V-NEXT:    vslideup.vi v8, v9, 1
; CHECK-V-NEXT:    csrr a0, vlenb
; CHECK-V-NEXT:    add a0, sp, a0
; CHECK-V-NEXT:    addi a0, a0, 16
; CHECK-V-NEXT:    vs2r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT:    fmv.w.x fa0, s1
; CHECK-V-NEXT:    call __extendhfsf2
; CHECK-V-NEXT:    fcvt.lu.s a0, fa0, rtz
; CHECK-V-NEXT:    fmv.w.x fa0, s0
; CHECK-V-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT:    vmv.s.x v8, a0
; CHECK-V-NEXT:    addi a0, sp, 16
; CHECK-V-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT:    call __extendhfsf2
; CHECK-V-NEXT:    fcvt.lu.s a0, fa0, rtz
; CHECK-V-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; CHECK-V-NEXT:    vmv.s.x v10, a0
; CHECK-V-NEXT:    addi a0, sp, 16
; CHECK-V-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-V-NEXT:    vslideup.vi v10, v8, 1
; CHECK-V-NEXT:    csrr a0, vlenb
; CHECK-V-NEXT:    add a0, sp, a0
; CHECK-V-NEXT:    addi a0, a0, 16
; CHECK-V-NEXT:    vl2r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-V-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
; CHECK-V-NEXT:    vslideup.vi v10, v8, 2
; CHECK-V-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
; CHECK-V-NEXT:    vnclipu.wi v8, v10, 0
; CHECK-V-NEXT:    csrr a0, vlenb
; CHECK-V-NEXT:    slli a1, a0, 1
; CHECK-V-NEXT:    add a0, a1, a0
; CHECK-V-NEXT:    add sp, sp, a0
; CHECK-V-NEXT:    .cfi_def_cfa sp, 48
; CHECK-V-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
; CHECK-V-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
; CHECK-V-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
; CHECK-V-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
; CHECK-V-NEXT:    .cfi_restore ra
; CHECK-V-NEXT:    .cfi_restore s0
; CHECK-V-NEXT:    .cfi_restore s1
; CHECK-V-NEXT:    .cfi_restore s2
; CHECK-V-NEXT:    addi sp, sp, 48
; CHECK-V-NEXT:    .cfi_def_cfa_offset 0
; CHECK-V-NEXT:    ret
entry:
  %conv = fptoui <4 x half> %x to <4 x i64>
  %0 = icmp ult <4 x i64> %conv, <i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295>
  %spec.store.select = select <4 x i1> %0, <4 x i64> %conv, <4 x i64> <i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295>
  %conv6 = trunc <4 x i64> %spec.store.select to <4 x i32>
  ret <4 x i32> %conv6
}

define <4 x i32> @ustest_f16i32(<4 x half> %x) {
; CHECK-NOV-LABEL: ustest_f16i32:
; CHECK-NOV:       # %bb.0: # %entry
; CHECK-NOV-NEXT:    addi sp, sp, -64
; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 64
; CHECK-NOV-NEXT:    sd ra, 56(sp) # 8-byte Folded Spill
; CHECK-NOV-NEXT:    sd s0, 48(sp) # 8-byte Folded Spill
; CHECK-NOV-NEXT:    sd s1, 40(sp) # 8-byte Folded Spill
; CHECK-NOV-NEXT:    sd s2, 32(sp) # 8-byte Folded Spill
; CHECK-NOV-NEXT:    sd s3, 24(sp) # 8-byte Folded Spill
; CHECK-NOV-NEXT:    fsd fs0, 16(sp) # 8-byte Folded Spill
; CHECK-NOV-NEXT:    fsd fs1, 8(sp) # 8-byte Folded Spill
; CHECK-NOV-NEXT:    fsd fs2, 0(sp) # 8-byte Folded Spill
; CHECK-NOV-NEXT:    .cfi_offset ra, -8
; CHECK-NOV-NEXT:    .cfi_offset s0, -16
; CHECK-NOV-NEXT:    .cfi_offset s1, -24
; CHECK-NOV-NEXT:    .cfi_offset s2, -32
; CHECK-NOV-NEXT:    .cfi_offset s3, -40
; CHECK-NOV-NEXT:    .cfi_offset fs0, -48
; CHECK-NOV-NEXT:    .cfi_offset fs1, -56
; CHECK-NOV-NEXT:    .cfi_offset fs2, -64
; CHECK-NOV-NEXT:    .cfi_remember_state
; CHECK-NOV-NEXT:    lhu s1, 0(a1)
; CHECK-NOV-NEXT:    lhu s2, 8(a1)
; CHECK-NOV-NEXT:    lhu a2, 16(a1)
; CHECK-NOV-NEXT:    lhu s3, 24(a1)
; CHECK-NOV-NEXT:    mv s0, a0
; CHECK-NOV-NEXT:    fmv.w.x fa0, a2
; CHECK-NOV-NEXT:    call __extendhfsf2
; CHECK-NOV-NEXT:    fmv.s fs2, fa0
; CHECK-NOV-NEXT:    fmv.w.x fa0, s2
; CHECK-NOV-NEXT:    call __extendhfsf2
; CHECK-NOV-NEXT:    fmv.s fs1, fa0
; CHECK-NOV-NEXT:    fmv.w.x fa0, s1
; CHECK-NOV-NEXT:    call __extendhfsf2
; CHECK-NOV-NEXT:    fmv.s fs0, fa0
; CHECK-NOV-NEXT:    fmv.w.x fa0, s3
; CHECK-NOV-NEXT:    fcvt.l.s s1, fs2, rtz
; CHECK-NOV-NEXT:    call __extendhfsf2
; CHECK-NOV-NEXT:    fcvt.l.s a0, fa0, rtz
; CHECK-NOV-NEXT:    li a2, -1
; CHECK-NOV-NEXT:    srli a2, a2, 32
; CHECK-NOV-NEXT:    bge a0, a2, .LBB8_6
; CHECK-NOV-NEXT:  # %bb.1: # %entry
; CHECK-NOV-NEXT:    fcvt.l.s a1, fs1, rtz
; CHECK-NOV-NEXT:    bge s1, a2, .LBB8_7
; CHECK-NOV-NEXT:  .LBB8_2: # %entry
; CHECK-NOV-NEXT:    fcvt.l.s a3, fs0, rtz
; CHECK-NOV-NEXT:    bge a1, a2, .LBB8_8
; CHECK-NOV-NEXT:  .LBB8_3: # %entry
; CHECK-NOV-NEXT:    blt a3, a2, .LBB8_5
; CHECK-NOV-NEXT:  .LBB8_4: # %entry
; CHECK-NOV-NEXT:    mv a3, a2
; CHECK-NOV-NEXT:  .LBB8_5: # %entry
; CHECK-NOV-NEXT:    sgtz a2, a0
; CHECK-NOV-NEXT:    sgtz a4, s1
; CHECK-NOV-NEXT:    sgtz a5, a1
; CHECK-NOV-NEXT:    sgtz a6, a3
; CHECK-NOV-NEXT:    negw a6, a6
; CHECK-NOV-NEXT:    negw a5, a5
; CHECK-NOV-NEXT:    negw a4, a4
; CHECK-NOV-NEXT:    negw a2, a2
; CHECK-NOV-NEXT:    and a3, a6, a3
; CHECK-NOV-NEXT:    and a1, a5, a1
; CHECK-NOV-NEXT:    and a4, a4, s1
; CHECK-NOV-NEXT:    and a0, a2, a0
; CHECK-NOV-NEXT:    sw a3, 0(s0)
; CHECK-NOV-NEXT:    sw a1, 4(s0)
; CHECK-NOV-NEXT:    sw a4, 8(s0)
; CHECK-NOV-NEXT:    sw a0, 12(s0)
; CHECK-NOV-NEXT:    ld ra, 56(sp) # 8-byte Folded Reload
; CHECK-NOV-NEXT:    ld s0, 48(sp) # 8-byte Folded Reload
; CHECK-NOV-NEXT:    ld s1, 40(sp) # 8-byte Folded Reload
; CHECK-NOV-NEXT:    ld s2, 32(sp) # 8-byte Folded Reload
; CHECK-NOV-NEXT:    ld s3, 24(sp) # 8-byte Folded Reload
; CHECK-NOV-NEXT:    fld fs0, 16(sp) # 8-byte Folded Reload
; CHECK-NOV-NEXT:    fld fs1, 8(sp) # 8-byte Folded Reload
; CHECK-NOV-NEXT:    fld fs2, 0(sp) # 8-byte Folded Reload
; CHECK-NOV-NEXT:    .cfi_restore ra
; CHECK-NOV-NEXT:    .cfi_restore s0
; CHECK-NOV-NEXT:    .cfi_restore s1
; CHECK-NOV-NEXT:    .cfi_restore s2
; CHECK-NOV-NEXT:    .cfi_restore s3
; CHECK-NOV-NEXT:    .cfi_restore fs0
; CHECK-NOV-NEXT:    .cfi_restore fs1
; CHECK-NOV-NEXT:    .cfi_restore fs2
; CHECK-NOV-NEXT:    addi sp, sp, 64
; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 0
; CHECK-NOV-NEXT:    ret
; CHECK-NOV-NEXT:  .LBB8_6: # %entry
; CHECK-NOV-NEXT:    .cfi_restore_state
; CHECK-NOV-NEXT:    mv a0, a2
; CHECK-NOV-NEXT:    fcvt.l.s a1, fs1, rtz
; CHECK-NOV-NEXT:    blt s1, a2, .LBB8_2
; CHECK-NOV-NEXT:  .LBB8_7: # %entry
; CHECK-NOV-NEXT:    mv s1, a2
; CHECK-NOV-NEXT:    fcvt.l.s a3, fs0, rtz
; CHECK-NOV-NEXT:    blt a1, a2, .LBB8_3
; CHECK-NOV-NEXT:  .LBB8_8: # %entry
; CHECK-NOV-NEXT:    mv a1, a2
; CHECK-NOV-NEXT:    bge a3, a2, .LBB8_4
; CHECK-NOV-NEXT:    j .LBB8_5
;
; CHECK-V-LABEL: ustest_f16i32:
; CHECK-V:       # %bb.0: # %entry
; CHECK-V-NEXT:    addi sp, sp, -48
; CHECK-V-NEXT:    .cfi_def_cfa_offset 48
; CHECK-V-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
; CHECK-V-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
; CHECK-V-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
; CHECK-V-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
; CHECK-V-NEXT:    .cfi_offset ra, -8
; CHECK-V-NEXT:    .cfi_offset s0, -16
; CHECK-V-NEXT:    .cfi_offset s1, -24
; CHECK-V-NEXT:    .cfi_offset s2, -32
; CHECK-V-NEXT:    csrr a1, vlenb
; CHECK-V-NEXT:    slli a2, a1, 1
; CHECK-V-NEXT:    add a1, a2, a1
; CHECK-V-NEXT:    sub sp, sp, a1
; CHECK-V-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 3 * vlenb
; CHECK-V-NEXT:    lhu s0, 0(a0)
; CHECK-V-NEXT:    lhu s1, 8(a0)
; CHECK-V-NEXT:    lhu s2, 16(a0)
; CHECK-V-NEXT:    lhu a0, 24(a0)
; CHECK-V-NEXT:    fmv.w.x fa0, a0
; CHECK-V-NEXT:    call __extendhfsf2
; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
; CHECK-V-NEXT:    fmv.w.x fa0, s2
; CHECK-V-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT:    vmv.s.x v8, a0
; CHECK-V-NEXT:    addi a0, sp, 16
; CHECK-V-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT:    call __extendhfsf2
; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
; CHECK-V-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; CHECK-V-NEXT:    vmv.s.x v8, a0
; CHECK-V-NEXT:    addi a0, sp, 16
; CHECK-V-NEXT:    vl1r.v v9, (a0) # Unknown-size Folded Reload
; CHECK-V-NEXT:    vslideup.vi v8, v9, 1
; CHECK-V-NEXT:    csrr a0, vlenb
; CHECK-V-NEXT:    add a0, sp, a0
; CHECK-V-NEXT:    addi a0, a0, 16
; CHECK-V-NEXT:    vs2r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT:    fmv.w.x fa0, s1
; CHECK-V-NEXT:    call __extendhfsf2
; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
; CHECK-V-NEXT:    fmv.w.x fa0, s0
; CHECK-V-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT:    vmv.s.x v8, a0
; CHECK-V-NEXT:    addi a0, sp, 16
; CHECK-V-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT:    call __extendhfsf2
; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
; CHECK-V-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; CHECK-V-NEXT:    vmv.s.x v8, a0
; CHECK-V-NEXT:    addi a0, sp, 16
; CHECK-V-NEXT:    vl1r.v v9, (a0) # Unknown-size Folded Reload
; CHECK-V-NEXT:    vslideup.vi v8, v9, 1
; CHECK-V-NEXT:    csrr a0, vlenb
; CHECK-V-NEXT:    add a0, sp, a0
; CHECK-V-NEXT:    addi a0, a0, 16
; CHECK-V-NEXT:    vl2r.v v10, (a0) # Unknown-size Folded Reload
; CHECK-V-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
; CHECK-V-NEXT:    vslideup.vi v8, v10, 2
; CHECK-V-NEXT:    vmax.vx v10, v8, zero
; CHECK-V-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
; CHECK-V-NEXT:    vnclipu.wi v8, v10, 0
; CHECK-V-NEXT:    csrr a0, vlenb
; CHECK-V-NEXT:    slli a1, a0, 1
; CHECK-V-NEXT:    add a0, a1, a0
; CHECK-V-NEXT:    add sp, sp, a0
; CHECK-V-NEXT:    .cfi_def_cfa sp, 48
; CHECK-V-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
; CHECK-V-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
; CHECK-V-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
; CHECK-V-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
; CHECK-V-NEXT:    .cfi_restore ra
; CHECK-V-NEXT:    .cfi_restore s0
; CHECK-V-NEXT:    .cfi_restore s1
; CHECK-V-NEXT:    .cfi_restore s2
; CHECK-V-NEXT:    addi sp, sp, 48
; CHECK-V-NEXT:    .cfi_def_cfa_offset 0
; CHECK-V-NEXT:    ret
entry:
  %conv = fptosi <4 x half> %x to <4 x i64>
  %0 = icmp slt <4 x i64> %conv, <i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295>
  %spec.store.select = select <4 x i1> %0, <4 x i64> %conv, <4 x i64> <i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295>
  %1 = icmp sgt <4 x i64> %spec.store.select, zeroinitializer
  %spec.store.select7 = select <4 x i1> %1, <4 x i64> %spec.store.select, <4 x i64> zeroinitializer
  %conv6 = trunc <4 x i64> %spec.store.select7 to <4 x i32>
  ret <4 x i32> %conv6
}

; i16 saturate

define <2 x i16> @stest_f64i16(<2 x double> %x) {
; CHECK-NOV-LABEL: stest_f64i16:
; CHECK-NOV:       # %bb.0: # %entry
; CHECK-NOV-NEXT:    fcvt.w.d a1, fa1, rtz
; CHECK-NOV-NEXT:    lui a2, 8
; CHECK-NOV-NEXT:    addiw a2, a2, -1
; CHECK-NOV-NEXT:    fcvt.w.d a0, fa0, rtz
; CHECK-NOV-NEXT:    bge a1, a2, .LBB9_5
; CHECK-NOV-NEXT:  # %bb.1: # %entry
; CHECK-NOV-NEXT:    bge a0, a2, .LBB9_6
; CHECK-NOV-NEXT:  .LBB9_2: # %entry
; CHECK-NOV-NEXT:    lui a2, 1048568
; CHECK-NOV-NEXT:    bge a2, a0, .LBB9_7
; CHECK-NOV-NEXT:  .LBB9_3: # %entry
; CHECK-NOV-NEXT:    bge a2, a1, .LBB9_8
; CHECK-NOV-NEXT:  .LBB9_4: # %entry
; CHECK-NOV-NEXT:    ret
; CHECK-NOV-NEXT:  .LBB9_5: # %entry
; CHECK-NOV-NEXT:    mv a1, a2
; CHECK-NOV-NEXT:    blt a0, a2, .LBB9_2
; CHECK-NOV-NEXT:  .LBB9_6: # %entry
; CHECK-NOV-NEXT:    mv a0, a2
; CHECK-NOV-NEXT:    lui a2, 1048568
; CHECK-NOV-NEXT:    blt a2, a0, .LBB9_3
; CHECK-NOV-NEXT:  .LBB9_7: # %entry
; CHECK-NOV-NEXT:    lui a0, 1048568
; CHECK-NOV-NEXT:    blt a2, a1, .LBB9_4
; CHECK-NOV-NEXT:  .LBB9_8: # %entry
; CHECK-NOV-NEXT:    lui a1, 1048568
; CHECK-NOV-NEXT:    ret
;
; CHECK-V-LABEL: stest_f64i16:
; CHECK-V:       # %bb.0: # %entry
; CHECK-V-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-V-NEXT:    vfncvt.rtz.x.f.w v9, v8
; CHECK-V-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; CHECK-V-NEXT:    vnclip.wi v8, v9, 0
; CHECK-V-NEXT:    ret
entry:
  %conv = fptosi <2 x double> %x to <2 x i32>
  %0 = icmp slt <2 x i32> %conv, <i32 32767, i32 32767>
  %spec.store.select = select <2 x i1> %0, <2 x i32> %conv, <2 x i32> <i32 32767, i32 32767>
  %1 = icmp sgt <2 x i32> %spec.store.select, <i32 -32768, i32 -32768>
  %spec.store.select7 = select <2 x i1> %1, <2 x i32> %spec.store.select, <2 x i32> <i32 -32768, i32 -32768>
  %conv6 = trunc <2 x i32> %spec.store.select7 to <2 x i16>
  ret <2 x i16> %conv6
}

define <2 x i16> @utest_f64i16(<2 x double> %x) {
; CHECK-NOV-LABEL: utest_f64i16:
; CHECK-NOV:       # %bb.0: # %entry
; CHECK-NOV-NEXT:    fcvt.wu.d a0, fa0, rtz
; CHECK-NOV-NEXT:    lui a2, 16
; CHECK-NOV-NEXT:    addiw a2, a2, -1
; CHECK-NOV-NEXT:    fcvt.wu.d a1, fa1, rtz
; CHECK-NOV-NEXT:    bgeu a0, a2, .LBB10_3
; CHECK-NOV-NEXT:  # %bb.1: # %entry
; CHECK-NOV-NEXT:    bgeu a1, a2, .LBB10_4
; CHECK-NOV-NEXT:  .LBB10_2: # %entry
; CHECK-NOV-NEXT:    ret
; CHECK-NOV-NEXT:  .LBB10_3: # %entry
; CHECK-NOV-NEXT:    mv a0, a2
; CHECK-NOV-NEXT:    bltu a1, a2, .LBB10_2
; CHECK-NOV-NEXT:  .LBB10_4: # %entry
; CHECK-NOV-NEXT:    mv a1, a2
; CHECK-NOV-NEXT:    ret
;
; CHECK-V-LABEL: utest_f64i16:
; CHECK-V:       # %bb.0: # %entry
; CHECK-V-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-V-NEXT:    vfncvt.rtz.xu.f.w v9, v8
; CHECK-V-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; CHECK-V-NEXT:    vnclipu.wi v8, v9, 0
; CHECK-V-NEXT:    ret
entry:
  %conv = fptoui <2 x double> %x to <2 x i32>
  %0 = icmp ult <2 x i32> %conv, <i32 65535, i32 65535>
  %spec.store.select = select <2 x i1> %0, <2 x i32> %conv, <2 x i32> <i32 65535, i32 65535>
  %conv6 = trunc <2 x i32> %spec.store.select to <2 x i16>
  ret <2 x i16> %conv6
}

define <2 x i16> @ustest_f64i16(<2 x double> %x) {
; CHECK-NOV-LABEL: ustest_f64i16:
; CHECK-NOV:       # %bb.0: # %entry
; CHECK-NOV-NEXT:    fcvt.w.d a1, fa1, rtz
; CHECK-NOV-NEXT:    lui a2, 16
; CHECK-NOV-NEXT:    addiw a2, a2, -1
; CHECK-NOV-NEXT:    fcvt.w.d a0, fa0, rtz
; CHECK-NOV-NEXT:    blt a1, a2, .LBB11_2
; CHECK-NOV-NEXT:  # %bb.1: # %entry
; CHECK-NOV-NEXT:    mv a1, a2
; CHECK-NOV-NEXT:  .LBB11_2: # %entry
; CHECK-NOV-NEXT:    blt a0, a2, .LBB11_4
; CHECK-NOV-NEXT:  # %bb.3: # %entry
; CHECK-NOV-NEXT:    mv a0, a2
; CHECK-NOV-NEXT:  .LBB11_4: # %entry
; CHECK-NOV-NEXT:    sgtz a2, a1
; CHECK-NOV-NEXT:    sgtz a3, a0
; CHECK-NOV-NEXT:    neg a3, a3
; CHECK-NOV-NEXT:    neg a2, a2
; CHECK-NOV-NEXT:    and a0, a3, a0
; CHECK-NOV-NEXT:    and a1, a2, a1
; CHECK-NOV-NEXT:    ret
;
; CHECK-V-LABEL: ustest_f64i16:
; CHECK-V:       # %bb.0: # %entry
; CHECK-V-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-V-NEXT:    vfncvt.rtz.x.f.w v9, v8
; CHECK-V-NEXT:    vmax.vx v8, v9, zero
; CHECK-V-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; CHECK-V-NEXT:    vnclipu.wi v8, v8, 0
; CHECK-V-NEXT:    ret
entry:
  %conv = fptosi <2 x double> %x to <2 x i32>
  %0 = icmp slt <2 x i32> %conv, <i32 65535, i32 65535>
  %spec.store.select = select <2 x i1> %0, <2 x i32> %conv, <2 x i32> <i32 65535, i32 65535>
  %1 = icmp sgt <2 x i32> %spec.store.select, zeroinitializer
  %spec.store.select7 = select <2 x i1> %1, <2 x i32> %spec.store.select, <2 x i32> zeroinitializer
  %conv6 = trunc <2 x i32> %spec.store.select7 to <2 x i16>
  ret <2 x i16> %conv6
}

define <4 x i16> @stest_f32i16(<4 x float> %x) {
; CHECK-NOV-LABEL: stest_f32i16:
; CHECK-NOV:       # %bb.0: # %entry
; CHECK-NOV-NEXT:    fcvt.w.s a1, fa3, rtz
; CHECK-NOV-NEXT:    lui a5, 8
; CHECK-NOV-NEXT:    addiw a5, a5, -1
; CHECK-NOV-NEXT:    fcvt.w.s a2, fa2, rtz
; CHECK-NOV-NEXT:    bge a1, a5, .LBB12_10
; CHECK-NOV-NEXT:  # %bb.1: # %entry
; CHECK-NOV-NEXT:    fcvt.w.s a3, fa1, rtz
; CHECK-NOV-NEXT:    bge a2, a5, .LBB12_11
; CHECK-NOV-NEXT:  .LBB12_2: # %entry
; CHECK-NOV-NEXT:    fcvt.w.s a4, fa0, rtz
; CHECK-NOV-NEXT:    bge a3, a5, .LBB12_12
; CHECK-NOV-NEXT:  .LBB12_3: # %entry
; CHECK-NOV-NEXT:    bge a4, a5, .LBB12_13
; CHECK-NOV-NEXT:  .LBB12_4: # %entry
; CHECK-NOV-NEXT:    lui a5, 1048568
; CHECK-NOV-NEXT:    bge a5, a4, .LBB12_14
; CHECK-NOV-NEXT:  .LBB12_5: # %entry
; CHECK-NOV-NEXT:    bge a5, a3, .LBB12_15
; CHECK-NOV-NEXT:  .LBB12_6: # %entry
; CHECK-NOV-NEXT:    bge a5, a2, .LBB12_16
; CHECK-NOV-NEXT:  .LBB12_7: # %entry
; CHECK-NOV-NEXT:    blt a5, a1, .LBB12_9
; CHECK-NOV-NEXT:  .LBB12_8: # %entry
; CHECK-NOV-NEXT:    lui a1, 1048568
; CHECK-NOV-NEXT:  .LBB12_9: # %entry
; CHECK-NOV-NEXT:    sh a4, 0(a0)
; CHECK-NOV-NEXT:    sh a3, 2(a0)
; CHECK-NOV-NEXT:    sh a2, 4(a0)
; CHECK-NOV-NEXT:    sh a1, 6(a0)
; CHECK-NOV-NEXT:    ret
; CHECK-NOV-NEXT:  .LBB12_10: # %entry
; CHECK-NOV-NEXT:    mv a1, a5
; CHECK-NOV-NEXT:    fcvt.w.s a3, fa1, rtz
; CHECK-NOV-NEXT:    blt a2, a5, .LBB12_2
; CHECK-NOV-NEXT:  .LBB12_11: # %entry
; CHECK-NOV-NEXT:    mv a2, a5
; CHECK-NOV-NEXT:    fcvt.w.s a4, fa0, rtz
; CHECK-NOV-NEXT:    blt a3, a5, .LBB12_3
; CHECK-NOV-NEXT:  .LBB12_12: # %entry
; CHECK-NOV-NEXT:    mv a3, a5
; CHECK-NOV-NEXT:    blt a4, a5, .LBB12_4
; CHECK-NOV-NEXT:  .LBB12_13: # %entry
; CHECK-NOV-NEXT:    mv a4, a5
; CHECK-NOV-NEXT:    lui a5, 1048568
; CHECK-NOV-NEXT:    blt a5, a4, .LBB12_5
; CHECK-NOV-NEXT:  .LBB12_14: # %entry
; CHECK-NOV-NEXT:    lui a4, 1048568
; CHECK-NOV-NEXT:    blt a5, a3, .LBB12_6
; CHECK-NOV-NEXT:  .LBB12_15: # %entry
; CHECK-NOV-NEXT:    lui a3, 1048568
; CHECK-NOV-NEXT:    blt a5, a2, .LBB12_7
; CHECK-NOV-NEXT:  .LBB12_16: # %entry
; CHECK-NOV-NEXT:    lui a2, 1048568
; CHECK-NOV-NEXT:    bge a5, a1, .LBB12_8
; CHECK-NOV-NEXT:    j .LBB12_9
;
; CHECK-V-LABEL: stest_f32i16:
; CHECK-V:       # %bb.0: # %entry
; CHECK-V-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-V-NEXT:    vfcvt.rtz.x.f.v v8, v8
; CHECK-V-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
; CHECK-V-NEXT:    vnclip.wi v8, v8, 0
; CHECK-V-NEXT:    ret
entry:
  %conv = fptosi <4 x float> %x to <4 x i32>
  %0 = icmp slt <4 x i32> %conv, <i32 32767, i32 32767, i32 32767, i32 32767>
  %spec.store.select = select <4 x i1> %0, <4 x i32> %conv, <4 x i32> <i32 32767, i32 32767, i32 32767, i32 32767>
  %1 = icmp sgt <4 x i32> %spec.store.select, <i32 -32768, i32 -32768, i32 -32768, i32 -32768>
  %spec.store.select7 = select <4 x i1> %1, <4 x i32> %spec.store.select, <4 x i32> <i32 -32768, i32 -32768, i32 -32768, i32 -32768>
  %conv6 = trunc <4 x i32> %spec.store.select7 to <4 x i16>
  ret <4 x i16> %conv6
}

define <4 x i16> @utest_f32i16(<4 x float> %x) {
; CHECK-NOV-LABEL: utest_f32i16:
; CHECK-NOV:       # %bb.0: # %entry
; CHECK-NOV-NEXT:    fcvt.wu.s a1, fa0, rtz
; CHECK-NOV-NEXT:    lui a3, 16
; CHECK-NOV-NEXT:    addiw a3, a3, -1
; CHECK-NOV-NEXT:    fcvt.wu.s a2, fa1, rtz
; CHECK-NOV-NEXT:    bgeu a1, a3, .LBB13_6
; CHECK-NOV-NEXT:  # %bb.1: # %entry
; CHECK-NOV-NEXT:    fcvt.wu.s a4, fa2, rtz
; CHECK-NOV-NEXT:    bgeu a2, a3, .LBB13_7
; CHECK-NOV-NEXT:  .LBB13_2: # %entry
; CHECK-NOV-NEXT:    fcvt.wu.s a5, fa3, rtz
; CHECK-NOV-NEXT:    bgeu a4, a3, .LBB13_8
; CHECK-NOV-NEXT:  .LBB13_3: # %entry
; CHECK-NOV-NEXT:    bltu a5, a3, .LBB13_5
; CHECK-NOV-NEXT:  .LBB13_4: # %entry
; CHECK-NOV-NEXT:    mv a5, a3
; CHECK-NOV-NEXT:  .LBB13_5: # %entry
; CHECK-NOV-NEXT:    sh a1, 0(a0)
; CHECK-NOV-NEXT:    sh a2, 2(a0)
; CHECK-NOV-NEXT:    sh a4, 4(a0)
; CHECK-NOV-NEXT:    sh a5, 6(a0)
; CHECK-NOV-NEXT:    ret
; CHECK-NOV-NEXT:  .LBB13_6: # %entry
; CHECK-NOV-NEXT:    mv a1, a3
; CHECK-NOV-NEXT:    fcvt.wu.s a4, fa2, rtz
; CHECK-NOV-NEXT:    bltu a2, a3, .LBB13_2
; CHECK-NOV-NEXT:  .LBB13_7: # %entry
; CHECK-NOV-NEXT:    mv a2, a3
; CHECK-NOV-NEXT:    fcvt.wu.s a5, fa3, rtz
; CHECK-NOV-NEXT:    bltu a4, a3, .LBB13_3
; CHECK-NOV-NEXT:  .LBB13_8: # %entry
; CHECK-NOV-NEXT:    mv a4, a3
; CHECK-NOV-NEXT:    bgeu a5, a3, .LBB13_4
; CHECK-NOV-NEXT:    j .LBB13_5
;
; CHECK-V-LABEL: utest_f32i16:
; CHECK-V:       # %bb.0: # %entry
; CHECK-V-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-V-NEXT:    vfcvt.rtz.xu.f.v v8, v8
; CHECK-V-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
; CHECK-V-NEXT:    vnclipu.wi v8, v8, 0
; CHECK-V-NEXT:    ret
entry:
  %conv = fptoui <4 x float> %x to <4 x i32>
  %0 = icmp ult <4 x i32> %conv, <i32 65535, i32 65535, i32 65535, i32 65535>
  %spec.store.select = select <4 x i1> %0, <4 x i32> %conv, <4 x i32> <i32 65535, i32 65535, i32 65535, i32 65535>
  %conv6 = trunc <4 x i32> %spec.store.select to <4 x i16>
  ret <4 x i16> %conv6
}

define <4 x i16> @ustest_f32i16(<4 x float> %x) {
; CHECK-NOV-LABEL: ustest_f32i16:
; CHECK-NOV:       # %bb.0: # %entry
; CHECK-NOV-NEXT:    fcvt.w.s a1, fa3, rtz
; CHECK-NOV-NEXT:    lui a4, 16
; CHECK-NOV-NEXT:    addiw a4, a4, -1
; CHECK-NOV-NEXT:    fcvt.w.s a2, fa2, rtz
; CHECK-NOV-NEXT:    bge a1, a4, .LBB14_6
; CHECK-NOV-NEXT:  # %bb.1: # %entry
; CHECK-NOV-NEXT:    fcvt.w.s a3, fa1, rtz
; CHECK-NOV-NEXT:    bge a2, a4, .LBB14_7
; CHECK-NOV-NEXT:  .LBB14_2: # %entry
; CHECK-NOV-NEXT:    fcvt.w.s a5, fa0, rtz
; CHECK-NOV-NEXT:    bge a3, a4, .LBB14_8
; CHECK-NOV-NEXT:  .LBB14_3: # %entry
; CHECK-NOV-NEXT:    blt a5, a4, .LBB14_5
; CHECK-NOV-NEXT:  .LBB14_4: # %entry
; CHECK-NOV-NEXT:    mv a5, a4
; CHECK-NOV-NEXT:  .LBB14_5: # %entry
; CHECK-NOV-NEXT:    sgtz a4, a1
; CHECK-NOV-NEXT:    sgtz a6, a2
; CHECK-NOV-NEXT:    sgtz a7, a3
; CHECK-NOV-NEXT:    sgtz t0, a5
; CHECK-NOV-NEXT:    negw t0, t0
; CHECK-NOV-NEXT:    negw a7, a7
; CHECK-NOV-NEXT:    negw a6, a6
; CHECK-NOV-NEXT:    negw a4, a4
; CHECK-NOV-NEXT:    and a5, t0, a5
; CHECK-NOV-NEXT:    and a3, a7, a3
; CHECK-NOV-NEXT:    and a2, a6, a2
; CHECK-NOV-NEXT:    and a1, a4, a1
; CHECK-NOV-NEXT:    sh a5, 0(a0)
; CHECK-NOV-NEXT:    sh a3, 2(a0)
; CHECK-NOV-NEXT:    sh a2, 4(a0)
; CHECK-NOV-NEXT:    sh a1, 6(a0)
; CHECK-NOV-NEXT:    ret
; CHECK-NOV-NEXT:  .LBB14_6: # %entry
; CHECK-NOV-NEXT:    mv a1, a4
; CHECK-NOV-NEXT:    fcvt.w.s a3, fa1, rtz
; CHECK-NOV-NEXT:    blt a2, a4, .LBB14_2
; CHECK-NOV-NEXT:  .LBB14_7: # %entry
; CHECK-NOV-NEXT:    mv a2, a4
; CHECK-NOV-NEXT:    fcvt.w.s a5, fa0, rtz
; CHECK-NOV-NEXT:    blt a3, a4, .LBB14_3
; CHECK-NOV-NEXT:  .LBB14_8: # %entry
; CHECK-NOV-NEXT:    mv a3, a4
; CHECK-NOV-NEXT:    bge a5, a4, .LBB14_4
; CHECK-NOV-NEXT:    j .LBB14_5
;
; CHECK-V-LABEL: ustest_f32i16:
; CHECK-V:       # %bb.0: # %entry
; CHECK-V-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-V-NEXT:    vfcvt.rtz.x.f.v v8, v8
; CHECK-V-NEXT:    vmax.vx v8, v8, zero
; CHECK-V-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
; CHECK-V-NEXT:    vnclipu.wi v8, v8, 0
; CHECK-V-NEXT:    ret
entry:
  %conv = fptosi <4 x float> %x to <4 x i32>
  %0 = icmp slt <4 x i32> %conv, <i32 65535, i32 65535, i32 65535, i32 65535>
  %spec.store.select = select <4 x i1> %0, <4 x i32> %conv, <4 x i32> <i32 65535, i32 65535, i32 65535, i32 65535>
  %1 = icmp sgt <4 x i32> %spec.store.select, zeroinitializer
  %spec.store.select7 = select <4 x i1> %1, <4 x i32> %spec.store.select, <4 x i32> zeroinitializer
  %conv6 = trunc <4 x i32> %spec.store.select7 to <4 x i16>
  ret <4 x i16> %conv6
}

define <8 x i16> @stest_f16i16(<8 x half> %x) {
; CHECK-NOV-LABEL: stest_f16i16:
; CHECK-NOV:       # %bb.0: # %entry
; CHECK-NOV-NEXT:    addi sp, sp, -128
; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 128
; CHECK-NOV-NEXT:    sd ra, 120(sp) # 8-byte Folded Spill
; CHECK-NOV-NEXT:    sd s0, 112(sp) # 8-byte Folded Spill
; CHECK-NOV-NEXT:    sd s1, 104(sp) # 8-byte Folded Spill
; CHECK-NOV-NEXT:    sd s2, 96(sp) # 8-byte Folded Spill
; CHECK-NOV-NEXT:    sd s3, 88(sp) # 8-byte Folded Spill
; CHECK-NOV-NEXT:    sd s4, 80(sp) # 8-byte Folded Spill
; CHECK-NOV-NEXT:    sd s5, 72(sp) # 8-byte Folded Spill
; CHECK-NOV-NEXT:    sd s6, 64(sp) # 8-byte Folded Spill
; CHECK-NOV-NEXT:    sd s7, 56(sp) # 8-byte Folded Spill
; CHECK-NOV-NEXT:    fsd fs0, 48(sp) # 8-byte Folded Spill
; CHECK-NOV-NEXT:    fsd fs1, 40(sp) # 8-byte Folded Spill
; CHECK-NOV-NEXT:    fsd fs2, 32(sp) # 8-byte Folded Spill
; CHECK-NOV-NEXT:    fsd fs3, 24(sp) # 8-byte Folded Spill
; CHECK-NOV-NEXT:    fsd fs4, 16(sp) # 8-byte Folded Spill
; CHECK-NOV-NEXT:    fsd fs5, 8(sp) # 8-byte Folded Spill
; CHECK-NOV-NEXT:    fsd fs6, 0(sp) # 8-byte Folded Spill
; CHECK-NOV-NEXT:    .cfi_offset ra, -8
; CHECK-NOV-NEXT:    .cfi_offset s0, -16
; CHECK-NOV-NEXT:    .cfi_offset s1, -24
; CHECK-NOV-NEXT:    .cfi_offset s2, -32
; CHECK-NOV-NEXT:    .cfi_offset s3, -40
; CHECK-NOV-NEXT:    .cfi_offset s4, -48
; CHECK-NOV-NEXT:    .cfi_offset s5, -56
; CHECK-NOV-NEXT:    .cfi_offset s6, -64
; CHECK-NOV-NEXT:    .cfi_offset s7, -72
; CHECK-NOV-NEXT:    .cfi_offset fs0, -80
; CHECK-NOV-NEXT:    .cfi_offset fs1, -88
; CHECK-NOV-NEXT:    .cfi_offset fs2, -96
; CHECK-NOV-NEXT:    .cfi_offset fs3, -104
; CHECK-NOV-NEXT:    .cfi_offset fs4, -112
; CHECK-NOV-NEXT:    .cfi_offset fs5, -120
; CHECK-NOV-NEXT:    .cfi_offset fs6, -128
; CHECK-NOV-NEXT:    .cfi_remember_state
; CHECK-NOV-NEXT:    lhu s1, 32(a1)
; CHECK-NOV-NEXT:    lhu s2, 40(a1)
; CHECK-NOV-NEXT:    lhu a2, 48(a1)
; CHECK-NOV-NEXT:    lhu s3, 56(a1)
; CHECK-NOV-NEXT:    lhu s4, 0(a1)
; CHECK-NOV-NEXT:    lhu s5, 8(a1)
; CHECK-NOV-NEXT:    lhu s6, 16(a1)
; CHECK-NOV-NEXT:    lhu s7, 24(a1)
; CHECK-NOV-NEXT:    mv s0, a0
; CHECK-NOV-NEXT:    fmv.w.x fa0, a2
; CHECK-NOV-NEXT:    call __extendhfsf2
; CHECK-NOV-NEXT:    fmv.s fs6, fa0
; CHECK-NOV-NEXT:    fmv.w.x fa0, s2
; CHECK-NOV-NEXT:    call __extendhfsf2
; CHECK-NOV-NEXT:    fmv.s fs5, fa0
; CHECK-NOV-NEXT:    fmv.w.x fa0, s1
; CHECK-NOV-NEXT:    call __extendhfsf2
; CHECK-NOV-NEXT:    fmv.s fs4, fa0
; CHECK-NOV-NEXT:    fmv.w.x fa0, s7
; CHECK-NOV-NEXT:    call __extendhfsf2
; CHECK-NOV-NEXT:    fmv.s fs3, fa0
; CHECK-NOV-NEXT:    fmv.w.x fa0, s6
; CHECK-NOV-NEXT:    call __extendhfsf2
; CHECK-NOV-NEXT:    fmv.s fs2, fa0
; CHECK-NOV-NEXT:    fmv.w.x fa0, s5
; CHECK-NOV-NEXT:    call __extendhfsf2
; CHECK-NOV-NEXT:    fmv.s fs1, fa0
; CHECK-NOV-NEXT:    fmv.w.x fa0, s4
; CHECK-NOV-NEXT:    call __extendhfsf2
; CHECK-NOV-NEXT:    fmv.s fs0, fa0
; CHECK-NOV-NEXT:    fmv.w.x fa0, s3
; CHECK-NOV-NEXT:    fcvt.l.s s1, fs6, rtz
; CHECK-NOV-NEXT:    call __extendhfsf2
; CHECK-NOV-NEXT:    fcvt.l.s a0, fa0, rtz
; CHECK-NOV-NEXT:    lui a7, 8
; CHECK-NOV-NEXT:    addiw a7, a7, -1
; CHECK-NOV-NEXT:    bge a0, a7, .LBB15_18
; CHECK-NOV-NEXT:  # %bb.1: # %entry
; CHECK-NOV-NEXT:    fcvt.l.s a1, fs5, rtz
; CHECK-NOV-NEXT:    bge s1, a7, .LBB15_19
; CHECK-NOV-NEXT:  .LBB15_2: # %entry
; CHECK-NOV-NEXT:    fcvt.l.s a3, fs4, rtz
; CHECK-NOV-NEXT:    bge a1, a7, .LBB15_20
; CHECK-NOV-NEXT:  .LBB15_3: # %entry
; CHECK-NOV-NEXT:    fcvt.l.s a2, fs3, rtz
; CHECK-NOV-NEXT:    bge a3, a7, .LBB15_21
; CHECK-NOV-NEXT:  .LBB15_4: # %entry
; CHECK-NOV-NEXT:    fcvt.l.s a4, fs2, rtz
; CHECK-NOV-NEXT:    bge a2, a7, .LBB15_22
; CHECK-NOV-NEXT:  .LBB15_5: # %entry
; CHECK-NOV-NEXT:    fcvt.l.s a5, fs1, rtz
; CHECK-NOV-NEXT:    bge a4, a7, .LBB15_23
; CHECK-NOV-NEXT:  .LBB15_6: # %entry
; CHECK-NOV-NEXT:    fcvt.l.s a6, fs0, rtz
; CHECK-NOV-NEXT:    bge a5, a7, .LBB15_24
; CHECK-NOV-NEXT:  .LBB15_7: # %entry
; CHECK-NOV-NEXT:    bge a6, a7, .LBB15_25
; CHECK-NOV-NEXT:  .LBB15_8: # %entry
; CHECK-NOV-NEXT:    lui a7, 1048568
; CHECK-NOV-NEXT:    bge a7, a6, .LBB15_26
; CHECK-NOV-NEXT:  .LBB15_9: # %entry
; CHECK-NOV-NEXT:    bge a7, a5, .LBB15_27
; CHECK-NOV-NEXT:  .LBB15_10: # %entry
; CHECK-NOV-NEXT:    bge a7, a4, .LBB15_28
; CHECK-NOV-NEXT:  .LBB15_11: # %entry
; CHECK-NOV-NEXT:    bge a7, a2, .LBB15_29
; CHECK-NOV-NEXT:  .LBB15_12: # %entry
; CHECK-NOV-NEXT:    bge a7, a3, .LBB15_30
; CHECK-NOV-NEXT:  .LBB15_13: # %entry
; CHECK-NOV-NEXT:    bge a7, a1, .LBB15_31
; CHECK-NOV-NEXT:  .LBB15_14: # %entry
; CHECK-NOV-NEXT:    bge a7, s1, .LBB15_32
; CHECK-NOV-NEXT:  .LBB15_15: # %entry
; CHECK-NOV-NEXT:    blt a7, a0, .LBB15_17
; CHECK-NOV-NEXT:  .LBB15_16: # %entry
; CHECK-NOV-NEXT:    lui a0, 1048568
; CHECK-NOV-NEXT:  .LBB15_17: # %entry
; CHECK-NOV-NEXT:    sh a3, 8(s0)
; CHECK-NOV-NEXT:    sh a1, 10(s0)
; CHECK-NOV-NEXT:    sh s1, 12(s0)
; CHECK-NOV-NEXT:    sh a0, 14(s0)
; CHECK-NOV-NEXT:    sh a6, 0(s0)
; CHECK-NOV-NEXT:    sh a5, 2(s0)
; CHECK-NOV-NEXT:    sh a4, 4(s0)
; CHECK-NOV-NEXT:    sh a2, 6(s0)
; CHECK-NOV-NEXT:    ld ra, 120(sp) # 8-byte Folded Reload
; CHECK-NOV-NEXT:    ld s0, 112(sp) # 8-byte Folded Reload
; CHECK-NOV-NEXT:    ld s1, 104(sp) # 8-byte Folded Reload
; CHECK-NOV-NEXT:    ld s2, 96(sp) # 8-byte Folded Reload
; CHECK-NOV-NEXT:    ld s3, 88(sp) # 8-byte Folded Reload
; CHECK-NOV-NEXT:    ld s4, 80(sp) # 8-byte Folded Reload
; CHECK-NOV-NEXT:    ld s5, 72(sp) # 8-byte Folded Reload
; CHECK-NOV-NEXT:    ld s6, 64(sp) # 8-byte Folded Reload
; CHECK-NOV-NEXT:    ld s7, 56(sp) # 8-byte Folded Reload
; CHECK-NOV-NEXT:    fld fs0, 48(sp) # 8-byte Folded Reload
; CHECK-NOV-NEXT:    fld fs1, 40(sp) # 8-byte Folded Reload
; CHECK-NOV-NEXT:    fld fs2, 32(sp) # 8-byte Folded Reload
; CHECK-NOV-NEXT:    fld fs3, 24(sp) # 8-byte Folded Reload
; CHECK-NOV-NEXT:    fld fs4, 16(sp) # 8-byte Folded Reload
; CHECK-NOV-NEXT:    fld fs5, 8(sp) # 8-byte Folded Reload
; CHECK-NOV-NEXT:    fld fs6, 0(sp) # 8-byte Folded Reload
; CHECK-NOV-NEXT:    .cfi_restore ra
; CHECK-NOV-NEXT:    .cfi_restore s0
; CHECK-NOV-NEXT:    .cfi_restore s1
; CHECK-NOV-NEXT:    .cfi_restore s2
; CHECK-NOV-NEXT:    .cfi_restore s3
; CHECK-NOV-NEXT:    .cfi_restore s4
; CHECK-NOV-NEXT:    .cfi_restore s5
; CHECK-NOV-NEXT:    .cfi_restore s6
; CHECK-NOV-NEXT:    .cfi_restore s7
; CHECK-NOV-NEXT:    .cfi_restore fs0
; CHECK-NOV-NEXT:    .cfi_restore fs1
; CHECK-NOV-NEXT:    .cfi_restore fs2
; CHECK-NOV-NEXT:    .cfi_restore fs3
; CHECK-NOV-NEXT:    .cfi_restore fs4
; CHECK-NOV-NEXT:    .cfi_restore fs5
; CHECK-NOV-NEXT:    .cfi_restore fs6
; CHECK-NOV-NEXT:    addi sp, sp, 128
; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 0
; CHECK-NOV-NEXT:    ret
1371; CHECK-NOV-NEXT:  .LBB15_18: # %entry
1372; CHECK-NOV-NEXT:    .cfi_restore_state
1373; CHECK-NOV-NEXT:    mv a0, a7
1374; CHECK-NOV-NEXT:    fcvt.l.s a1, fs5, rtz
1375; CHECK-NOV-NEXT:    blt s1, a7, .LBB15_2
1376; CHECK-NOV-NEXT:  .LBB15_19: # %entry
1377; CHECK-NOV-NEXT:    mv s1, a7
1378; CHECK-NOV-NEXT:    fcvt.l.s a3, fs4, rtz
1379; CHECK-NOV-NEXT:    blt a1, a7, .LBB15_3
1380; CHECK-NOV-NEXT:  .LBB15_20: # %entry
1381; CHECK-NOV-NEXT:    mv a1, a7
1382; CHECK-NOV-NEXT:    fcvt.l.s a2, fs3, rtz
1383; CHECK-NOV-NEXT:    blt a3, a7, .LBB15_4
1384; CHECK-NOV-NEXT:  .LBB15_21: # %entry
1385; CHECK-NOV-NEXT:    mv a3, a7
1386; CHECK-NOV-NEXT:    fcvt.l.s a4, fs2, rtz
1387; CHECK-NOV-NEXT:    blt a2, a7, .LBB15_5
1388; CHECK-NOV-NEXT:  .LBB15_22: # %entry
1389; CHECK-NOV-NEXT:    mv a2, a7
1390; CHECK-NOV-NEXT:    fcvt.l.s a5, fs1, rtz
1391; CHECK-NOV-NEXT:    blt a4, a7, .LBB15_6
1392; CHECK-NOV-NEXT:  .LBB15_23: # %entry
1393; CHECK-NOV-NEXT:    mv a4, a7
1394; CHECK-NOV-NEXT:    fcvt.l.s a6, fs0, rtz
1395; CHECK-NOV-NEXT:    blt a5, a7, .LBB15_7
1396; CHECK-NOV-NEXT:  .LBB15_24: # %entry
1397; CHECK-NOV-NEXT:    mv a5, a7
1398; CHECK-NOV-NEXT:    blt a6, a7, .LBB15_8
1399; CHECK-NOV-NEXT:  .LBB15_25: # %entry
1400; CHECK-NOV-NEXT:    mv a6, a7
1401; CHECK-NOV-NEXT:    lui a7, 1048568
1402; CHECK-NOV-NEXT:    blt a7, a6, .LBB15_9
1403; CHECK-NOV-NEXT:  .LBB15_26: # %entry
1404; CHECK-NOV-NEXT:    lui a6, 1048568
1405; CHECK-NOV-NEXT:    blt a7, a5, .LBB15_10
1406; CHECK-NOV-NEXT:  .LBB15_27: # %entry
1407; CHECK-NOV-NEXT:    lui a5, 1048568
1408; CHECK-NOV-NEXT:    blt a7, a4, .LBB15_11
1409; CHECK-NOV-NEXT:  .LBB15_28: # %entry
1410; CHECK-NOV-NEXT:    lui a4, 1048568
1411; CHECK-NOV-NEXT:    blt a7, a2, .LBB15_12
1412; CHECK-NOV-NEXT:  .LBB15_29: # %entry
1413; CHECK-NOV-NEXT:    lui a2, 1048568
1414; CHECK-NOV-NEXT:    blt a7, a3, .LBB15_13
1415; CHECK-NOV-NEXT:  .LBB15_30: # %entry
1416; CHECK-NOV-NEXT:    lui a3, 1048568
1417; CHECK-NOV-NEXT:    blt a7, a1, .LBB15_14
1418; CHECK-NOV-NEXT:  .LBB15_31: # %entry
1419; CHECK-NOV-NEXT:    lui a1, 1048568
1420; CHECK-NOV-NEXT:    blt a7, s1, .LBB15_15
1421; CHECK-NOV-NEXT:  .LBB15_32: # %entry
1422; CHECK-NOV-NEXT:    lui s1, 1048568
1423; CHECK-NOV-NEXT:    bge a7, a0, .LBB15_16
1424; CHECK-NOV-NEXT:    j .LBB15_17
1425;
1426; CHECK-V-LABEL: stest_f16i16:
1427; CHECK-V:       # %bb.0: # %entry
1428; CHECK-V-NEXT:    addi sp, sp, -80
1429; CHECK-V-NEXT:    .cfi_def_cfa_offset 80
1430; CHECK-V-NEXT:    sd ra, 72(sp) # 8-byte Folded Spill
1431; CHECK-V-NEXT:    sd s0, 64(sp) # 8-byte Folded Spill
1432; CHECK-V-NEXT:    sd s1, 56(sp) # 8-byte Folded Spill
1433; CHECK-V-NEXT:    sd s2, 48(sp) # 8-byte Folded Spill
1434; CHECK-V-NEXT:    sd s3, 40(sp) # 8-byte Folded Spill
1435; CHECK-V-NEXT:    sd s4, 32(sp) # 8-byte Folded Spill
1436; CHECK-V-NEXT:    sd s5, 24(sp) # 8-byte Folded Spill
1437; CHECK-V-NEXT:    sd s6, 16(sp) # 8-byte Folded Spill
1438; CHECK-V-NEXT:    .cfi_offset ra, -8
1439; CHECK-V-NEXT:    .cfi_offset s0, -16
1440; CHECK-V-NEXT:    .cfi_offset s1, -24
1441; CHECK-V-NEXT:    .cfi_offset s2, -32
1442; CHECK-V-NEXT:    .cfi_offset s3, -40
1443; CHECK-V-NEXT:    .cfi_offset s4, -48
1444; CHECK-V-NEXT:    .cfi_offset s5, -56
1445; CHECK-V-NEXT:    .cfi_offset s6, -64
1446; CHECK-V-NEXT:    csrr a1, vlenb
1447; CHECK-V-NEXT:    slli a1, a1, 2
1448; CHECK-V-NEXT:    sub sp, sp, a1
1449; CHECK-V-NEXT:    .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xd0, 0x00, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 80 + 4 * vlenb
1450; CHECK-V-NEXT:    lhu s0, 0(a0)
1451; CHECK-V-NEXT:    lhu s1, 8(a0)
1452; CHECK-V-NEXT:    lhu s2, 16(a0)
1453; CHECK-V-NEXT:    lhu s3, 24(a0)
1454; CHECK-V-NEXT:    lhu s4, 32(a0)
1455; CHECK-V-NEXT:    lhu s5, 40(a0)
1456; CHECK-V-NEXT:    lhu s6, 48(a0)
1457; CHECK-V-NEXT:    lhu a0, 56(a0)
1458; CHECK-V-NEXT:    fmv.w.x fa0, a0
1459; CHECK-V-NEXT:    call __extendhfsf2
1460; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
1461; CHECK-V-NEXT:    fmv.w.x fa0, s6
1462; CHECK-V-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
1463; CHECK-V-NEXT:    vmv.s.x v8, a0
1464; CHECK-V-NEXT:    csrr a0, vlenb
1465; CHECK-V-NEXT:    slli a0, a0, 1
1466; CHECK-V-NEXT:    add a0, sp, a0
1467; CHECK-V-NEXT:    addi a0, a0, 16
1468; CHECK-V-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
1469; CHECK-V-NEXT:    call __extendhfsf2
1470; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
1471; CHECK-V-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
1472; CHECK-V-NEXT:    vmv.s.x v8, a0
1473; CHECK-V-NEXT:    csrr a0, vlenb
1474; CHECK-V-NEXT:    slli a0, a0, 1
1475; CHECK-V-NEXT:    add a0, sp, a0
1476; CHECK-V-NEXT:    addi a0, a0, 16
1477; CHECK-V-NEXT:    vl1r.v v9, (a0) # Unknown-size Folded Reload
1478; CHECK-V-NEXT:    vslideup.vi v8, v9, 1
1479; CHECK-V-NEXT:    csrr a0, vlenb
1480; CHECK-V-NEXT:    add a0, sp, a0
1481; CHECK-V-NEXT:    addi a0, a0, 16
1482; CHECK-V-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
1483; CHECK-V-NEXT:    fmv.w.x fa0, s5
1484; CHECK-V-NEXT:    call __extendhfsf2
1485; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
1486; CHECK-V-NEXT:    fmv.w.x fa0, s4
1487; CHECK-V-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
1488; CHECK-V-NEXT:    vmv.s.x v8, a0
1489; CHECK-V-NEXT:    csrr a0, vlenb
1490; CHECK-V-NEXT:    slli a0, a0, 1
1491; CHECK-V-NEXT:    add a0, sp, a0
1492; CHECK-V-NEXT:    addi a0, a0, 16
1493; CHECK-V-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
1494; CHECK-V-NEXT:    call __extendhfsf2
1495; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
1496; CHECK-V-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
1497; CHECK-V-NEXT:    vmv.s.x v8, a0
1498; CHECK-V-NEXT:    csrr a0, vlenb
1499; CHECK-V-NEXT:    slli a0, a0, 1
1500; CHECK-V-NEXT:    add a0, sp, a0
1501; CHECK-V-NEXT:    addi a0, a0, 16
1502; CHECK-V-NEXT:    vl1r.v v9, (a0) # Unknown-size Folded Reload
1503; CHECK-V-NEXT:    vslideup.vi v8, v9, 1
1504; CHECK-V-NEXT:    csrr a0, vlenb
1505; CHECK-V-NEXT:    add a0, sp, a0
1506; CHECK-V-NEXT:    addi a0, a0, 16
1507; CHECK-V-NEXT:    vl1r.v v9, (a0) # Unknown-size Folded Reload
1508; CHECK-V-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
1509; CHECK-V-NEXT:    vslideup.vi v8, v9, 2
1510; CHECK-V-NEXT:    csrr a0, vlenb
1511; CHECK-V-NEXT:    slli a0, a0, 1
1512; CHECK-V-NEXT:    add a0, sp, a0
1513; CHECK-V-NEXT:    addi a0, a0, 16
1514; CHECK-V-NEXT:    vs2r.v v8, (a0) # Unknown-size Folded Spill
1515; CHECK-V-NEXT:    fmv.w.x fa0, s3
1516; CHECK-V-NEXT:    call __extendhfsf2
1517; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
1518; CHECK-V-NEXT:    fmv.w.x fa0, s2
1519; CHECK-V-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
1520; CHECK-V-NEXT:    vmv.s.x v8, a0
1521; CHECK-V-NEXT:    addi a0, sp, 16
1522; CHECK-V-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
1523; CHECK-V-NEXT:    call __extendhfsf2
1524; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
1525; CHECK-V-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
1526; CHECK-V-NEXT:    vmv.s.x v8, a0
1527; CHECK-V-NEXT:    addi a0, sp, 16
1528; CHECK-V-NEXT:    vl1r.v v9, (a0) # Unknown-size Folded Reload
1529; CHECK-V-NEXT:    vslideup.vi v8, v9, 1
1530; CHECK-V-NEXT:    csrr a0, vlenb
1531; CHECK-V-NEXT:    add a0, sp, a0
1532; CHECK-V-NEXT:    addi a0, a0, 16
1533; CHECK-V-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
1534; CHECK-V-NEXT:    fmv.w.x fa0, s1
1535; CHECK-V-NEXT:    call __extendhfsf2
1536; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
1537; CHECK-V-NEXT:    fmv.w.x fa0, s0
1538; CHECK-V-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
1539; CHECK-V-NEXT:    vmv.s.x v8, a0
1540; CHECK-V-NEXT:    addi a0, sp, 16
1541; CHECK-V-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
1542; CHECK-V-NEXT:    call __extendhfsf2
1543; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
1544; CHECK-V-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
1545; CHECK-V-NEXT:    vmv.s.x v10, a0
1546; CHECK-V-NEXT:    addi a0, sp, 16
1547; CHECK-V-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
1548; CHECK-V-NEXT:    vslideup.vi v10, v8, 1
1549; CHECK-V-NEXT:    csrr a0, vlenb
1550; CHECK-V-NEXT:    add a0, sp, a0
1551; CHECK-V-NEXT:    addi a0, a0, 16
1552; CHECK-V-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
1553; CHECK-V-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
1554; CHECK-V-NEXT:    vslideup.vi v10, v8, 2
1555; CHECK-V-NEXT:    csrr a0, vlenb
1556; CHECK-V-NEXT:    slli a0, a0, 1
1557; CHECK-V-NEXT:    add a0, sp, a0
1558; CHECK-V-NEXT:    addi a0, a0, 16
1559; CHECK-V-NEXT:    vl2r.v v8, (a0) # Unknown-size Folded Reload
1560; CHECK-V-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
1561; CHECK-V-NEXT:    vslideup.vi v10, v8, 4
1562; CHECK-V-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
1563; CHECK-V-NEXT:    vnclip.wi v8, v10, 0
1564; CHECK-V-NEXT:    csrr a0, vlenb
1565; CHECK-V-NEXT:    slli a0, a0, 2
1566; CHECK-V-NEXT:    add sp, sp, a0
1567; CHECK-V-NEXT:    .cfi_def_cfa sp, 80
1568; CHECK-V-NEXT:    ld ra, 72(sp) # 8-byte Folded Reload
1569; CHECK-V-NEXT:    ld s0, 64(sp) # 8-byte Folded Reload
1570; CHECK-V-NEXT:    ld s1, 56(sp) # 8-byte Folded Reload
1571; CHECK-V-NEXT:    ld s2, 48(sp) # 8-byte Folded Reload
1572; CHECK-V-NEXT:    ld s3, 40(sp) # 8-byte Folded Reload
1573; CHECK-V-NEXT:    ld s4, 32(sp) # 8-byte Folded Reload
1574; CHECK-V-NEXT:    ld s5, 24(sp) # 8-byte Folded Reload
1575; CHECK-V-NEXT:    ld s6, 16(sp) # 8-byte Folded Reload
1576; CHECK-V-NEXT:    .cfi_restore ra
1577; CHECK-V-NEXT:    .cfi_restore s0
1578; CHECK-V-NEXT:    .cfi_restore s1
1579; CHECK-V-NEXT:    .cfi_restore s2
1580; CHECK-V-NEXT:    .cfi_restore s3
1581; CHECK-V-NEXT:    .cfi_restore s4
1582; CHECK-V-NEXT:    .cfi_restore s5
1583; CHECK-V-NEXT:    .cfi_restore s6
1584; CHECK-V-NEXT:    addi sp, sp, 80
1585; CHECK-V-NEXT:    .cfi_def_cfa_offset 0
1586; CHECK-V-NEXT:    ret
1587entry:
1588  %conv = fptosi <8 x half> %x to <8 x i32>
1589  %0 = icmp slt <8 x i32> %conv, <i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767>
1590  %spec.store.select = select <8 x i1> %0, <8 x i32> %conv, <8 x i32> <i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767>
1591  %1 = icmp sgt <8 x i32> %spec.store.select, <i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768>
1592  %spec.store.select7 = select <8 x i1> %1, <8 x i32> %spec.store.select, <8 x i32> <i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768>
1593  %conv6 = trunc <8 x i32> %spec.store.select7 to <8 x i16>
1594  ret <8 x i16> %conv6
1595}
1596
1597define <8 x i16> @utesth_f16i16(<8 x half> %x) {
1598; CHECK-NOV-LABEL: utesth_f16i16:
1599; CHECK-NOV:       # %bb.0: # %entry
1600; CHECK-NOV-NEXT:    addi sp, sp, -128
1601; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 128
1602; CHECK-NOV-NEXT:    sd ra, 120(sp) # 8-byte Folded Spill
1603; CHECK-NOV-NEXT:    sd s0, 112(sp) # 8-byte Folded Spill
1604; CHECK-NOV-NEXT:    sd s1, 104(sp) # 8-byte Folded Spill
1605; CHECK-NOV-NEXT:    sd s2, 96(sp) # 8-byte Folded Spill
1606; CHECK-NOV-NEXT:    sd s3, 88(sp) # 8-byte Folded Spill
1607; CHECK-NOV-NEXT:    sd s4, 80(sp) # 8-byte Folded Spill
1608; CHECK-NOV-NEXT:    sd s5, 72(sp) # 8-byte Folded Spill
1609; CHECK-NOV-NEXT:    sd s6, 64(sp) # 8-byte Folded Spill
1610; CHECK-NOV-NEXT:    sd s7, 56(sp) # 8-byte Folded Spill
1611; CHECK-NOV-NEXT:    fsd fs0, 48(sp) # 8-byte Folded Spill
1612; CHECK-NOV-NEXT:    fsd fs1, 40(sp) # 8-byte Folded Spill
1613; CHECK-NOV-NEXT:    fsd fs2, 32(sp) # 8-byte Folded Spill
1614; CHECK-NOV-NEXT:    fsd fs3, 24(sp) # 8-byte Folded Spill
1615; CHECK-NOV-NEXT:    fsd fs4, 16(sp) # 8-byte Folded Spill
1616; CHECK-NOV-NEXT:    fsd fs5, 8(sp) # 8-byte Folded Spill
1617; CHECK-NOV-NEXT:    fsd fs6, 0(sp) # 8-byte Folded Spill
1618; CHECK-NOV-NEXT:    .cfi_offset ra, -8
1619; CHECK-NOV-NEXT:    .cfi_offset s0, -16
1620; CHECK-NOV-NEXT:    .cfi_offset s1, -24
1621; CHECK-NOV-NEXT:    .cfi_offset s2, -32
1622; CHECK-NOV-NEXT:    .cfi_offset s3, -40
1623; CHECK-NOV-NEXT:    .cfi_offset s4, -48
1624; CHECK-NOV-NEXT:    .cfi_offset s5, -56
1625; CHECK-NOV-NEXT:    .cfi_offset s6, -64
1626; CHECK-NOV-NEXT:    .cfi_offset s7, -72
1627; CHECK-NOV-NEXT:    .cfi_offset fs0, -80
1628; CHECK-NOV-NEXT:    .cfi_offset fs1, -88
1629; CHECK-NOV-NEXT:    .cfi_offset fs2, -96
1630; CHECK-NOV-NEXT:    .cfi_offset fs3, -104
1631; CHECK-NOV-NEXT:    .cfi_offset fs4, -112
1632; CHECK-NOV-NEXT:    .cfi_offset fs5, -120
1633; CHECK-NOV-NEXT:    .cfi_offset fs6, -128
1634; CHECK-NOV-NEXT:    .cfi_remember_state
1635; CHECK-NOV-NEXT:    lhu s1, 32(a1)
1636; CHECK-NOV-NEXT:    lhu s2, 40(a1)
1637; CHECK-NOV-NEXT:    lhu s3, 48(a1)
1638; CHECK-NOV-NEXT:    lhu s4, 56(a1)
1639; CHECK-NOV-NEXT:    lhu s5, 0(a1)
1640; CHECK-NOV-NEXT:    lhu a2, 8(a1)
1641; CHECK-NOV-NEXT:    lhu s6, 16(a1)
1642; CHECK-NOV-NEXT:    lhu s7, 24(a1)
1643; CHECK-NOV-NEXT:    mv s0, a0
1644; CHECK-NOV-NEXT:    fmv.w.x fa0, a2
1645; CHECK-NOV-NEXT:    call __extendhfsf2
1646; CHECK-NOV-NEXT:    fmv.s fs6, fa0
1647; CHECK-NOV-NEXT:    fmv.w.x fa0, s6
1648; CHECK-NOV-NEXT:    call __extendhfsf2
1649; CHECK-NOV-NEXT:    fmv.s fs5, fa0
1650; CHECK-NOV-NEXT:    fmv.w.x fa0, s7
1651; CHECK-NOV-NEXT:    call __extendhfsf2
1652; CHECK-NOV-NEXT:    fmv.s fs4, fa0
1653; CHECK-NOV-NEXT:    fmv.w.x fa0, s1
1654; CHECK-NOV-NEXT:    call __extendhfsf2
1655; CHECK-NOV-NEXT:    fmv.s fs3, fa0
1656; CHECK-NOV-NEXT:    fmv.w.x fa0, s2
1657; CHECK-NOV-NEXT:    call __extendhfsf2
1658; CHECK-NOV-NEXT:    fmv.s fs2, fa0
1659; CHECK-NOV-NEXT:    fmv.w.x fa0, s3
1660; CHECK-NOV-NEXT:    call __extendhfsf2
1661; CHECK-NOV-NEXT:    fmv.s fs1, fa0
1662; CHECK-NOV-NEXT:    fmv.w.x fa0, s4
1663; CHECK-NOV-NEXT:    call __extendhfsf2
1664; CHECK-NOV-NEXT:    fmv.s fs0, fa0
1665; CHECK-NOV-NEXT:    fmv.w.x fa0, s5
1666; CHECK-NOV-NEXT:    fcvt.lu.s s1, fs6, rtz
1667; CHECK-NOV-NEXT:    call __extendhfsf2
1668; CHECK-NOV-NEXT:    fcvt.lu.s a0, fa0, rtz
1669; CHECK-NOV-NEXT:    lui a3, 16
1670; CHECK-NOV-NEXT:    addiw a3, a3, -1
1671; CHECK-NOV-NEXT:    bgeu a0, a3, .LBB16_10
1672; CHECK-NOV-NEXT:  # %bb.1: # %entry
1673; CHECK-NOV-NEXT:    fcvt.lu.s a1, fs5, rtz
1674; CHECK-NOV-NEXT:    bgeu s1, a3, .LBB16_11
1675; CHECK-NOV-NEXT:  .LBB16_2: # %entry
1676; CHECK-NOV-NEXT:    fcvt.lu.s a2, fs4, rtz
1677; CHECK-NOV-NEXT:    bgeu a1, a3, .LBB16_12
1678; CHECK-NOV-NEXT:  .LBB16_3: # %entry
1679; CHECK-NOV-NEXT:    fcvt.lu.s a4, fs3, rtz
1680; CHECK-NOV-NEXT:    bgeu a2, a3, .LBB16_13
1681; CHECK-NOV-NEXT:  .LBB16_4: # %entry
1682; CHECK-NOV-NEXT:    fcvt.lu.s a5, fs2, rtz
1683; CHECK-NOV-NEXT:    bgeu a4, a3, .LBB16_14
1684; CHECK-NOV-NEXT:  .LBB16_5: # %entry
1685; CHECK-NOV-NEXT:    fcvt.lu.s a6, fs1, rtz
1686; CHECK-NOV-NEXT:    bgeu a5, a3, .LBB16_15
1687; CHECK-NOV-NEXT:  .LBB16_6: # %entry
1688; CHECK-NOV-NEXT:    fcvt.lu.s a7, fs0, rtz
1689; CHECK-NOV-NEXT:    bgeu a6, a3, .LBB16_16
1690; CHECK-NOV-NEXT:  .LBB16_7: # %entry
1691; CHECK-NOV-NEXT:    bltu a7, a3, .LBB16_9
1692; CHECK-NOV-NEXT:  .LBB16_8: # %entry
1693; CHECK-NOV-NEXT:    mv a7, a3
1694; CHECK-NOV-NEXT:  .LBB16_9: # %entry
1695; CHECK-NOV-NEXT:    sh a4, 8(s0)
1696; CHECK-NOV-NEXT:    sh a5, 10(s0)
1697; CHECK-NOV-NEXT:    sh a6, 12(s0)
1698; CHECK-NOV-NEXT:    sh a7, 14(s0)
1699; CHECK-NOV-NEXT:    sh a0, 0(s0)
1700; CHECK-NOV-NEXT:    sh s1, 2(s0)
1701; CHECK-NOV-NEXT:    sh a1, 4(s0)
1702; CHECK-NOV-NEXT:    sh a2, 6(s0)
1703; CHECK-NOV-NEXT:    ld ra, 120(sp) # 8-byte Folded Reload
1704; CHECK-NOV-NEXT:    ld s0, 112(sp) # 8-byte Folded Reload
1705; CHECK-NOV-NEXT:    ld s1, 104(sp) # 8-byte Folded Reload
1706; CHECK-NOV-NEXT:    ld s2, 96(sp) # 8-byte Folded Reload
1707; CHECK-NOV-NEXT:    ld s3, 88(sp) # 8-byte Folded Reload
1708; CHECK-NOV-NEXT:    ld s4, 80(sp) # 8-byte Folded Reload
1709; CHECK-NOV-NEXT:    ld s5, 72(sp) # 8-byte Folded Reload
1710; CHECK-NOV-NEXT:    ld s6, 64(sp) # 8-byte Folded Reload
1711; CHECK-NOV-NEXT:    ld s7, 56(sp) # 8-byte Folded Reload
1712; CHECK-NOV-NEXT:    fld fs0, 48(sp) # 8-byte Folded Reload
1713; CHECK-NOV-NEXT:    fld fs1, 40(sp) # 8-byte Folded Reload
1714; CHECK-NOV-NEXT:    fld fs2, 32(sp) # 8-byte Folded Reload
1715; CHECK-NOV-NEXT:    fld fs3, 24(sp) # 8-byte Folded Reload
1716; CHECK-NOV-NEXT:    fld fs4, 16(sp) # 8-byte Folded Reload
1717; CHECK-NOV-NEXT:    fld fs5, 8(sp) # 8-byte Folded Reload
1718; CHECK-NOV-NEXT:    fld fs6, 0(sp) # 8-byte Folded Reload
1719; CHECK-NOV-NEXT:    .cfi_restore ra
1720; CHECK-NOV-NEXT:    .cfi_restore s0
1721; CHECK-NOV-NEXT:    .cfi_restore s1
1722; CHECK-NOV-NEXT:    .cfi_restore s2
1723; CHECK-NOV-NEXT:    .cfi_restore s3
1724; CHECK-NOV-NEXT:    .cfi_restore s4
1725; CHECK-NOV-NEXT:    .cfi_restore s5
1726; CHECK-NOV-NEXT:    .cfi_restore s6
1727; CHECK-NOV-NEXT:    .cfi_restore s7
1728; CHECK-NOV-NEXT:    .cfi_restore fs0
1729; CHECK-NOV-NEXT:    .cfi_restore fs1
1730; CHECK-NOV-NEXT:    .cfi_restore fs2
1731; CHECK-NOV-NEXT:    .cfi_restore fs3
1732; CHECK-NOV-NEXT:    .cfi_restore fs4
1733; CHECK-NOV-NEXT:    .cfi_restore fs5
1734; CHECK-NOV-NEXT:    .cfi_restore fs6
1735; CHECK-NOV-NEXT:    addi sp, sp, 128
1736; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 0
1737; CHECK-NOV-NEXT:    ret
1738; CHECK-NOV-NEXT:  .LBB16_10: # %entry
1739; CHECK-NOV-NEXT:    .cfi_restore_state
1740; CHECK-NOV-NEXT:    mv a0, a3
1741; CHECK-NOV-NEXT:    fcvt.lu.s a1, fs5, rtz
1742; CHECK-NOV-NEXT:    bltu s1, a3, .LBB16_2
1743; CHECK-NOV-NEXT:  .LBB16_11: # %entry
1744; CHECK-NOV-NEXT:    mv s1, a3
1745; CHECK-NOV-NEXT:    fcvt.lu.s a2, fs4, rtz
1746; CHECK-NOV-NEXT:    bltu a1, a3, .LBB16_3
1747; CHECK-NOV-NEXT:  .LBB16_12: # %entry
1748; CHECK-NOV-NEXT:    mv a1, a3
1749; CHECK-NOV-NEXT:    fcvt.lu.s a4, fs3, rtz
1750; CHECK-NOV-NEXT:    bltu a2, a3, .LBB16_4
1751; CHECK-NOV-NEXT:  .LBB16_13: # %entry
1752; CHECK-NOV-NEXT:    mv a2, a3
1753; CHECK-NOV-NEXT:    fcvt.lu.s a5, fs2, rtz
1754; CHECK-NOV-NEXT:    bltu a4, a3, .LBB16_5
1755; CHECK-NOV-NEXT:  .LBB16_14: # %entry
1756; CHECK-NOV-NEXT:    mv a4, a3
1757; CHECK-NOV-NEXT:    fcvt.lu.s a6, fs1, rtz
1758; CHECK-NOV-NEXT:    bltu a5, a3, .LBB16_6
1759; CHECK-NOV-NEXT:  .LBB16_15: # %entry
1760; CHECK-NOV-NEXT:    mv a5, a3
1761; CHECK-NOV-NEXT:    fcvt.lu.s a7, fs0, rtz
1762; CHECK-NOV-NEXT:    bltu a6, a3, .LBB16_7
1763; CHECK-NOV-NEXT:  .LBB16_16: # %entry
1764; CHECK-NOV-NEXT:    mv a6, a3
1765; CHECK-NOV-NEXT:    bgeu a7, a3, .LBB16_8
1766; CHECK-NOV-NEXT:    j .LBB16_9
1767;
1768; CHECK-V-LABEL: utesth_f16i16:
1769; CHECK-V:       # %bb.0: # %entry
1770; CHECK-V-NEXT:    addi sp, sp, -80
1771; CHECK-V-NEXT:    .cfi_def_cfa_offset 80
1772; CHECK-V-NEXT:    sd ra, 72(sp) # 8-byte Folded Spill
1773; CHECK-V-NEXT:    sd s0, 64(sp) # 8-byte Folded Spill
1774; CHECK-V-NEXT:    sd s1, 56(sp) # 8-byte Folded Spill
1775; CHECK-V-NEXT:    sd s2, 48(sp) # 8-byte Folded Spill
1776; CHECK-V-NEXT:    sd s3, 40(sp) # 8-byte Folded Spill
1777; CHECK-V-NEXT:    sd s4, 32(sp) # 8-byte Folded Spill
1778; CHECK-V-NEXT:    sd s5, 24(sp) # 8-byte Folded Spill
1779; CHECK-V-NEXT:    sd s6, 16(sp) # 8-byte Folded Spill
1780; CHECK-V-NEXT:    .cfi_offset ra, -8
1781; CHECK-V-NEXT:    .cfi_offset s0, -16
1782; CHECK-V-NEXT:    .cfi_offset s1, -24
1783; CHECK-V-NEXT:    .cfi_offset s2, -32
1784; CHECK-V-NEXT:    .cfi_offset s3, -40
1785; CHECK-V-NEXT:    .cfi_offset s4, -48
1786; CHECK-V-NEXT:    .cfi_offset s5, -56
1787; CHECK-V-NEXT:    .cfi_offset s6, -64
1788; CHECK-V-NEXT:    csrr a1, vlenb
1789; CHECK-V-NEXT:    slli a1, a1, 2
1790; CHECK-V-NEXT:    sub sp, sp, a1
1791; CHECK-V-NEXT:    .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xd0, 0x00, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 80 + 4 * vlenb
1792; CHECK-V-NEXT:    lhu s0, 0(a0)
1793; CHECK-V-NEXT:    lhu s1, 8(a0)
1794; CHECK-V-NEXT:    lhu s2, 16(a0)
1795; CHECK-V-NEXT:    lhu s3, 24(a0)
1796; CHECK-V-NEXT:    lhu s4, 32(a0)
1797; CHECK-V-NEXT:    lhu s5, 40(a0)
1798; CHECK-V-NEXT:    lhu s6, 48(a0)
1799; CHECK-V-NEXT:    lhu a0, 56(a0)
1800; CHECK-V-NEXT:    fmv.w.x fa0, a0
1801; CHECK-V-NEXT:    call __extendhfsf2
1802; CHECK-V-NEXT:    fcvt.lu.s a0, fa0, rtz
1803; CHECK-V-NEXT:    fmv.w.x fa0, s6
1804; CHECK-V-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
1805; CHECK-V-NEXT:    vmv.s.x v8, a0
1806; CHECK-V-NEXT:    csrr a0, vlenb
1807; CHECK-V-NEXT:    slli a0, a0, 1
1808; CHECK-V-NEXT:    add a0, sp, a0
1809; CHECK-V-NEXT:    addi a0, a0, 16
1810; CHECK-V-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
1811; CHECK-V-NEXT:    call __extendhfsf2
1812; CHECK-V-NEXT:    fcvt.lu.s a0, fa0, rtz
1813; CHECK-V-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
1814; CHECK-V-NEXT:    vmv.s.x v8, a0
1815; CHECK-V-NEXT:    csrr a0, vlenb
1816; CHECK-V-NEXT:    slli a0, a0, 1
1817; CHECK-V-NEXT:    add a0, sp, a0
1818; CHECK-V-NEXT:    addi a0, a0, 16
1819; CHECK-V-NEXT:    vl1r.v v9, (a0) # Unknown-size Folded Reload
1820; CHECK-V-NEXT:    vslideup.vi v8, v9, 1
1821; CHECK-V-NEXT:    csrr a0, vlenb
1822; CHECK-V-NEXT:    add a0, sp, a0
1823; CHECK-V-NEXT:    addi a0, a0, 16
1824; CHECK-V-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
1825; CHECK-V-NEXT:    fmv.w.x fa0, s5
1826; CHECK-V-NEXT:    call __extendhfsf2
1827; CHECK-V-NEXT:    fcvt.lu.s a0, fa0, rtz
1828; CHECK-V-NEXT:    fmv.w.x fa0, s4
1829; CHECK-V-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
1830; CHECK-V-NEXT:    vmv.s.x v8, a0
1831; CHECK-V-NEXT:    csrr a0, vlenb
1832; CHECK-V-NEXT:    slli a0, a0, 1
1833; CHECK-V-NEXT:    add a0, sp, a0
1834; CHECK-V-NEXT:    addi a0, a0, 16
1835; CHECK-V-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
1836; CHECK-V-NEXT:    call __extendhfsf2
1837; CHECK-V-NEXT:    fcvt.lu.s a0, fa0, rtz
1838; CHECK-V-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
1839; CHECK-V-NEXT:    vmv.s.x v8, a0
1840; CHECK-V-NEXT:    csrr a0, vlenb
1841; CHECK-V-NEXT:    slli a0, a0, 1
1842; CHECK-V-NEXT:    add a0, sp, a0
1843; CHECK-V-NEXT:    addi a0, a0, 16
1844; CHECK-V-NEXT:    vl1r.v v9, (a0) # Unknown-size Folded Reload
1845; CHECK-V-NEXT:    vslideup.vi v8, v9, 1
1846; CHECK-V-NEXT:    csrr a0, vlenb
1847; CHECK-V-NEXT:    add a0, sp, a0
1848; CHECK-V-NEXT:    addi a0, a0, 16
1849; CHECK-V-NEXT:    vl1r.v v9, (a0) # Unknown-size Folded Reload
1850; CHECK-V-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
1851; CHECK-V-NEXT:    vslideup.vi v8, v9, 2
1852; CHECK-V-NEXT:    csrr a0, vlenb
1853; CHECK-V-NEXT:    slli a0, a0, 1
1854; CHECK-V-NEXT:    add a0, sp, a0
1855; CHECK-V-NEXT:    addi a0, a0, 16
1856; CHECK-V-NEXT:    vs2r.v v8, (a0) # Unknown-size Folded Spill
1857; CHECK-V-NEXT:    fmv.w.x fa0, s3
1858; CHECK-V-NEXT:    call __extendhfsf2
1859; CHECK-V-NEXT:    fcvt.lu.s a0, fa0, rtz
1860; CHECK-V-NEXT:    fmv.w.x fa0, s2
1861; CHECK-V-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
1862; CHECK-V-NEXT:    vmv.s.x v8, a0
1863; CHECK-V-NEXT:    addi a0, sp, 16
1864; CHECK-V-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
1865; CHECK-V-NEXT:    call __extendhfsf2
1866; CHECK-V-NEXT:    fcvt.lu.s a0, fa0, rtz
1867; CHECK-V-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
1868; CHECK-V-NEXT:    vmv.s.x v8, a0
1869; CHECK-V-NEXT:    addi a0, sp, 16
1870; CHECK-V-NEXT:    vl1r.v v9, (a0) # Unknown-size Folded Reload
1871; CHECK-V-NEXT:    vslideup.vi v8, v9, 1
1872; CHECK-V-NEXT:    csrr a0, vlenb
1873; CHECK-V-NEXT:    add a0, sp, a0
1874; CHECK-V-NEXT:    addi a0, a0, 16
1875; CHECK-V-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
1876; CHECK-V-NEXT:    fmv.w.x fa0, s1
1877; CHECK-V-NEXT:    call __extendhfsf2
1878; CHECK-V-NEXT:    fcvt.lu.s a0, fa0, rtz
1879; CHECK-V-NEXT:    fmv.w.x fa0, s0
1880; CHECK-V-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
1881; CHECK-V-NEXT:    vmv.s.x v8, a0
1882; CHECK-V-NEXT:    addi a0, sp, 16
1883; CHECK-V-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
1884; CHECK-V-NEXT:    call __extendhfsf2
1885; CHECK-V-NEXT:    fcvt.lu.s a0, fa0, rtz
1886; CHECK-V-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
1887; CHECK-V-NEXT:    vmv.s.x v10, a0
1888; CHECK-V-NEXT:    addi a0, sp, 16
1889; CHECK-V-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
1890; CHECK-V-NEXT:    vslideup.vi v10, v8, 1
1891; CHECK-V-NEXT:    csrr a0, vlenb
1892; CHECK-V-NEXT:    add a0, sp, a0
1893; CHECK-V-NEXT:    addi a0, a0, 16
1894; CHECK-V-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
1895; CHECK-V-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
1896; CHECK-V-NEXT:    vslideup.vi v10, v8, 2
1897; CHECK-V-NEXT:    csrr a0, vlenb
1898; CHECK-V-NEXT:    slli a0, a0, 1
1899; CHECK-V-NEXT:    add a0, sp, a0
1900; CHECK-V-NEXT:    addi a0, a0, 16
1901; CHECK-V-NEXT:    vl2r.v v8, (a0) # Unknown-size Folded Reload
1902; CHECK-V-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
1903; CHECK-V-NEXT:    vslideup.vi v10, v8, 4
1904; CHECK-V-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
1905; CHECK-V-NEXT:    vnclipu.wi v8, v10, 0
1906; CHECK-V-NEXT:    csrr a0, vlenb
1907; CHECK-V-NEXT:    slli a0, a0, 2
1908; CHECK-V-NEXT:    add sp, sp, a0
1909; CHECK-V-NEXT:    .cfi_def_cfa sp, 80
1910; CHECK-V-NEXT:    ld ra, 72(sp) # 8-byte Folded Reload
1911; CHECK-V-NEXT:    ld s0, 64(sp) # 8-byte Folded Reload
1912; CHECK-V-NEXT:    ld s1, 56(sp) # 8-byte Folded Reload
1913; CHECK-V-NEXT:    ld s2, 48(sp) # 8-byte Folded Reload
1914; CHECK-V-NEXT:    ld s3, 40(sp) # 8-byte Folded Reload
1915; CHECK-V-NEXT:    ld s4, 32(sp) # 8-byte Folded Reload
1916; CHECK-V-NEXT:    ld s5, 24(sp) # 8-byte Folded Reload
1917; CHECK-V-NEXT:    ld s6, 16(sp) # 8-byte Folded Reload
1918; CHECK-V-NEXT:    .cfi_restore ra
1919; CHECK-V-NEXT:    .cfi_restore s0
1920; CHECK-V-NEXT:    .cfi_restore s1
1921; CHECK-V-NEXT:    .cfi_restore s2
1922; CHECK-V-NEXT:    .cfi_restore s3
1923; CHECK-V-NEXT:    .cfi_restore s4
1924; CHECK-V-NEXT:    .cfi_restore s5
1925; CHECK-V-NEXT:    .cfi_restore s6
1926; CHECK-V-NEXT:    addi sp, sp, 80
1927; CHECK-V-NEXT:    .cfi_def_cfa_offset 0
1928; CHECK-V-NEXT:    ret
1929entry:
1930  %conv = fptoui <8 x half> %x to <8 x i32>
1931  %0 = icmp ult <8 x i32> %conv, <i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535>
1932  %spec.store.select = select <8 x i1> %0, <8 x i32> %conv, <8 x i32> <i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535>
1933  %conv6 = trunc <8 x i32> %spec.store.select to <8 x i16>
1934  ret <8 x i16> %conv6
1935}
1936
1937define <8 x i16> @ustest_f16i16(<8 x half> %x) {
1938; CHECK-NOV-LABEL: ustest_f16i16:
1939; CHECK-NOV:       # %bb.0: # %entry
1940; CHECK-NOV-NEXT:    addi sp, sp, -128
1941; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 128
1942; CHECK-NOV-NEXT:    sd ra, 120(sp) # 8-byte Folded Spill
1943; CHECK-NOV-NEXT:    sd s0, 112(sp) # 8-byte Folded Spill
1944; CHECK-NOV-NEXT:    sd s1, 104(sp) # 8-byte Folded Spill
1945; CHECK-NOV-NEXT:    sd s2, 96(sp) # 8-byte Folded Spill
1946; CHECK-NOV-NEXT:    sd s3, 88(sp) # 8-byte Folded Spill
1947; CHECK-NOV-NEXT:    sd s4, 80(sp) # 8-byte Folded Spill
1948; CHECK-NOV-NEXT:    sd s5, 72(sp) # 8-byte Folded Spill
1949; CHECK-NOV-NEXT:    sd s6, 64(sp) # 8-byte Folded Spill
1950; CHECK-NOV-NEXT:    sd s7, 56(sp) # 8-byte Folded Spill
1951; CHECK-NOV-NEXT:    fsd fs0, 48(sp) # 8-byte Folded Spill
1952; CHECK-NOV-NEXT:    fsd fs1, 40(sp) # 8-byte Folded Spill
1953; CHECK-NOV-NEXT:    fsd fs2, 32(sp) # 8-byte Folded Spill
1954; CHECK-NOV-NEXT:    fsd fs3, 24(sp) # 8-byte Folded Spill
1955; CHECK-NOV-NEXT:    fsd fs4, 16(sp) # 8-byte Folded Spill
1956; CHECK-NOV-NEXT:    fsd fs5, 8(sp) # 8-byte Folded Spill
1957; CHECK-NOV-NEXT:    fsd fs6, 0(sp) # 8-byte Folded Spill
1958; CHECK-NOV-NEXT:    .cfi_offset ra, -8
1959; CHECK-NOV-NEXT:    .cfi_offset s0, -16
1960; CHECK-NOV-NEXT:    .cfi_offset s1, -24
1961; CHECK-NOV-NEXT:    .cfi_offset s2, -32
1962; CHECK-NOV-NEXT:    .cfi_offset s3, -40
1963; CHECK-NOV-NEXT:    .cfi_offset s4, -48
1964; CHECK-NOV-NEXT:    .cfi_offset s5, -56
1965; CHECK-NOV-NEXT:    .cfi_offset s6, -64
1966; CHECK-NOV-NEXT:    .cfi_offset s7, -72
1967; CHECK-NOV-NEXT:    .cfi_offset fs0, -80
1968; CHECK-NOV-NEXT:    .cfi_offset fs1, -88
1969; CHECK-NOV-NEXT:    .cfi_offset fs2, -96
1970; CHECK-NOV-NEXT:    .cfi_offset fs3, -104
1971; CHECK-NOV-NEXT:    .cfi_offset fs4, -112
1972; CHECK-NOV-NEXT:    .cfi_offset fs5, -120
1973; CHECK-NOV-NEXT:    .cfi_offset fs6, -128
1974; CHECK-NOV-NEXT:    .cfi_remember_state
1975; CHECK-NOV-NEXT:    lhu s1, 32(a1)
1976; CHECK-NOV-NEXT:    lhu s2, 40(a1)
1977; CHECK-NOV-NEXT:    lhu a2, 48(a1)
1978; CHECK-NOV-NEXT:    lhu s3, 56(a1)
1979; CHECK-NOV-NEXT:    lhu s4, 0(a1)
1980; CHECK-NOV-NEXT:    lhu s5, 8(a1)
1981; CHECK-NOV-NEXT:    lhu s6, 16(a1)
1982; CHECK-NOV-NEXT:    lhu s7, 24(a1)
1983; CHECK-NOV-NEXT:    mv s0, a0
1984; CHECK-NOV-NEXT:    fmv.w.x fa0, a2
1985; CHECK-NOV-NEXT:    call __extendhfsf2
1986; CHECK-NOV-NEXT:    fmv.s fs6, fa0
1987; CHECK-NOV-NEXT:    fmv.w.x fa0, s2
1988; CHECK-NOV-NEXT:    call __extendhfsf2
1989; CHECK-NOV-NEXT:    fmv.s fs5, fa0
1990; CHECK-NOV-NEXT:    fmv.w.x fa0, s1
1991; CHECK-NOV-NEXT:    call __extendhfsf2
1992; CHECK-NOV-NEXT:    fmv.s fs4, fa0
1993; CHECK-NOV-NEXT:    fmv.w.x fa0, s7
1994; CHECK-NOV-NEXT:    call __extendhfsf2
1995; CHECK-NOV-NEXT:    fmv.s fs3, fa0
1996; CHECK-NOV-NEXT:    fmv.w.x fa0, s6
1997; CHECK-NOV-NEXT:    call __extendhfsf2
1998; CHECK-NOV-NEXT:    fmv.s fs2, fa0
1999; CHECK-NOV-NEXT:    fmv.w.x fa0, s5
2000; CHECK-NOV-NEXT:    call __extendhfsf2
2001; CHECK-NOV-NEXT:    fmv.s fs1, fa0
2002; CHECK-NOV-NEXT:    fmv.w.x fa0, s4
2003; CHECK-NOV-NEXT:    call __extendhfsf2
2004; CHECK-NOV-NEXT:    fmv.s fs0, fa0
2005; CHECK-NOV-NEXT:    fmv.w.x fa0, s3
2006; CHECK-NOV-NEXT:    fcvt.l.s s1, fs6, rtz
2007; CHECK-NOV-NEXT:    call __extendhfsf2
2008; CHECK-NOV-NEXT:    fcvt.l.s a0, fa0, rtz
2009; CHECK-NOV-NEXT:    lui a4, 16
2010; CHECK-NOV-NEXT:    addiw a4, a4, -1
2011; CHECK-NOV-NEXT:    bge a0, a4, .LBB17_10
2012; CHECK-NOV-NEXT:  # %bb.1: # %entry
2013; CHECK-NOV-NEXT:    fcvt.l.s a1, fs5, rtz
2014; CHECK-NOV-NEXT:    bge s1, a4, .LBB17_11
2015; CHECK-NOV-NEXT:  .LBB17_2: # %entry
2016; CHECK-NOV-NEXT:    fcvt.l.s a2, fs4, rtz
2017; CHECK-NOV-NEXT:    bge a1, a4, .LBB17_12
2018; CHECK-NOV-NEXT:  .LBB17_3: # %entry
2019; CHECK-NOV-NEXT:    fcvt.l.s a3, fs3, rtz
2020; CHECK-NOV-NEXT:    bge a2, a4, .LBB17_13
2021; CHECK-NOV-NEXT:  .LBB17_4: # %entry
2022; CHECK-NOV-NEXT:    fcvt.l.s a5, fs2, rtz
2023; CHECK-NOV-NEXT:    bge a3, a4, .LBB17_14
2024; CHECK-NOV-NEXT:  .LBB17_5: # %entry
2025; CHECK-NOV-NEXT:    fcvt.l.s a6, fs1, rtz
2026; CHECK-NOV-NEXT:    bge a5, a4, .LBB17_15
2027; CHECK-NOV-NEXT:  .LBB17_6: # %entry
2028; CHECK-NOV-NEXT:    fcvt.l.s a7, fs0, rtz
2029; CHECK-NOV-NEXT:    bge a6, a4, .LBB17_16
2030; CHECK-NOV-NEXT:  .LBB17_7: # %entry
2031; CHECK-NOV-NEXT:    blt a7, a4, .LBB17_9
2032; CHECK-NOV-NEXT:  .LBB17_8: # %entry
2033; CHECK-NOV-NEXT:    mv a7, a4
2034; CHECK-NOV-NEXT:  .LBB17_9: # %entry
2035; CHECK-NOV-NEXT:    sgtz a4, a0
2036; CHECK-NOV-NEXT:    sgtz t0, s1
2037; CHECK-NOV-NEXT:    sgtz t1, a1
2038; CHECK-NOV-NEXT:    sgtz t2, a2
2039; CHECK-NOV-NEXT:    sgtz t3, a3
2040; CHECK-NOV-NEXT:    sgtz t4, a5
2041; CHECK-NOV-NEXT:    sgtz t5, a6
2042; CHECK-NOV-NEXT:    sgtz t6, a7
2043; CHECK-NOV-NEXT:    negw t6, t6
2044; CHECK-NOV-NEXT:    negw t5, t5
2045; CHECK-NOV-NEXT:    negw t4, t4
2046; CHECK-NOV-NEXT:    negw t3, t3
2047; CHECK-NOV-NEXT:    negw t2, t2
2048; CHECK-NOV-NEXT:    negw t1, t1
2049; CHECK-NOV-NEXT:    negw t0, t0
2050; CHECK-NOV-NEXT:    negw a4, a4
2051; CHECK-NOV-NEXT:    and a7, t6, a7
2052; CHECK-NOV-NEXT:    and a6, t5, a6
2053; CHECK-NOV-NEXT:    and a5, t4, a5
2054; CHECK-NOV-NEXT:    and a3, t3, a3
2055; CHECK-NOV-NEXT:    and a2, t2, a2
2056; CHECK-NOV-NEXT:    and a1, t1, a1
2057; CHECK-NOV-NEXT:    and t0, t0, s1
2058; CHECK-NOV-NEXT:    and a0, a4, a0
2059; CHECK-NOV-NEXT:    sh a2, 8(s0)
2060; CHECK-NOV-NEXT:    sh a1, 10(s0)
2061; CHECK-NOV-NEXT:    sh t0, 12(s0)
2062; CHECK-NOV-NEXT:    sh a0, 14(s0)
2063; CHECK-NOV-NEXT:    sh a7, 0(s0)
2064; CHECK-NOV-NEXT:    sh a6, 2(s0)
2065; CHECK-NOV-NEXT:    sh a5, 4(s0)
2066; CHECK-NOV-NEXT:    sh a3, 6(s0)
2067; CHECK-NOV-NEXT:    ld ra, 120(sp) # 8-byte Folded Reload
2068; CHECK-NOV-NEXT:    ld s0, 112(sp) # 8-byte Folded Reload
2069; CHECK-NOV-NEXT:    ld s1, 104(sp) # 8-byte Folded Reload
2070; CHECK-NOV-NEXT:    ld s2, 96(sp) # 8-byte Folded Reload
2071; CHECK-NOV-NEXT:    ld s3, 88(sp) # 8-byte Folded Reload
2072; CHECK-NOV-NEXT:    ld s4, 80(sp) # 8-byte Folded Reload
2073; CHECK-NOV-NEXT:    ld s5, 72(sp) # 8-byte Folded Reload
2074; CHECK-NOV-NEXT:    ld s6, 64(sp) # 8-byte Folded Reload
2075; CHECK-NOV-NEXT:    ld s7, 56(sp) # 8-byte Folded Reload
2076; CHECK-NOV-NEXT:    fld fs0, 48(sp) # 8-byte Folded Reload
2077; CHECK-NOV-NEXT:    fld fs1, 40(sp) # 8-byte Folded Reload
2078; CHECK-NOV-NEXT:    fld fs2, 32(sp) # 8-byte Folded Reload
2079; CHECK-NOV-NEXT:    fld fs3, 24(sp) # 8-byte Folded Reload
2080; CHECK-NOV-NEXT:    fld fs4, 16(sp) # 8-byte Folded Reload
2081; CHECK-NOV-NEXT:    fld fs5, 8(sp) # 8-byte Folded Reload
2082; CHECK-NOV-NEXT:    fld fs6, 0(sp) # 8-byte Folded Reload
2083; CHECK-NOV-NEXT:    .cfi_restore ra
2084; CHECK-NOV-NEXT:    .cfi_restore s0
2085; CHECK-NOV-NEXT:    .cfi_restore s1
2086; CHECK-NOV-NEXT:    .cfi_restore s2
2087; CHECK-NOV-NEXT:    .cfi_restore s3
2088; CHECK-NOV-NEXT:    .cfi_restore s4
2089; CHECK-NOV-NEXT:    .cfi_restore s5
2090; CHECK-NOV-NEXT:    .cfi_restore s6
2091; CHECK-NOV-NEXT:    .cfi_restore s7
2092; CHECK-NOV-NEXT:    .cfi_restore fs0
2093; CHECK-NOV-NEXT:    .cfi_restore fs1
2094; CHECK-NOV-NEXT:    .cfi_restore fs2
2095; CHECK-NOV-NEXT:    .cfi_restore fs3
2096; CHECK-NOV-NEXT:    .cfi_restore fs4
2097; CHECK-NOV-NEXT:    .cfi_restore fs5
2098; CHECK-NOV-NEXT:    .cfi_restore fs6
2099; CHECK-NOV-NEXT:    addi sp, sp, 128
2100; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 0
2101; CHECK-NOV-NEXT:    ret
2102; CHECK-NOV-NEXT:  .LBB17_10: # %entry
2103; CHECK-NOV-NEXT:    .cfi_restore_state
2104; CHECK-NOV-NEXT:    mv a0, a4
2105; CHECK-NOV-NEXT:    fcvt.l.s a1, fs5, rtz
2106; CHECK-NOV-NEXT:    blt s1, a4, .LBB17_2
2107; CHECK-NOV-NEXT:  .LBB17_11: # %entry
2108; CHECK-NOV-NEXT:    mv s1, a4
2109; CHECK-NOV-NEXT:    fcvt.l.s a2, fs4, rtz
2110; CHECK-NOV-NEXT:    blt a1, a4, .LBB17_3
2111; CHECK-NOV-NEXT:  .LBB17_12: # %entry
2112; CHECK-NOV-NEXT:    mv a1, a4
2113; CHECK-NOV-NEXT:    fcvt.l.s a3, fs3, rtz
2114; CHECK-NOV-NEXT:    blt a2, a4, .LBB17_4
2115; CHECK-NOV-NEXT:  .LBB17_13: # %entry
2116; CHECK-NOV-NEXT:    mv a2, a4
2117; CHECK-NOV-NEXT:    fcvt.l.s a5, fs2, rtz
2118; CHECK-NOV-NEXT:    blt a3, a4, .LBB17_5
2119; CHECK-NOV-NEXT:  .LBB17_14: # %entry
2120; CHECK-NOV-NEXT:    mv a3, a4
2121; CHECK-NOV-NEXT:    fcvt.l.s a6, fs1, rtz
2122; CHECK-NOV-NEXT:    blt a5, a4, .LBB17_6
2123; CHECK-NOV-NEXT:  .LBB17_15: # %entry
2124; CHECK-NOV-NEXT:    mv a5, a4
2125; CHECK-NOV-NEXT:    fcvt.l.s a7, fs0, rtz
2126; CHECK-NOV-NEXT:    blt a6, a4, .LBB17_7
2127; CHECK-NOV-NEXT:  .LBB17_16: # %entry
2128; CHECK-NOV-NEXT:    mv a6, a4
2129; CHECK-NOV-NEXT:    bge a7, a4, .LBB17_8
2130; CHECK-NOV-NEXT:    j .LBB17_9
2131;
2132; CHECK-V-LABEL: ustest_f16i16:
2133; CHECK-V:       # %bb.0: # %entry
2134; CHECK-V-NEXT:    addi sp, sp, -80
2135; CHECK-V-NEXT:    .cfi_def_cfa_offset 80
2136; CHECK-V-NEXT:    sd ra, 72(sp) # 8-byte Folded Spill
2137; CHECK-V-NEXT:    sd s0, 64(sp) # 8-byte Folded Spill
2138; CHECK-V-NEXT:    sd s1, 56(sp) # 8-byte Folded Spill
2139; CHECK-V-NEXT:    sd s2, 48(sp) # 8-byte Folded Spill
2140; CHECK-V-NEXT:    sd s3, 40(sp) # 8-byte Folded Spill
2141; CHECK-V-NEXT:    sd s4, 32(sp) # 8-byte Folded Spill
2142; CHECK-V-NEXT:    sd s5, 24(sp) # 8-byte Folded Spill
2143; CHECK-V-NEXT:    sd s6, 16(sp) # 8-byte Folded Spill
2144; CHECK-V-NEXT:    .cfi_offset ra, -8
2145; CHECK-V-NEXT:    .cfi_offset s0, -16
2146; CHECK-V-NEXT:    .cfi_offset s1, -24
2147; CHECK-V-NEXT:    .cfi_offset s2, -32
2148; CHECK-V-NEXT:    .cfi_offset s3, -40
2149; CHECK-V-NEXT:    .cfi_offset s4, -48
2150; CHECK-V-NEXT:    .cfi_offset s5, -56
2151; CHECK-V-NEXT:    .cfi_offset s6, -64
2152; CHECK-V-NEXT:    csrr a1, vlenb
2153; CHECK-V-NEXT:    slli a1, a1, 2
2154; CHECK-V-NEXT:    sub sp, sp, a1
2155; CHECK-V-NEXT:    .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xd0, 0x00, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 80 + 4 * vlenb
2156; CHECK-V-NEXT:    lhu s0, 0(a0)
2157; CHECK-V-NEXT:    lhu s1, 8(a0)
2158; CHECK-V-NEXT:    lhu s2, 16(a0)
2159; CHECK-V-NEXT:    lhu s3, 24(a0)
2160; CHECK-V-NEXT:    lhu s4, 32(a0)
2161; CHECK-V-NEXT:    lhu s5, 40(a0)
2162; CHECK-V-NEXT:    lhu s6, 48(a0)
2163; CHECK-V-NEXT:    lhu a0, 56(a0)
2164; CHECK-V-NEXT:    fmv.w.x fa0, a0
2165; CHECK-V-NEXT:    call __extendhfsf2
2166; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
2167; CHECK-V-NEXT:    fmv.w.x fa0, s6
2168; CHECK-V-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
2169; CHECK-V-NEXT:    vmv.s.x v8, a0
2170; CHECK-V-NEXT:    csrr a0, vlenb
2171; CHECK-V-NEXT:    slli a0, a0, 1
2172; CHECK-V-NEXT:    add a0, sp, a0
2173; CHECK-V-NEXT:    addi a0, a0, 16
2174; CHECK-V-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
2175; CHECK-V-NEXT:    call __extendhfsf2
2176; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
2177; CHECK-V-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
2178; CHECK-V-NEXT:    vmv.s.x v8, a0
2179; CHECK-V-NEXT:    csrr a0, vlenb
2180; CHECK-V-NEXT:    slli a0, a0, 1
2181; CHECK-V-NEXT:    add a0, sp, a0
2182; CHECK-V-NEXT:    addi a0, a0, 16
2183; CHECK-V-NEXT:    vl1r.v v9, (a0) # Unknown-size Folded Reload
2184; CHECK-V-NEXT:    vslideup.vi v8, v9, 1
2185; CHECK-V-NEXT:    csrr a0, vlenb
2186; CHECK-V-NEXT:    add a0, sp, a0
2187; CHECK-V-NEXT:    addi a0, a0, 16
2188; CHECK-V-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
2189; CHECK-V-NEXT:    fmv.w.x fa0, s5
2190; CHECK-V-NEXT:    call __extendhfsf2
2191; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
2192; CHECK-V-NEXT:    fmv.w.x fa0, s4
2193; CHECK-V-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
2194; CHECK-V-NEXT:    vmv.s.x v8, a0
2195; CHECK-V-NEXT:    csrr a0, vlenb
2196; CHECK-V-NEXT:    slli a0, a0, 1
2197; CHECK-V-NEXT:    add a0, sp, a0
2198; CHECK-V-NEXT:    addi a0, a0, 16
2199; CHECK-V-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
2200; CHECK-V-NEXT:    call __extendhfsf2
2201; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
2202; CHECK-V-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
2203; CHECK-V-NEXT:    vmv.s.x v8, a0
2204; CHECK-V-NEXT:    csrr a0, vlenb
2205; CHECK-V-NEXT:    slli a0, a0, 1
2206; CHECK-V-NEXT:    add a0, sp, a0
2207; CHECK-V-NEXT:    addi a0, a0, 16
2208; CHECK-V-NEXT:    vl1r.v v9, (a0) # Unknown-size Folded Reload
2209; CHECK-V-NEXT:    vslideup.vi v8, v9, 1
2210; CHECK-V-NEXT:    csrr a0, vlenb
2211; CHECK-V-NEXT:    add a0, sp, a0
2212; CHECK-V-NEXT:    addi a0, a0, 16
2213; CHECK-V-NEXT:    vl1r.v v9, (a0) # Unknown-size Folded Reload
2214; CHECK-V-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
2215; CHECK-V-NEXT:    vslideup.vi v8, v9, 2
2216; CHECK-V-NEXT:    csrr a0, vlenb
2217; CHECK-V-NEXT:    slli a0, a0, 1
2218; CHECK-V-NEXT:    add a0, sp, a0
2219; CHECK-V-NEXT:    addi a0, a0, 16
2220; CHECK-V-NEXT:    vs2r.v v8, (a0) # Unknown-size Folded Spill
2221; CHECK-V-NEXT:    fmv.w.x fa0, s3
2222; CHECK-V-NEXT:    call __extendhfsf2
2223; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
2224; CHECK-V-NEXT:    fmv.w.x fa0, s2
2225; CHECK-V-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
2226; CHECK-V-NEXT:    vmv.s.x v8, a0
2227; CHECK-V-NEXT:    addi a0, sp, 16
2228; CHECK-V-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
2229; CHECK-V-NEXT:    call __extendhfsf2
2230; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
2231; CHECK-V-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
2232; CHECK-V-NEXT:    vmv.s.x v8, a0
2233; CHECK-V-NEXT:    addi a0, sp, 16
2234; CHECK-V-NEXT:    vl1r.v v9, (a0) # Unknown-size Folded Reload
2235; CHECK-V-NEXT:    vslideup.vi v8, v9, 1
2236; CHECK-V-NEXT:    csrr a0, vlenb
2237; CHECK-V-NEXT:    add a0, sp, a0
2238; CHECK-V-NEXT:    addi a0, a0, 16
2239; CHECK-V-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
2240; CHECK-V-NEXT:    fmv.w.x fa0, s1
2241; CHECK-V-NEXT:    call __extendhfsf2
2242; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
2243; CHECK-V-NEXT:    fmv.w.x fa0, s0
2244; CHECK-V-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
2245; CHECK-V-NEXT:    vmv.s.x v8, a0
2246; CHECK-V-NEXT:    addi a0, sp, 16
2247; CHECK-V-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
2248; CHECK-V-NEXT:    call __extendhfsf2
2249; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
2250; CHECK-V-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
2251; CHECK-V-NEXT:    vmv.s.x v8, a0
2252; CHECK-V-NEXT:    addi a0, sp, 16
2253; CHECK-V-NEXT:    vl1r.v v9, (a0) # Unknown-size Folded Reload
2254; CHECK-V-NEXT:    vslideup.vi v8, v9, 1
2255; CHECK-V-NEXT:    csrr a0, vlenb
2256; CHECK-V-NEXT:    add a0, sp, a0
2257; CHECK-V-NEXT:    addi a0, a0, 16
2258; CHECK-V-NEXT:    vl1r.v v9, (a0) # Unknown-size Folded Reload
2259; CHECK-V-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
2260; CHECK-V-NEXT:    vslideup.vi v8, v9, 2
2261; CHECK-V-NEXT:    csrr a0, vlenb
2262; CHECK-V-NEXT:    slli a0, a0, 1
2263; CHECK-V-NEXT:    add a0, sp, a0
2264; CHECK-V-NEXT:    addi a0, a0, 16
2265; CHECK-V-NEXT:    vl2r.v v10, (a0) # Unknown-size Folded Reload
2266; CHECK-V-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
2267; CHECK-V-NEXT:    vslideup.vi v8, v10, 4
2268; CHECK-V-NEXT:    vmax.vx v10, v8, zero
2269; CHECK-V-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
2270; CHECK-V-NEXT:    vnclipu.wi v8, v10, 0
2271; CHECK-V-NEXT:    csrr a0, vlenb
2272; CHECK-V-NEXT:    slli a0, a0, 2
2273; CHECK-V-NEXT:    add sp, sp, a0
2274; CHECK-V-NEXT:    .cfi_def_cfa sp, 80
2275; CHECK-V-NEXT:    ld ra, 72(sp) # 8-byte Folded Reload
2276; CHECK-V-NEXT:    ld s0, 64(sp) # 8-byte Folded Reload
2277; CHECK-V-NEXT:    ld s1, 56(sp) # 8-byte Folded Reload
2278; CHECK-V-NEXT:    ld s2, 48(sp) # 8-byte Folded Reload
2279; CHECK-V-NEXT:    ld s3, 40(sp) # 8-byte Folded Reload
2280; CHECK-V-NEXT:    ld s4, 32(sp) # 8-byte Folded Reload
2281; CHECK-V-NEXT:    ld s5, 24(sp) # 8-byte Folded Reload
2282; CHECK-V-NEXT:    ld s6, 16(sp) # 8-byte Folded Reload
2283; CHECK-V-NEXT:    .cfi_restore ra
2284; CHECK-V-NEXT:    .cfi_restore s0
2285; CHECK-V-NEXT:    .cfi_restore s1
2286; CHECK-V-NEXT:    .cfi_restore s2
2287; CHECK-V-NEXT:    .cfi_restore s3
2288; CHECK-V-NEXT:    .cfi_restore s4
2289; CHECK-V-NEXT:    .cfi_restore s5
2290; CHECK-V-NEXT:    .cfi_restore s6
2291; CHECK-V-NEXT:    addi sp, sp, 80
2292; CHECK-V-NEXT:    .cfi_def_cfa_offset 0
2293; CHECK-V-NEXT:    ret
2294entry:
2295  %conv = fptosi <8 x half> %x to <8 x i32>
2296  %0 = icmp slt <8 x i32> %conv, <i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535>
2297  %spec.store.select = select <8 x i1> %0, <8 x i32> %conv, <8 x i32> <i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535>
2298  %1 = icmp sgt <8 x i32> %spec.store.select, zeroinitializer
2299  %spec.store.select7 = select <8 x i1> %1, <8 x i32> %spec.store.select, <8 x i32> zeroinitializer
2300  %conv6 = trunc <8 x i32> %spec.store.select7 to <8 x i16>
2301  ret <8 x i16> %conv6
2302}
2303
2304; i64 saturate
2305
2306define <2 x i64> @stest_f64i64(<2 x double> %x) {
2307; CHECK-NOV-LABEL: stest_f64i64:
2308; CHECK-NOV:       # %bb.0: # %entry
2309; CHECK-NOV-NEXT:    addi sp, sp, -32
2310; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 32
2311; CHECK-NOV-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
2312; CHECK-NOV-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
2313; CHECK-NOV-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
2314; CHECK-NOV-NEXT:    fsd fs0, 0(sp) # 8-byte Folded Spill
2315; CHECK-NOV-NEXT:    .cfi_offset ra, -8
2316; CHECK-NOV-NEXT:    .cfi_offset s0, -16
2317; CHECK-NOV-NEXT:    .cfi_offset s1, -24
2318; CHECK-NOV-NEXT:    .cfi_offset fs0, -32
2319; CHECK-NOV-NEXT:    fmv.d fs0, fa1
2320; CHECK-NOV-NEXT:    call __fixdfti
2321; CHECK-NOV-NEXT:    mv s0, a0
2322; CHECK-NOV-NEXT:    mv s1, a1
2323; CHECK-NOV-NEXT:    fmv.d fa0, fs0
2324; CHECK-NOV-NEXT:    call __fixdfti
2325; CHECK-NOV-NEXT:    mv a2, a0
2326; CHECK-NOV-NEXT:    li a0, -1
2327; CHECK-NOV-NEXT:    srli a3, a0, 1
2328; CHECK-NOV-NEXT:    beqz a1, .LBB18_3
2329; CHECK-NOV-NEXT:  # %bb.1: # %entry
2330; CHECK-NOV-NEXT:    slti a4, a1, 0
2331; CHECK-NOV-NEXT:    bnez s1, .LBB18_4
2332; CHECK-NOV-NEXT:  .LBB18_2:
2333; CHECK-NOV-NEXT:    sltu a5, s0, a3
2334; CHECK-NOV-NEXT:    beqz a5, .LBB18_5
2335; CHECK-NOV-NEXT:    j .LBB18_6
2336; CHECK-NOV-NEXT:  .LBB18_3:
2337; CHECK-NOV-NEXT:    sltu a4, a2, a3
2338; CHECK-NOV-NEXT:    beqz s1, .LBB18_2
2339; CHECK-NOV-NEXT:  .LBB18_4: # %entry
2340; CHECK-NOV-NEXT:    slti a5, s1, 0
2341; CHECK-NOV-NEXT:    bnez a5, .LBB18_6
2342; CHECK-NOV-NEXT:  .LBB18_5: # %entry
2343; CHECK-NOV-NEXT:    mv s0, a3
2344; CHECK-NOV-NEXT:  .LBB18_6: # %entry
2345; CHECK-NOV-NEXT:    neg a6, a5
2346; CHECK-NOV-NEXT:    neg a5, a4
2347; CHECK-NOV-NEXT:    and a5, a5, a1
2348; CHECK-NOV-NEXT:    bnez a4, .LBB18_8
2349; CHECK-NOV-NEXT:  # %bb.7: # %entry
2350; CHECK-NOV-NEXT:    mv a2, a3
2351; CHECK-NOV-NEXT:  .LBB18_8: # %entry
2352; CHECK-NOV-NEXT:    and a4, a6, s1
2353; CHECK-NOV-NEXT:    slli a1, a0, 63
2354; CHECK-NOV-NEXT:    beq a5, a0, .LBB18_11
2355; CHECK-NOV-NEXT:  # %bb.9: # %entry
2356; CHECK-NOV-NEXT:    slti a3, a5, 0
2357; CHECK-NOV-NEXT:    xori a3, a3, 1
2358; CHECK-NOV-NEXT:    bne a4, a0, .LBB18_12
2359; CHECK-NOV-NEXT:  .LBB18_10:
2360; CHECK-NOV-NEXT:    sltu a0, a1, s0
2361; CHECK-NOV-NEXT:    beqz a0, .LBB18_13
2362; CHECK-NOV-NEXT:    j .LBB18_14
2363; CHECK-NOV-NEXT:  .LBB18_11:
2364; CHECK-NOV-NEXT:    sltu a3, a1, a2
2365; CHECK-NOV-NEXT:    beq a4, a0, .LBB18_10
2366; CHECK-NOV-NEXT:  .LBB18_12: # %entry
2367; CHECK-NOV-NEXT:    slti a0, a4, 0
2368; CHECK-NOV-NEXT:    xori a0, a0, 1
2369; CHECK-NOV-NEXT:    bnez a0, .LBB18_14
2370; CHECK-NOV-NEXT:  .LBB18_13: # %entry
2371; CHECK-NOV-NEXT:    mv s0, a1
2372; CHECK-NOV-NEXT:  .LBB18_14: # %entry
2373; CHECK-NOV-NEXT:    bnez a3, .LBB18_16
2374; CHECK-NOV-NEXT:  # %bb.15: # %entry
2375; CHECK-NOV-NEXT:    mv a2, a1
2376; CHECK-NOV-NEXT:  .LBB18_16: # %entry
2377; CHECK-NOV-NEXT:    mv a0, s0
2378; CHECK-NOV-NEXT:    mv a1, a2
2379; CHECK-NOV-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
2380; CHECK-NOV-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
2381; CHECK-NOV-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
2382; CHECK-NOV-NEXT:    fld fs0, 0(sp) # 8-byte Folded Reload
2383; CHECK-NOV-NEXT:    .cfi_restore ra
2384; CHECK-NOV-NEXT:    .cfi_restore s0
2385; CHECK-NOV-NEXT:    .cfi_restore s1
2386; CHECK-NOV-NEXT:    .cfi_restore fs0
2387; CHECK-NOV-NEXT:    addi sp, sp, 32
2388; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 0
2389; CHECK-NOV-NEXT:    ret
2390;
2391; CHECK-V-LABEL: stest_f64i64:
2392; CHECK-V:       # %bb.0: # %entry
2393; CHECK-V-NEXT:    addi sp, sp, -64
2394; CHECK-V-NEXT:    .cfi_def_cfa_offset 64
2395; CHECK-V-NEXT:    sd ra, 56(sp) # 8-byte Folded Spill
2396; CHECK-V-NEXT:    sd s0, 48(sp) # 8-byte Folded Spill
2397; CHECK-V-NEXT:    sd s1, 40(sp) # 8-byte Folded Spill
2398; CHECK-V-NEXT:    .cfi_offset ra, -8
2399; CHECK-V-NEXT:    .cfi_offset s0, -16
2400; CHECK-V-NEXT:    .cfi_offset s1, -24
2401; CHECK-V-NEXT:    csrr a0, vlenb
2402; CHECK-V-NEXT:    sub sp, sp, a0
2403; CHECK-V-NEXT:    .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x01, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 64 + 1 * vlenb
2404; CHECK-V-NEXT:    addi a0, sp, 32
2405; CHECK-V-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
2406; CHECK-V-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
2407; CHECK-V-NEXT:    vslidedown.vi v9, v8, 1
2408; CHECK-V-NEXT:    vfmv.f.s fa0, v9
2409; CHECK-V-NEXT:    call __fixdfti
2410; CHECK-V-NEXT:    mv s0, a0
2411; CHECK-V-NEXT:    mv s1, a1
2412; CHECK-V-NEXT:    fld fa0, 32(sp) # 8-byte Folded Reload
2413; CHECK-V-NEXT:    call __fixdfti
2414; CHECK-V-NEXT:    li a2, -1
2415; CHECK-V-NEXT:    srli a3, a2, 1
2416; CHECK-V-NEXT:    beqz a1, .LBB18_3
2417; CHECK-V-NEXT:  # %bb.1: # %entry
2418; CHECK-V-NEXT:    slti a4, a1, 0
2419; CHECK-V-NEXT:    bnez s1, .LBB18_4
2420; CHECK-V-NEXT:  .LBB18_2:
2421; CHECK-V-NEXT:    sltu a5, s0, a3
2422; CHECK-V-NEXT:    beqz a5, .LBB18_5
2423; CHECK-V-NEXT:    j .LBB18_6
2424; CHECK-V-NEXT:  .LBB18_3:
2425; CHECK-V-NEXT:    sltu a4, a0, a3
2426; CHECK-V-NEXT:    beqz s1, .LBB18_2
2427; CHECK-V-NEXT:  .LBB18_4: # %entry
2428; CHECK-V-NEXT:    slti a5, s1, 0
2429; CHECK-V-NEXT:    bnez a5, .LBB18_6
2430; CHECK-V-NEXT:  .LBB18_5: # %entry
2431; CHECK-V-NEXT:    mv s0, a3
2432; CHECK-V-NEXT:  .LBB18_6: # %entry
2433; CHECK-V-NEXT:    neg a6, a5
2434; CHECK-V-NEXT:    neg a5, a4
2435; CHECK-V-NEXT:    and a5, a5, a1
2436; CHECK-V-NEXT:    bnez a4, .LBB18_8
2437; CHECK-V-NEXT:  # %bb.7: # %entry
2438; CHECK-V-NEXT:    mv a0, a3
2439; CHECK-V-NEXT:  .LBB18_8: # %entry
2440; CHECK-V-NEXT:    and a4, a6, s1
2441; CHECK-V-NEXT:    slli a1, a2, 63
2442; CHECK-V-NEXT:    beq a5, a2, .LBB18_11
2443; CHECK-V-NEXT:  # %bb.9: # %entry
2444; CHECK-V-NEXT:    slti a3, a5, 0
2445; CHECK-V-NEXT:    xori a3, a3, 1
2446; CHECK-V-NEXT:    bne a4, a2, .LBB18_12
2447; CHECK-V-NEXT:  .LBB18_10:
2448; CHECK-V-NEXT:    sltu a2, a1, s0
2449; CHECK-V-NEXT:    beqz a2, .LBB18_13
2450; CHECK-V-NEXT:    j .LBB18_14
2451; CHECK-V-NEXT:  .LBB18_11:
2452; CHECK-V-NEXT:    sltu a3, a1, a0
2453; CHECK-V-NEXT:    beq a4, a2, .LBB18_10
2454; CHECK-V-NEXT:  .LBB18_12: # %entry
2455; CHECK-V-NEXT:    slti a2, a4, 0
2456; CHECK-V-NEXT:    xori a2, a2, 1
2457; CHECK-V-NEXT:    bnez a2, .LBB18_14
2458; CHECK-V-NEXT:  .LBB18_13: # %entry
2459; CHECK-V-NEXT:    mv s0, a1
2460; CHECK-V-NEXT:  .LBB18_14: # %entry
2461; CHECK-V-NEXT:    bnez a3, .LBB18_16
2462; CHECK-V-NEXT:  # %bb.15: # %entry
2463; CHECK-V-NEXT:    mv a0, a1
2464; CHECK-V-NEXT:  .LBB18_16: # %entry
2465; CHECK-V-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
2466; CHECK-V-NEXT:    vmv.s.x v8, a0
2467; CHECK-V-NEXT:    vmv.s.x v9, s0
2468; CHECK-V-NEXT:    vslideup.vi v8, v9, 1
2469; CHECK-V-NEXT:    csrr a0, vlenb
2470; CHECK-V-NEXT:    add sp, sp, a0
2471; CHECK-V-NEXT:    .cfi_def_cfa sp, 64
2472; CHECK-V-NEXT:    ld ra, 56(sp) # 8-byte Folded Reload
2473; CHECK-V-NEXT:    ld s0, 48(sp) # 8-byte Folded Reload
2474; CHECK-V-NEXT:    ld s1, 40(sp) # 8-byte Folded Reload
2475; CHECK-V-NEXT:    .cfi_restore ra
2476; CHECK-V-NEXT:    .cfi_restore s0
2477; CHECK-V-NEXT:    .cfi_restore s1
2478; CHECK-V-NEXT:    addi sp, sp, 64
2479; CHECK-V-NEXT:    .cfi_def_cfa_offset 0
2480; CHECK-V-NEXT:    ret
2481entry:
2482  %conv = fptosi <2 x double> %x to <2 x i128>
2483  %0 = icmp slt <2 x i128> %conv, <i128 9223372036854775807, i128 9223372036854775807>
2484  %spec.store.select = select <2 x i1> %0, <2 x i128> %conv, <2 x i128> <i128 9223372036854775807, i128 9223372036854775807>
2485  %1 = icmp sgt <2 x i128> %spec.store.select, <i128 -9223372036854775808, i128 -9223372036854775808>
2486  %spec.store.select7 = select <2 x i1> %1, <2 x i128> %spec.store.select, <2 x i128> <i128 -9223372036854775808, i128 -9223372036854775808>
2487  %conv6 = trunc <2 x i128> %spec.store.select7 to <2 x i64>
2488  ret <2 x i64> %conv6
2489}
2490
2491define <2 x i64> @utest_f64i64(<2 x double> %x) {
2492; CHECK-NOV-LABEL: utest_f64i64:
2493; CHECK-NOV:       # %bb.0: # %entry
2494; CHECK-NOV-NEXT:    addi sp, sp, -32
2495; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 32
2496; CHECK-NOV-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
2497; CHECK-NOV-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
2498; CHECK-NOV-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
2499; CHECK-NOV-NEXT:    fsd fs0, 0(sp) # 8-byte Folded Spill
2500; CHECK-NOV-NEXT:    .cfi_offset ra, -8
2501; CHECK-NOV-NEXT:    .cfi_offset s0, -16
2502; CHECK-NOV-NEXT:    .cfi_offset s1, -24
2503; CHECK-NOV-NEXT:    .cfi_offset fs0, -32
2504; CHECK-NOV-NEXT:    fmv.d fs0, fa1
2505; CHECK-NOV-NEXT:    call __fixunsdfti
2506; CHECK-NOV-NEXT:    mv s0, a0
2507; CHECK-NOV-NEXT:    mv s1, a1
2508; CHECK-NOV-NEXT:    fmv.d fa0, fs0
2509; CHECK-NOV-NEXT:    call __fixunsdfti
2510; CHECK-NOV-NEXT:    snez a1, a1
2511; CHECK-NOV-NEXT:    snez a2, s1
2512; CHECK-NOV-NEXT:    addi a2, a2, -1
2513; CHECK-NOV-NEXT:    addi a1, a1, -1
2514; CHECK-NOV-NEXT:    and a2, a2, s0
2515; CHECK-NOV-NEXT:    and a1, a1, a0
2516; CHECK-NOV-NEXT:    mv a0, a2
2517; CHECK-NOV-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
2518; CHECK-NOV-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
2519; CHECK-NOV-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
2520; CHECK-NOV-NEXT:    fld fs0, 0(sp) # 8-byte Folded Reload
2521; CHECK-NOV-NEXT:    .cfi_restore ra
2522; CHECK-NOV-NEXT:    .cfi_restore s0
2523; CHECK-NOV-NEXT:    .cfi_restore s1
2524; CHECK-NOV-NEXT:    .cfi_restore fs0
2525; CHECK-NOV-NEXT:    addi sp, sp, 32
2526; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 0
2527; CHECK-NOV-NEXT:    ret
2528;
2529; CHECK-V-LABEL: utest_f64i64:
2530; CHECK-V:       # %bb.0: # %entry
2531; CHECK-V-NEXT:    addi sp, sp, -64
2532; CHECK-V-NEXT:    .cfi_def_cfa_offset 64
2533; CHECK-V-NEXT:    sd ra, 56(sp) # 8-byte Folded Spill
2534; CHECK-V-NEXT:    sd s0, 48(sp) # 8-byte Folded Spill
2535; CHECK-V-NEXT:    sd s1, 40(sp) # 8-byte Folded Spill
2536; CHECK-V-NEXT:    .cfi_offset ra, -8
2537; CHECK-V-NEXT:    .cfi_offset s0, -16
2538; CHECK-V-NEXT:    .cfi_offset s1, -24
2539; CHECK-V-NEXT:    csrr a0, vlenb
2540; CHECK-V-NEXT:    sub sp, sp, a0
2541; CHECK-V-NEXT:    .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x01, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 64 + 1 * vlenb
2542; CHECK-V-NEXT:    addi a0, sp, 32
2543; CHECK-V-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
2544; CHECK-V-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
2545; CHECK-V-NEXT:    vslidedown.vi v9, v8, 1
2546; CHECK-V-NEXT:    vfmv.f.s fa0, v9
2547; CHECK-V-NEXT:    call __fixunsdfti
2548; CHECK-V-NEXT:    mv s0, a0
2549; CHECK-V-NEXT:    mv s1, a1
2550; CHECK-V-NEXT:    fld fa0, 32(sp) # 8-byte Folded Reload
2551; CHECK-V-NEXT:    call __fixunsdfti
2552; CHECK-V-NEXT:    snez a1, a1
2553; CHECK-V-NEXT:    snez a2, s1
2554; CHECK-V-NEXT:    addi a2, a2, -1
2555; CHECK-V-NEXT:    addi a1, a1, -1
2556; CHECK-V-NEXT:    and a2, a2, s0
2557; CHECK-V-NEXT:    and a0, a1, a0
2558; CHECK-V-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
2559; CHECK-V-NEXT:    vmv.s.x v8, a0
2560; CHECK-V-NEXT:    vmv.s.x v9, a2
2561; CHECK-V-NEXT:    vslideup.vi v8, v9, 1
2562; CHECK-V-NEXT:    csrr a0, vlenb
2563; CHECK-V-NEXT:    add sp, sp, a0
2564; CHECK-V-NEXT:    .cfi_def_cfa sp, 64
2565; CHECK-V-NEXT:    ld ra, 56(sp) # 8-byte Folded Reload
2566; CHECK-V-NEXT:    ld s0, 48(sp) # 8-byte Folded Reload
2567; CHECK-V-NEXT:    ld s1, 40(sp) # 8-byte Folded Reload
2568; CHECK-V-NEXT:    .cfi_restore ra
2569; CHECK-V-NEXT:    .cfi_restore s0
2570; CHECK-V-NEXT:    .cfi_restore s1
2571; CHECK-V-NEXT:    addi sp, sp, 64
2572; CHECK-V-NEXT:    .cfi_def_cfa_offset 0
2573; CHECK-V-NEXT:    ret
2574entry:
2575  %conv = fptoui <2 x double> %x to <2 x i128>
2576  %0 = icmp ult <2 x i128> %conv, <i128 18446744073709551616, i128 18446744073709551616>
2577  %spec.store.select = select <2 x i1> %0, <2 x i128> %conv, <2 x i128> <i128 18446744073709551616, i128 18446744073709551616>
2578  %conv6 = trunc <2 x i128> %spec.store.select to <2 x i64>
2579  ret <2 x i64> %conv6
2580}
2581
2582define <2 x i64> @ustest_f64i64(<2 x double> %x) {
2583; CHECK-NOV-LABEL: ustest_f64i64:
2584; CHECK-NOV:       # %bb.0: # %entry
2585; CHECK-NOV-NEXT:    addi sp, sp, -32
2586; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 32
2587; CHECK-NOV-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
2588; CHECK-NOV-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
2589; CHECK-NOV-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
2590; CHECK-NOV-NEXT:    fsd fs0, 0(sp) # 8-byte Folded Spill
2591; CHECK-NOV-NEXT:    .cfi_offset ra, -8
2592; CHECK-NOV-NEXT:    .cfi_offset s0, -16
2593; CHECK-NOV-NEXT:    .cfi_offset s1, -24
2594; CHECK-NOV-NEXT:    .cfi_offset fs0, -32
2595; CHECK-NOV-NEXT:    fmv.d fs0, fa1
2596; CHECK-NOV-NEXT:    call __fixdfti
2597; CHECK-NOV-NEXT:    mv s0, a0
2598; CHECK-NOV-NEXT:    mv s1, a1
2599; CHECK-NOV-NEXT:    fmv.d fa0, fs0
2600; CHECK-NOV-NEXT:    call __fixdfti
2601; CHECK-NOV-NEXT:    mv a2, s1
2602; CHECK-NOV-NEXT:    blez s1, .LBB20_2
2603; CHECK-NOV-NEXT:  # %bb.1: # %entry
2604; CHECK-NOV-NEXT:    li a2, 1
2605; CHECK-NOV-NEXT:  .LBB20_2: # %entry
2606; CHECK-NOV-NEXT:    slti a3, a1, 1
2607; CHECK-NOV-NEXT:    slti a4, s1, 1
2608; CHECK-NOV-NEXT:    blez a1, .LBB20_4
2609; CHECK-NOV-NEXT:  # %bb.3: # %entry
2610; CHECK-NOV-NEXT:    li a1, 1
2611; CHECK-NOV-NEXT:  .LBB20_4: # %entry
2612; CHECK-NOV-NEXT:    neg a4, a4
2613; CHECK-NOV-NEXT:    neg a3, a3
2614; CHECK-NOV-NEXT:    and a3, a3, a0
2615; CHECK-NOV-NEXT:    beqz a1, .LBB20_7
2616; CHECK-NOV-NEXT:  # %bb.5: # %entry
2617; CHECK-NOV-NEXT:    sgtz a0, a1
2618; CHECK-NOV-NEXT:    and a1, a4, s0
2619; CHECK-NOV-NEXT:    bnez a2, .LBB20_8
2620; CHECK-NOV-NEXT:  .LBB20_6:
2621; CHECK-NOV-NEXT:    snez a2, a1
2622; CHECK-NOV-NEXT:    j .LBB20_9
2623; CHECK-NOV-NEXT:  .LBB20_7:
2624; CHECK-NOV-NEXT:    snez a0, a3
2625; CHECK-NOV-NEXT:    and a1, a4, s0
2626; CHECK-NOV-NEXT:    beqz a2, .LBB20_6
2627; CHECK-NOV-NEXT:  .LBB20_8: # %entry
2628; CHECK-NOV-NEXT:    sgtz a2, a2
2629; CHECK-NOV-NEXT:  .LBB20_9: # %entry
2630; CHECK-NOV-NEXT:    neg a2, a2
2631; CHECK-NOV-NEXT:    neg a4, a0
2632; CHECK-NOV-NEXT:    and a0, a2, a1
2633; CHECK-NOV-NEXT:    and a1, a4, a3
2634; CHECK-NOV-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
2635; CHECK-NOV-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
2636; CHECK-NOV-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
2637; CHECK-NOV-NEXT:    fld fs0, 0(sp) # 8-byte Folded Reload
2638; CHECK-NOV-NEXT:    .cfi_restore ra
2639; CHECK-NOV-NEXT:    .cfi_restore s0
2640; CHECK-NOV-NEXT:    .cfi_restore s1
2641; CHECK-NOV-NEXT:    .cfi_restore fs0
2642; CHECK-NOV-NEXT:    addi sp, sp, 32
2643; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 0
2644; CHECK-NOV-NEXT:    ret
2645;
2646; CHECK-V-LABEL: ustest_f64i64:
2647; CHECK-V:       # %bb.0: # %entry
2648; CHECK-V-NEXT:    addi sp, sp, -64
2649; CHECK-V-NEXT:    .cfi_def_cfa_offset 64
2650; CHECK-V-NEXT:    sd ra, 56(sp) # 8-byte Folded Spill
2651; CHECK-V-NEXT:    sd s0, 48(sp) # 8-byte Folded Spill
2652; CHECK-V-NEXT:    sd s1, 40(sp) # 8-byte Folded Spill
2653; CHECK-V-NEXT:    .cfi_offset ra, -8
2654; CHECK-V-NEXT:    .cfi_offset s0, -16
2655; CHECK-V-NEXT:    .cfi_offset s1, -24
2656; CHECK-V-NEXT:    csrr a0, vlenb
2657; CHECK-V-NEXT:    sub sp, sp, a0
2658; CHECK-V-NEXT:    .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x01, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 64 + 1 * vlenb
2659; CHECK-V-NEXT:    addi a0, sp, 32
2660; CHECK-V-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
2661; CHECK-V-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
2662; CHECK-V-NEXT:    vslidedown.vi v9, v8, 1
2663; CHECK-V-NEXT:    vfmv.f.s fa0, v9
2664; CHECK-V-NEXT:    call __fixdfti
2665; CHECK-V-NEXT:    mv s0, a0
2666; CHECK-V-NEXT:    mv s1, a1
2667; CHECK-V-NEXT:    fld fa0, 32(sp) # 8-byte Folded Reload
2668; CHECK-V-NEXT:    call __fixdfti
2669; CHECK-V-NEXT:    mv a2, s1
2670; CHECK-V-NEXT:    blez s1, .LBB20_2
2671; CHECK-V-NEXT:  # %bb.1: # %entry
2672; CHECK-V-NEXT:    li a2, 1
2673; CHECK-V-NEXT:  .LBB20_2: # %entry
2674; CHECK-V-NEXT:    slti a4, a1, 1
2675; CHECK-V-NEXT:    slti a3, s1, 1
2676; CHECK-V-NEXT:    blez a1, .LBB20_4
2677; CHECK-V-NEXT:  # %bb.3: # %entry
2678; CHECK-V-NEXT:    li a1, 1
2679; CHECK-V-NEXT:  .LBB20_4: # %entry
2680; CHECK-V-NEXT:    neg a3, a3
2681; CHECK-V-NEXT:    neg a4, a4
2682; CHECK-V-NEXT:    and a0, a4, a0
2683; CHECK-V-NEXT:    beqz a1, .LBB20_7
2684; CHECK-V-NEXT:  # %bb.5: # %entry
2685; CHECK-V-NEXT:    sgtz a1, a1
2686; CHECK-V-NEXT:    and a3, a3, s0
2687; CHECK-V-NEXT:    bnez a2, .LBB20_8
2688; CHECK-V-NEXT:  .LBB20_6:
2689; CHECK-V-NEXT:    snez a2, a3
2690; CHECK-V-NEXT:    j .LBB20_9
2691; CHECK-V-NEXT:  .LBB20_7:
2692; CHECK-V-NEXT:    snez a1, a0
2693; CHECK-V-NEXT:    and a3, a3, s0
2694; CHECK-V-NEXT:    beqz a2, .LBB20_6
2695; CHECK-V-NEXT:  .LBB20_8: # %entry
2696; CHECK-V-NEXT:    sgtz a2, a2
2697; CHECK-V-NEXT:  .LBB20_9: # %entry
2698; CHECK-V-NEXT:    neg a2, a2
2699; CHECK-V-NEXT:    neg a1, a1
2700; CHECK-V-NEXT:    and a2, a2, a3
2701; CHECK-V-NEXT:    and a0, a1, a0
2702; CHECK-V-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
2703; CHECK-V-NEXT:    vmv.s.x v8, a0
2704; CHECK-V-NEXT:    vmv.s.x v9, a2
2705; CHECK-V-NEXT:    vslideup.vi v8, v9, 1
2706; CHECK-V-NEXT:    csrr a0, vlenb
2707; CHECK-V-NEXT:    add sp, sp, a0
2708; CHECK-V-NEXT:    .cfi_def_cfa sp, 64
2709; CHECK-V-NEXT:    ld ra, 56(sp) # 8-byte Folded Reload
2710; CHECK-V-NEXT:    ld s0, 48(sp) # 8-byte Folded Reload
2711; CHECK-V-NEXT:    ld s1, 40(sp) # 8-byte Folded Reload
2712; CHECK-V-NEXT:    .cfi_restore ra
2713; CHECK-V-NEXT:    .cfi_restore s0
2714; CHECK-V-NEXT:    .cfi_restore s1
2715; CHECK-V-NEXT:    addi sp, sp, 64
2716; CHECK-V-NEXT:    .cfi_def_cfa_offset 0
2717; CHECK-V-NEXT:    ret
2718entry:
2719  %conv = fptosi <2 x double> %x to <2 x i128>
2720  %0 = icmp slt <2 x i128> %conv, <i128 18446744073709551616, i128 18446744073709551616>
2721  %spec.store.select = select <2 x i1> %0, <2 x i128> %conv, <2 x i128> <i128 18446744073709551616, i128 18446744073709551616>
2722  %1 = icmp sgt <2 x i128> %spec.store.select, zeroinitializer
2723  %spec.store.select7 = select <2 x i1> %1, <2 x i128> %spec.store.select, <2 x i128> zeroinitializer
2724  %conv6 = trunc <2 x i128> %spec.store.select7 to <2 x i64>
2725  ret <2 x i64> %conv6
2726}
2727
2728define <2 x i64> @stest_f32i64(<2 x float> %x) {
2729; CHECK-NOV-LABEL: stest_f32i64:
2730; CHECK-NOV:       # %bb.0: # %entry
2731; CHECK-NOV-NEXT:    addi sp, sp, -32
2732; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 32
2733; CHECK-NOV-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
2734; CHECK-NOV-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
2735; CHECK-NOV-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
2736; CHECK-NOV-NEXT:    fsd fs0, 0(sp) # 8-byte Folded Spill
2737; CHECK-NOV-NEXT:    .cfi_offset ra, -8
2738; CHECK-NOV-NEXT:    .cfi_offset s0, -16
2739; CHECK-NOV-NEXT:    .cfi_offset s1, -24
2740; CHECK-NOV-NEXT:    .cfi_offset fs0, -32
2741; CHECK-NOV-NEXT:    fmv.s fs0, fa1
2742; CHECK-NOV-NEXT:    call __fixsfti
2743; CHECK-NOV-NEXT:    mv s0, a0
2744; CHECK-NOV-NEXT:    mv s1, a1
2745; CHECK-NOV-NEXT:    fmv.s fa0, fs0
2746; CHECK-NOV-NEXT:    call __fixsfti
2747; CHECK-NOV-NEXT:    mv a2, a0
2748; CHECK-NOV-NEXT:    li a0, -1
2749; CHECK-NOV-NEXT:    srli a3, a0, 1
2750; CHECK-NOV-NEXT:    beqz a1, .LBB21_3
2751; CHECK-NOV-NEXT:  # %bb.1: # %entry
2752; CHECK-NOV-NEXT:    slti a4, a1, 0
2753; CHECK-NOV-NEXT:    bnez s1, .LBB21_4
2754; CHECK-NOV-NEXT:  .LBB21_2:
2755; CHECK-NOV-NEXT:    sltu a5, s0, a3
2756; CHECK-NOV-NEXT:    beqz a5, .LBB21_5
2757; CHECK-NOV-NEXT:    j .LBB21_6
2758; CHECK-NOV-NEXT:  .LBB21_3:
2759; CHECK-NOV-NEXT:    sltu a4, a2, a3
2760; CHECK-NOV-NEXT:    beqz s1, .LBB21_2
2761; CHECK-NOV-NEXT:  .LBB21_4: # %entry
2762; CHECK-NOV-NEXT:    slti a5, s1, 0
2763; CHECK-NOV-NEXT:    bnez a5, .LBB21_6
2764; CHECK-NOV-NEXT:  .LBB21_5: # %entry
2765; CHECK-NOV-NEXT:    mv s0, a3
2766; CHECK-NOV-NEXT:  .LBB21_6: # %entry
2767; CHECK-NOV-NEXT:    neg a6, a5
2768; CHECK-NOV-NEXT:    neg a5, a4
2769; CHECK-NOV-NEXT:    and a5, a5, a1
2770; CHECK-NOV-NEXT:    bnez a4, .LBB21_8
2771; CHECK-NOV-NEXT:  # %bb.7: # %entry
2772; CHECK-NOV-NEXT:    mv a2, a3
2773; CHECK-NOV-NEXT:  .LBB21_8: # %entry
2774; CHECK-NOV-NEXT:    and a4, a6, s1
2775; CHECK-NOV-NEXT:    slli a1, a0, 63
2776; CHECK-NOV-NEXT:    beq a5, a0, .LBB21_11
2777; CHECK-NOV-NEXT:  # %bb.9: # %entry
2778; CHECK-NOV-NEXT:    slti a3, a5, 0
2779; CHECK-NOV-NEXT:    xori a3, a3, 1
2780; CHECK-NOV-NEXT:    bne a4, a0, .LBB21_12
2781; CHECK-NOV-NEXT:  .LBB21_10:
2782; CHECK-NOV-NEXT:    sltu a0, a1, s0
2783; CHECK-NOV-NEXT:    beqz a0, .LBB21_13
2784; CHECK-NOV-NEXT:    j .LBB21_14
2785; CHECK-NOV-NEXT:  .LBB21_11:
2786; CHECK-NOV-NEXT:    sltu a3, a1, a2
2787; CHECK-NOV-NEXT:    beq a4, a0, .LBB21_10
2788; CHECK-NOV-NEXT:  .LBB21_12: # %entry
2789; CHECK-NOV-NEXT:    slti a0, a4, 0
2790; CHECK-NOV-NEXT:    xori a0, a0, 1
2791; CHECK-NOV-NEXT:    bnez a0, .LBB21_14
2792; CHECK-NOV-NEXT:  .LBB21_13: # %entry
2793; CHECK-NOV-NEXT:    mv s0, a1
2794; CHECK-NOV-NEXT:  .LBB21_14: # %entry
2795; CHECK-NOV-NEXT:    bnez a3, .LBB21_16
2796; CHECK-NOV-NEXT:  # %bb.15: # %entry
2797; CHECK-NOV-NEXT:    mv a2, a1
2798; CHECK-NOV-NEXT:  .LBB21_16: # %entry
2799; CHECK-NOV-NEXT:    mv a0, s0
2800; CHECK-NOV-NEXT:    mv a1, a2
2801; CHECK-NOV-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
2802; CHECK-NOV-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
2803; CHECK-NOV-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
2804; CHECK-NOV-NEXT:    fld fs0, 0(sp) # 8-byte Folded Reload
2805; CHECK-NOV-NEXT:    .cfi_restore ra
2806; CHECK-NOV-NEXT:    .cfi_restore s0
2807; CHECK-NOV-NEXT:    .cfi_restore s1
2808; CHECK-NOV-NEXT:    .cfi_restore fs0
2809; CHECK-NOV-NEXT:    addi sp, sp, 32
2810; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 0
2811; CHECK-NOV-NEXT:    ret
2812;
2813; CHECK-V-LABEL: stest_f32i64:
2814; CHECK-V:       # %bb.0: # %entry
2815; CHECK-V-NEXT:    addi sp, sp, -64
2816; CHECK-V-NEXT:    .cfi_def_cfa_offset 64
2817; CHECK-V-NEXT:    sd ra, 56(sp) # 8-byte Folded Spill
2818; CHECK-V-NEXT:    sd s0, 48(sp) # 8-byte Folded Spill
2819; CHECK-V-NEXT:    sd s1, 40(sp) # 8-byte Folded Spill
2820; CHECK-V-NEXT:    .cfi_offset ra, -8
2821; CHECK-V-NEXT:    .cfi_offset s0, -16
2822; CHECK-V-NEXT:    .cfi_offset s1, -24
2823; CHECK-V-NEXT:    csrr a0, vlenb
2824; CHECK-V-NEXT:    sub sp, sp, a0
2825; CHECK-V-NEXT:    .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x01, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 64 + 1 * vlenb
2826; CHECK-V-NEXT:    addi a0, sp, 32
2827; CHECK-V-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
2828; CHECK-V-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
2829; CHECK-V-NEXT:    vslidedown.vi v9, v8, 1
2830; CHECK-V-NEXT:    vfmv.f.s fa0, v9
2831; CHECK-V-NEXT:    call __fixsfti
2832; CHECK-V-NEXT:    mv s0, a0
2833; CHECK-V-NEXT:    mv s1, a1
2834; CHECK-V-NEXT:    flw fa0, 32(sp) # 8-byte Folded Reload
2835; CHECK-V-NEXT:    call __fixsfti
2836; CHECK-V-NEXT:    li a2, -1
2837; CHECK-V-NEXT:    srli a3, a2, 1
2838; CHECK-V-NEXT:    beqz a1, .LBB21_3
2839; CHECK-V-NEXT:  # %bb.1: # %entry
2840; CHECK-V-NEXT:    slti a4, a1, 0
2841; CHECK-V-NEXT:    bnez s1, .LBB21_4
2842; CHECK-V-NEXT:  .LBB21_2:
2843; CHECK-V-NEXT:    sltu a5, s0, a3
2844; CHECK-V-NEXT:    beqz a5, .LBB21_5
2845; CHECK-V-NEXT:    j .LBB21_6
2846; CHECK-V-NEXT:  .LBB21_3:
2847; CHECK-V-NEXT:    sltu a4, a0, a3
2848; CHECK-V-NEXT:    beqz s1, .LBB21_2
2849; CHECK-V-NEXT:  .LBB21_4: # %entry
2850; CHECK-V-NEXT:    slti a5, s1, 0
2851; CHECK-V-NEXT:    bnez a5, .LBB21_6
2852; CHECK-V-NEXT:  .LBB21_5: # %entry
2853; CHECK-V-NEXT:    mv s0, a3
2854; CHECK-V-NEXT:  .LBB21_6: # %entry
2855; CHECK-V-NEXT:    neg a6, a5
2856; CHECK-V-NEXT:    neg a5, a4
2857; CHECK-V-NEXT:    and a5, a5, a1
2858; CHECK-V-NEXT:    bnez a4, .LBB21_8
2859; CHECK-V-NEXT:  # %bb.7: # %entry
2860; CHECK-V-NEXT:    mv a0, a3
2861; CHECK-V-NEXT:  .LBB21_8: # %entry
2862; CHECK-V-NEXT:    and a4, a6, s1
2863; CHECK-V-NEXT:    slli a1, a2, 63
2864; CHECK-V-NEXT:    beq a5, a2, .LBB21_11
2865; CHECK-V-NEXT:  # %bb.9: # %entry
2866; CHECK-V-NEXT:    slti a3, a5, 0
2867; CHECK-V-NEXT:    xori a3, a3, 1
2868; CHECK-V-NEXT:    bne a4, a2, .LBB21_12
2869; CHECK-V-NEXT:  .LBB21_10:
2870; CHECK-V-NEXT:    sltu a2, a1, s0
2871; CHECK-V-NEXT:    beqz a2, .LBB21_13
2872; CHECK-V-NEXT:    j .LBB21_14
2873; CHECK-V-NEXT:  .LBB21_11:
2874; CHECK-V-NEXT:    sltu a3, a1, a0
2875; CHECK-V-NEXT:    beq a4, a2, .LBB21_10
2876; CHECK-V-NEXT:  .LBB21_12: # %entry
2877; CHECK-V-NEXT:    slti a2, a4, 0
2878; CHECK-V-NEXT:    xori a2, a2, 1
2879; CHECK-V-NEXT:    bnez a2, .LBB21_14
2880; CHECK-V-NEXT:  .LBB21_13: # %entry
2881; CHECK-V-NEXT:    mv s0, a1
2882; CHECK-V-NEXT:  .LBB21_14: # %entry
2883; CHECK-V-NEXT:    bnez a3, .LBB21_16
2884; CHECK-V-NEXT:  # %bb.15: # %entry
2885; CHECK-V-NEXT:    mv a0, a1
2886; CHECK-V-NEXT:  .LBB21_16: # %entry
2887; CHECK-V-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
2888; CHECK-V-NEXT:    vmv.s.x v8, a0
2889; CHECK-V-NEXT:    vmv.s.x v9, s0
2890; CHECK-V-NEXT:    vslideup.vi v8, v9, 1
2891; CHECK-V-NEXT:    csrr a0, vlenb
2892; CHECK-V-NEXT:    add sp, sp, a0
2893; CHECK-V-NEXT:    .cfi_def_cfa sp, 64
2894; CHECK-V-NEXT:    ld ra, 56(sp) # 8-byte Folded Reload
2895; CHECK-V-NEXT:    ld s0, 48(sp) # 8-byte Folded Reload
2896; CHECK-V-NEXT:    ld s1, 40(sp) # 8-byte Folded Reload
2897; CHECK-V-NEXT:    .cfi_restore ra
2898; CHECK-V-NEXT:    .cfi_restore s0
2899; CHECK-V-NEXT:    .cfi_restore s1
2900; CHECK-V-NEXT:    addi sp, sp, 64
2901; CHECK-V-NEXT:    .cfi_def_cfa_offset 0
2902; CHECK-V-NEXT:    ret
2903entry:
2904  %conv = fptosi <2 x float> %x to <2 x i128>
2905  %0 = icmp slt <2 x i128> %conv, <i128 9223372036854775807, i128 9223372036854775807>
2906  %spec.store.select = select <2 x i1> %0, <2 x i128> %conv, <2 x i128> <i128 9223372036854775807, i128 9223372036854775807>
2907  %1 = icmp sgt <2 x i128> %spec.store.select, <i128 -9223372036854775808, i128 -9223372036854775808>
2908  %spec.store.select7 = select <2 x i1> %1, <2 x i128> %spec.store.select, <2 x i128> <i128 -9223372036854775808, i128 -9223372036854775808>
2909  %conv6 = trunc <2 x i128> %spec.store.select7 to <2 x i64>
2910  ret <2 x i64> %conv6
2911}
2912
2913define <2 x i64> @utest_f32i64(<2 x float> %x) {
2914; CHECK-NOV-LABEL: utest_f32i64:
2915; CHECK-NOV:       # %bb.0: # %entry
2916; CHECK-NOV-NEXT:    addi sp, sp, -32
2917; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 32
2918; CHECK-NOV-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
2919; CHECK-NOV-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
2920; CHECK-NOV-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
2921; CHECK-NOV-NEXT:    fsd fs0, 0(sp) # 8-byte Folded Spill
2922; CHECK-NOV-NEXT:    .cfi_offset ra, -8
2923; CHECK-NOV-NEXT:    .cfi_offset s0, -16
2924; CHECK-NOV-NEXT:    .cfi_offset s1, -24
2925; CHECK-NOV-NEXT:    .cfi_offset fs0, -32
2926; CHECK-NOV-NEXT:    fmv.s fs0, fa1
2927; CHECK-NOV-NEXT:    call __fixunssfti
2928; CHECK-NOV-NEXT:    mv s0, a0
2929; CHECK-NOV-NEXT:    mv s1, a1
2930; CHECK-NOV-NEXT:    fmv.s fa0, fs0
2931; CHECK-NOV-NEXT:    call __fixunssfti
2932; CHECK-NOV-NEXT:    snez a1, a1
2933; CHECK-NOV-NEXT:    snez a2, s1
2934; CHECK-NOV-NEXT:    addi a2, a2, -1
2935; CHECK-NOV-NEXT:    addi a1, a1, -1
2936; CHECK-NOV-NEXT:    and a2, a2, s0
2937; CHECK-NOV-NEXT:    and a1, a1, a0
2938; CHECK-NOV-NEXT:    mv a0, a2
2939; CHECK-NOV-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
2940; CHECK-NOV-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
2941; CHECK-NOV-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
2942; CHECK-NOV-NEXT:    fld fs0, 0(sp) # 8-byte Folded Reload
2943; CHECK-NOV-NEXT:    .cfi_restore ra
2944; CHECK-NOV-NEXT:    .cfi_restore s0
2945; CHECK-NOV-NEXT:    .cfi_restore s1
2946; CHECK-NOV-NEXT:    .cfi_restore fs0
2947; CHECK-NOV-NEXT:    addi sp, sp, 32
2948; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 0
2949; CHECK-NOV-NEXT:    ret
2950;
2951; CHECK-V-LABEL: utest_f32i64:
2952; CHECK-V:       # %bb.0: # %entry
2953; CHECK-V-NEXT:    addi sp, sp, -64
2954; CHECK-V-NEXT:    .cfi_def_cfa_offset 64
2955; CHECK-V-NEXT:    sd ra, 56(sp) # 8-byte Folded Spill
2956; CHECK-V-NEXT:    sd s0, 48(sp) # 8-byte Folded Spill
2957; CHECK-V-NEXT:    sd s1, 40(sp) # 8-byte Folded Spill
2958; CHECK-V-NEXT:    .cfi_offset ra, -8
2959; CHECK-V-NEXT:    .cfi_offset s0, -16
2960; CHECK-V-NEXT:    .cfi_offset s1, -24
2961; CHECK-V-NEXT:    csrr a0, vlenb
2962; CHECK-V-NEXT:    sub sp, sp, a0
2963; CHECK-V-NEXT:    .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x01, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 64 + 1 * vlenb
2964; CHECK-V-NEXT:    addi a0, sp, 32
2965; CHECK-V-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
2966; CHECK-V-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
2967; CHECK-V-NEXT:    vslidedown.vi v9, v8, 1
2968; CHECK-V-NEXT:    vfmv.f.s fa0, v9
2969; CHECK-V-NEXT:    call __fixunssfti
2970; CHECK-V-NEXT:    mv s0, a0
2971; CHECK-V-NEXT:    mv s1, a1
2972; CHECK-V-NEXT:    flw fa0, 32(sp) # 8-byte Folded Reload
2973; CHECK-V-NEXT:    call __fixunssfti
2974; CHECK-V-NEXT:    snez a1, a1
2975; CHECK-V-NEXT:    snez a2, s1
2976; CHECK-V-NEXT:    addi a2, a2, -1
2977; CHECK-V-NEXT:    addi a1, a1, -1
2978; CHECK-V-NEXT:    and a2, a2, s0
2979; CHECK-V-NEXT:    and a0, a1, a0
2980; CHECK-V-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
2981; CHECK-V-NEXT:    vmv.s.x v8, a0
2982; CHECK-V-NEXT:    vmv.s.x v9, a2
2983; CHECK-V-NEXT:    vslideup.vi v8, v9, 1
2984; CHECK-V-NEXT:    csrr a0, vlenb
2985; CHECK-V-NEXT:    add sp, sp, a0
2986; CHECK-V-NEXT:    .cfi_def_cfa sp, 64
2987; CHECK-V-NEXT:    ld ra, 56(sp) # 8-byte Folded Reload
2988; CHECK-V-NEXT:    ld s0, 48(sp) # 8-byte Folded Reload
2989; CHECK-V-NEXT:    ld s1, 40(sp) # 8-byte Folded Reload
2990; CHECK-V-NEXT:    .cfi_restore ra
2991; CHECK-V-NEXT:    .cfi_restore s0
2992; CHECK-V-NEXT:    .cfi_restore s1
2993; CHECK-V-NEXT:    addi sp, sp, 64
2994; CHECK-V-NEXT:    .cfi_def_cfa_offset 0
2995; CHECK-V-NEXT:    ret
2996entry:
2997  %conv = fptoui <2 x float> %x to <2 x i128>
2998  %0 = icmp ult <2 x i128> %conv, <i128 18446744073709551616, i128 18446744073709551616>
2999  %spec.store.select = select <2 x i1> %0, <2 x i128> %conv, <2 x i128> <i128 18446744073709551616, i128 18446744073709551616>
3000  %conv6 = trunc <2 x i128> %spec.store.select to <2 x i64>
3001  ret <2 x i64> %conv6
3002}
3003
3004define <2 x i64> @ustest_f32i64(<2 x float> %x) {
3005; CHECK-NOV-LABEL: ustest_f32i64:
3006; CHECK-NOV:       # %bb.0: # %entry
3007; CHECK-NOV-NEXT:    addi sp, sp, -32
3008; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 32
3009; CHECK-NOV-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
3010; CHECK-NOV-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
3011; CHECK-NOV-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
3012; CHECK-NOV-NEXT:    fsd fs0, 0(sp) # 8-byte Folded Spill
3013; CHECK-NOV-NEXT:    .cfi_offset ra, -8
3014; CHECK-NOV-NEXT:    .cfi_offset s0, -16
3015; CHECK-NOV-NEXT:    .cfi_offset s1, -24
3016; CHECK-NOV-NEXT:    .cfi_offset fs0, -32
3017; CHECK-NOV-NEXT:    fmv.s fs0, fa1
3018; CHECK-NOV-NEXT:    call __fixsfti
3019; CHECK-NOV-NEXT:    mv s0, a0
3020; CHECK-NOV-NEXT:    mv s1, a1
3021; CHECK-NOV-NEXT:    fmv.s fa0, fs0
3022; CHECK-NOV-NEXT:    call __fixsfti
3023; CHECK-NOV-NEXT:    mv a2, s1
3024; CHECK-NOV-NEXT:    blez s1, .LBB23_2
3025; CHECK-NOV-NEXT:  # %bb.1: # %entry
3026; CHECK-NOV-NEXT:    li a2, 1
3027; CHECK-NOV-NEXT:  .LBB23_2: # %entry
3028; CHECK-NOV-NEXT:    slti a3, a1, 1
3029; CHECK-NOV-NEXT:    slti a4, s1, 1
3030; CHECK-NOV-NEXT:    blez a1, .LBB23_4
3031; CHECK-NOV-NEXT:  # %bb.3: # %entry
3032; CHECK-NOV-NEXT:    li a1, 1
3033; CHECK-NOV-NEXT:  .LBB23_4: # %entry
3034; CHECK-NOV-NEXT:    neg a4, a4
3035; CHECK-NOV-NEXT:    neg a3, a3
3036; CHECK-NOV-NEXT:    and a3, a3, a0
3037; CHECK-NOV-NEXT:    beqz a1, .LBB23_7
3038; CHECK-NOV-NEXT:  # %bb.5: # %entry
3039; CHECK-NOV-NEXT:    sgtz a0, a1
3040; CHECK-NOV-NEXT:    and a1, a4, s0
3041; CHECK-NOV-NEXT:    bnez a2, .LBB23_8
3042; CHECK-NOV-NEXT:  .LBB23_6:
3043; CHECK-NOV-NEXT:    snez a2, a1
3044; CHECK-NOV-NEXT:    j .LBB23_9
3045; CHECK-NOV-NEXT:  .LBB23_7:
3046; CHECK-NOV-NEXT:    snez a0, a3
3047; CHECK-NOV-NEXT:    and a1, a4, s0
3048; CHECK-NOV-NEXT:    beqz a2, .LBB23_6
3049; CHECK-NOV-NEXT:  .LBB23_8: # %entry
3050; CHECK-NOV-NEXT:    sgtz a2, a2
3051; CHECK-NOV-NEXT:  .LBB23_9: # %entry
3052; CHECK-NOV-NEXT:    neg a2, a2
3053; CHECK-NOV-NEXT:    neg a4, a0
3054; CHECK-NOV-NEXT:    and a0, a2, a1
3055; CHECK-NOV-NEXT:    and a1, a4, a3
3056; CHECK-NOV-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
3057; CHECK-NOV-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
3058; CHECK-NOV-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
3059; CHECK-NOV-NEXT:    fld fs0, 0(sp) # 8-byte Folded Reload
3060; CHECK-NOV-NEXT:    .cfi_restore ra
3061; CHECK-NOV-NEXT:    .cfi_restore s0
3062; CHECK-NOV-NEXT:    .cfi_restore s1
3063; CHECK-NOV-NEXT:    .cfi_restore fs0
3064; CHECK-NOV-NEXT:    addi sp, sp, 32
3065; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 0
3066; CHECK-NOV-NEXT:    ret
3067;
3068; CHECK-V-LABEL: ustest_f32i64:
3069; CHECK-V:       # %bb.0: # %entry
3070; CHECK-V-NEXT:    addi sp, sp, -64
3071; CHECK-V-NEXT:    .cfi_def_cfa_offset 64
3072; CHECK-V-NEXT:    sd ra, 56(sp) # 8-byte Folded Spill
3073; CHECK-V-NEXT:    sd s0, 48(sp) # 8-byte Folded Spill
3074; CHECK-V-NEXT:    sd s1, 40(sp) # 8-byte Folded Spill
3075; CHECK-V-NEXT:    .cfi_offset ra, -8
3076; CHECK-V-NEXT:    .cfi_offset s0, -16
3077; CHECK-V-NEXT:    .cfi_offset s1, -24
3078; CHECK-V-NEXT:    csrr a0, vlenb
3079; CHECK-V-NEXT:    sub sp, sp, a0
3080; CHECK-V-NEXT:    .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x01, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 64 + 1 * vlenb
3081; CHECK-V-NEXT:    addi a0, sp, 32
3082; CHECK-V-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
3083; CHECK-V-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
3084; CHECK-V-NEXT:    vslidedown.vi v9, v8, 1
3085; CHECK-V-NEXT:    vfmv.f.s fa0, v9
3086; CHECK-V-NEXT:    call __fixsfti
3087; CHECK-V-NEXT:    mv s0, a0
3088; CHECK-V-NEXT:    mv s1, a1
3089; CHECK-V-NEXT:    flw fa0, 32(sp) # 8-byte Folded Reload
3090; CHECK-V-NEXT:    call __fixsfti
3091; CHECK-V-NEXT:    mv a2, s1
3092; CHECK-V-NEXT:    blez s1, .LBB23_2
3093; CHECK-V-NEXT:  # %bb.1: # %entry
3094; CHECK-V-NEXT:    li a2, 1
3095; CHECK-V-NEXT:  .LBB23_2: # %entry
3096; CHECK-V-NEXT:    slti a4, a1, 1
3097; CHECK-V-NEXT:    slti a3, s1, 1
3098; CHECK-V-NEXT:    blez a1, .LBB23_4
3099; CHECK-V-NEXT:  # %bb.3: # %entry
3100; CHECK-V-NEXT:    li a1, 1
3101; CHECK-V-NEXT:  .LBB23_4: # %entry
3102; CHECK-V-NEXT:    neg a3, a3
3103; CHECK-V-NEXT:    neg a4, a4
3104; CHECK-V-NEXT:    and a0, a4, a0
3105; CHECK-V-NEXT:    beqz a1, .LBB23_7
3106; CHECK-V-NEXT:  # %bb.5: # %entry
3107; CHECK-V-NEXT:    sgtz a1, a1
3108; CHECK-V-NEXT:    and a3, a3, s0
3109; CHECK-V-NEXT:    bnez a2, .LBB23_8
3110; CHECK-V-NEXT:  .LBB23_6:
3111; CHECK-V-NEXT:    snez a2, a3
3112; CHECK-V-NEXT:    j .LBB23_9
3113; CHECK-V-NEXT:  .LBB23_7:
3114; CHECK-V-NEXT:    snez a1, a0
3115; CHECK-V-NEXT:    and a3, a3, s0
3116; CHECK-V-NEXT:    beqz a2, .LBB23_6
3117; CHECK-V-NEXT:  .LBB23_8: # %entry
3118; CHECK-V-NEXT:    sgtz a2, a2
3119; CHECK-V-NEXT:  .LBB23_9: # %entry
3120; CHECK-V-NEXT:    neg a2, a2
3121; CHECK-V-NEXT:    neg a1, a1
3122; CHECK-V-NEXT:    and a2, a2, a3
3123; CHECK-V-NEXT:    and a0, a1, a0
3124; CHECK-V-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
3125; CHECK-V-NEXT:    vmv.s.x v8, a0
3126; CHECK-V-NEXT:    vmv.s.x v9, a2
3127; CHECK-V-NEXT:    vslideup.vi v8, v9, 1
3128; CHECK-V-NEXT:    csrr a0, vlenb
3129; CHECK-V-NEXT:    add sp, sp, a0
3130; CHECK-V-NEXT:    .cfi_def_cfa sp, 64
3131; CHECK-V-NEXT:    ld ra, 56(sp) # 8-byte Folded Reload
3132; CHECK-V-NEXT:    ld s0, 48(sp) # 8-byte Folded Reload
3133; CHECK-V-NEXT:    ld s1, 40(sp) # 8-byte Folded Reload
3134; CHECK-V-NEXT:    .cfi_restore ra
3135; CHECK-V-NEXT:    .cfi_restore s0
3136; CHECK-V-NEXT:    .cfi_restore s1
3137; CHECK-V-NEXT:    addi sp, sp, 64
3138; CHECK-V-NEXT:    .cfi_def_cfa_offset 0
3139; CHECK-V-NEXT:    ret
3140entry:
3141  %conv = fptosi <2 x float> %x to <2 x i128>
3142  %0 = icmp slt <2 x i128> %conv, <i128 18446744073709551616, i128 18446744073709551616>
3143  %spec.store.select = select <2 x i1> %0, <2 x i128> %conv, <2 x i128> <i128 18446744073709551616, i128 18446744073709551616>
3144  %1 = icmp sgt <2 x i128> %spec.store.select, zeroinitializer
3145  %spec.store.select7 = select <2 x i1> %1, <2 x i128> %spec.store.select, <2 x i128> zeroinitializer
3146  %conv6 = trunc <2 x i128> %spec.store.select7 to <2 x i64>
3147  ret <2 x i64> %conv6
3148}
3149
3150define <2 x i64> @stest_f16i64(<2 x half> %x) {
3151; CHECK-NOV-LABEL: stest_f16i64:
3152; CHECK-NOV:       # %bb.0: # %entry
3153; CHECK-NOV-NEXT:    addi sp, sp, -32
3154; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 32
3155; CHECK-NOV-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
3156; CHECK-NOV-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
3157; CHECK-NOV-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
3158; CHECK-NOV-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
3159; CHECK-NOV-NEXT:    .cfi_offset ra, -8
3160; CHECK-NOV-NEXT:    .cfi_offset s0, -16
3161; CHECK-NOV-NEXT:    .cfi_offset s1, -24
3162; CHECK-NOV-NEXT:    .cfi_offset s2, -32
3163; CHECK-NOV-NEXT:    mv s2, a1
3164; CHECK-NOV-NEXT:    fmv.w.x fa0, a0
3165; CHECK-NOV-NEXT:    call __extendhfsf2
3166; CHECK-NOV-NEXT:    call __fixsfti
3167; CHECK-NOV-NEXT:    mv s0, a0
3168; CHECK-NOV-NEXT:    mv s1, a1
3169; CHECK-NOV-NEXT:    fmv.w.x fa0, s2
3170; CHECK-NOV-NEXT:    call __extendhfsf2
3171; CHECK-NOV-NEXT:    call __fixsfti
3172; CHECK-NOV-NEXT:    mv a2, a0
3173; CHECK-NOV-NEXT:    li a0, -1
3174; CHECK-NOV-NEXT:    srli a3, a0, 1
3175; CHECK-NOV-NEXT:    beqz a1, .LBB24_3
3176; CHECK-NOV-NEXT:  # %bb.1: # %entry
3177; CHECK-NOV-NEXT:    slti a4, a1, 0
3178; CHECK-NOV-NEXT:    bnez s1, .LBB24_4
3179; CHECK-NOV-NEXT:  .LBB24_2:
3180; CHECK-NOV-NEXT:    sltu a5, s0, a3
3181; CHECK-NOV-NEXT:    beqz a5, .LBB24_5
3182; CHECK-NOV-NEXT:    j .LBB24_6
3183; CHECK-NOV-NEXT:  .LBB24_3:
3184; CHECK-NOV-NEXT:    sltu a4, a2, a3
3185; CHECK-NOV-NEXT:    beqz s1, .LBB24_2
3186; CHECK-NOV-NEXT:  .LBB24_4: # %entry
3187; CHECK-NOV-NEXT:    slti a5, s1, 0
3188; CHECK-NOV-NEXT:    bnez a5, .LBB24_6
3189; CHECK-NOV-NEXT:  .LBB24_5: # %entry
3190; CHECK-NOV-NEXT:    mv s0, a3
3191; CHECK-NOV-NEXT:  .LBB24_6: # %entry
3192; CHECK-NOV-NEXT:    neg a6, a5
3193; CHECK-NOV-NEXT:    neg a5, a4
3194; CHECK-NOV-NEXT:    and a5, a5, a1
3195; CHECK-NOV-NEXT:    bnez a4, .LBB24_8
3196; CHECK-NOV-NEXT:  # %bb.7: # %entry
3197; CHECK-NOV-NEXT:    mv a2, a3
3198; CHECK-NOV-NEXT:  .LBB24_8: # %entry
3199; CHECK-NOV-NEXT:    and a4, a6, s1
3200; CHECK-NOV-NEXT:    slli a1, a0, 63
3201; CHECK-NOV-NEXT:    beq a5, a0, .LBB24_11
3202; CHECK-NOV-NEXT:  # %bb.9: # %entry
3203; CHECK-NOV-NEXT:    slti a3, a5, 0
3204; CHECK-NOV-NEXT:    xori a3, a3, 1
3205; CHECK-NOV-NEXT:    bne a4, a0, .LBB24_12
3206; CHECK-NOV-NEXT:  .LBB24_10:
3207; CHECK-NOV-NEXT:    sltu a0, a1, s0
3208; CHECK-NOV-NEXT:    beqz a0, .LBB24_13
3209; CHECK-NOV-NEXT:    j .LBB24_14
3210; CHECK-NOV-NEXT:  .LBB24_11:
3211; CHECK-NOV-NEXT:    sltu a3, a1, a2
3212; CHECK-NOV-NEXT:    beq a4, a0, .LBB24_10
3213; CHECK-NOV-NEXT:  .LBB24_12: # %entry
3214; CHECK-NOV-NEXT:    slti a0, a4, 0
3215; CHECK-NOV-NEXT:    xori a0, a0, 1
3216; CHECK-NOV-NEXT:    bnez a0, .LBB24_14
3217; CHECK-NOV-NEXT:  .LBB24_13: # %entry
3218; CHECK-NOV-NEXT:    mv s0, a1
3219; CHECK-NOV-NEXT:  .LBB24_14: # %entry
3220; CHECK-NOV-NEXT:    bnez a3, .LBB24_16
3221; CHECK-NOV-NEXT:  # %bb.15: # %entry
3222; CHECK-NOV-NEXT:    mv a2, a1
3223; CHECK-NOV-NEXT:  .LBB24_16: # %entry
3224; CHECK-NOV-NEXT:    mv a0, s0
3225; CHECK-NOV-NEXT:    mv a1, a2
3226; CHECK-NOV-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
3227; CHECK-NOV-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
3228; CHECK-NOV-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
3229; CHECK-NOV-NEXT:    ld s2, 0(sp) # 8-byte Folded Reload
3230; CHECK-NOV-NEXT:    .cfi_restore ra
3231; CHECK-NOV-NEXT:    .cfi_restore s0
3232; CHECK-NOV-NEXT:    .cfi_restore s1
3233; CHECK-NOV-NEXT:    .cfi_restore s2
3234; CHECK-NOV-NEXT:    addi sp, sp, 32
3235; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 0
3236; CHECK-NOV-NEXT:    ret
3237;
3238; CHECK-V-LABEL: stest_f16i64:
3239; CHECK-V:       # %bb.0: # %entry
3240; CHECK-V-NEXT:    addi sp, sp, -32
3241; CHECK-V-NEXT:    .cfi_def_cfa_offset 32
3242; CHECK-V-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
3243; CHECK-V-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
3244; CHECK-V-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
3245; CHECK-V-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
3246; CHECK-V-NEXT:    .cfi_offset ra, -8
3247; CHECK-V-NEXT:    .cfi_offset s0, -16
3248; CHECK-V-NEXT:    .cfi_offset s1, -24
3249; CHECK-V-NEXT:    .cfi_offset s2, -32
3250; CHECK-V-NEXT:    mv s2, a1
3251; CHECK-V-NEXT:    fmv.w.x fa0, a0
3252; CHECK-V-NEXT:    call __extendhfsf2
3253; CHECK-V-NEXT:    call __fixsfti
3254; CHECK-V-NEXT:    mv s0, a0
3255; CHECK-V-NEXT:    mv s1, a1
3256; CHECK-V-NEXT:    fmv.w.x fa0, s2
3257; CHECK-V-NEXT:    call __extendhfsf2
3258; CHECK-V-NEXT:    call __fixsfti
3259; CHECK-V-NEXT:    li a2, -1
3260; CHECK-V-NEXT:    srli a3, a2, 1
3261; CHECK-V-NEXT:    beqz a1, .LBB24_3
3262; CHECK-V-NEXT:  # %bb.1: # %entry
3263; CHECK-V-NEXT:    slti a4, a1, 0
3264; CHECK-V-NEXT:    bnez s1, .LBB24_4
3265; CHECK-V-NEXT:  .LBB24_2:
3266; CHECK-V-NEXT:    sltu a5, s0, a3
3267; CHECK-V-NEXT:    beqz a5, .LBB24_5
3268; CHECK-V-NEXT:    j .LBB24_6
3269; CHECK-V-NEXT:  .LBB24_3:
3270; CHECK-V-NEXT:    sltu a4, a0, a3
3271; CHECK-V-NEXT:    beqz s1, .LBB24_2
3272; CHECK-V-NEXT:  .LBB24_4: # %entry
3273; CHECK-V-NEXT:    slti a5, s1, 0
3274; CHECK-V-NEXT:    bnez a5, .LBB24_6
3275; CHECK-V-NEXT:  .LBB24_5: # %entry
3276; CHECK-V-NEXT:    mv s0, a3
3277; CHECK-V-NEXT:  .LBB24_6: # %entry
3278; CHECK-V-NEXT:    neg a6, a5
3279; CHECK-V-NEXT:    neg a5, a4
3280; CHECK-V-NEXT:    and a5, a5, a1
3281; CHECK-V-NEXT:    bnez a4, .LBB24_8
3282; CHECK-V-NEXT:  # %bb.7: # %entry
3283; CHECK-V-NEXT:    mv a0, a3
3284; CHECK-V-NEXT:  .LBB24_8: # %entry
3285; CHECK-V-NEXT:    and a4, a6, s1
3286; CHECK-V-NEXT:    slli a1, a2, 63
3287; CHECK-V-NEXT:    beq a5, a2, .LBB24_11
3288; CHECK-V-NEXT:  # %bb.9: # %entry
3289; CHECK-V-NEXT:    slti a3, a5, 0
3290; CHECK-V-NEXT:    xori a3, a3, 1
3291; CHECK-V-NEXT:    bne a4, a2, .LBB24_12
3292; CHECK-V-NEXT:  .LBB24_10:
3293; CHECK-V-NEXT:    sltu a2, a1, s0
3294; CHECK-V-NEXT:    beqz a2, .LBB24_13
3295; CHECK-V-NEXT:    j .LBB24_14
3296; CHECK-V-NEXT:  .LBB24_11:
3297; CHECK-V-NEXT:    sltu a3, a1, a0
3298; CHECK-V-NEXT:    beq a4, a2, .LBB24_10
3299; CHECK-V-NEXT:  .LBB24_12: # %entry
3300; CHECK-V-NEXT:    slti a2, a4, 0
3301; CHECK-V-NEXT:    xori a2, a2, 1
3302; CHECK-V-NEXT:    bnez a2, .LBB24_14
3303; CHECK-V-NEXT:  .LBB24_13: # %entry
3304; CHECK-V-NEXT:    mv s0, a1
3305; CHECK-V-NEXT:  .LBB24_14: # %entry
3306; CHECK-V-NEXT:    bnez a3, .LBB24_16
3307; CHECK-V-NEXT:  # %bb.15: # %entry
3308; CHECK-V-NEXT:    mv a0, a1
3309; CHECK-V-NEXT:  .LBB24_16: # %entry
3310; CHECK-V-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
3311; CHECK-V-NEXT:    vmv.s.x v9, a0
3312; CHECK-V-NEXT:    vmv.s.x v8, s0
3313; CHECK-V-NEXT:    vslideup.vi v8, v9, 1
3314; CHECK-V-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
3315; CHECK-V-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
3316; CHECK-V-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
3317; CHECK-V-NEXT:    ld s2, 0(sp) # 8-byte Folded Reload
3318; CHECK-V-NEXT:    .cfi_restore ra
3319; CHECK-V-NEXT:    .cfi_restore s0
3320; CHECK-V-NEXT:    .cfi_restore s1
3321; CHECK-V-NEXT:    .cfi_restore s2
3322; CHECK-V-NEXT:    addi sp, sp, 32
3323; CHECK-V-NEXT:    .cfi_def_cfa_offset 0
3324; CHECK-V-NEXT:    ret
3325entry:
3326  %conv = fptosi <2 x half> %x to <2 x i128>
3327  %0 = icmp slt <2 x i128> %conv, <i128 9223372036854775807, i128 9223372036854775807>
3328  %spec.store.select = select <2 x i1> %0, <2 x i128> %conv, <2 x i128> <i128 9223372036854775807, i128 9223372036854775807>
3329  %1 = icmp sgt <2 x i128> %spec.store.select, <i128 -9223372036854775808, i128 -9223372036854775808>
3330  %spec.store.select7 = select <2 x i1> %1, <2 x i128> %spec.store.select, <2 x i128> <i128 -9223372036854775808, i128 -9223372036854775808>
3331  %conv6 = trunc <2 x i128> %spec.store.select7 to <2 x i64>
3332  ret <2 x i64> %conv6
3333}
3334
3335define <2 x i64> @utesth_f16i64(<2 x half> %x) {
3336; CHECK-NOV-LABEL: utesth_f16i64:
3337; CHECK-NOV:       # %bb.0: # %entry
3338; CHECK-NOV-NEXT:    addi sp, sp, -32
3339; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 32
3340; CHECK-NOV-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
3341; CHECK-NOV-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
3342; CHECK-NOV-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
3343; CHECK-NOV-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
3344; CHECK-NOV-NEXT:    .cfi_offset ra, -8
3345; CHECK-NOV-NEXT:    .cfi_offset s0, -16
3346; CHECK-NOV-NEXT:    .cfi_offset s1, -24
3347; CHECK-NOV-NEXT:    .cfi_offset s2, -32
3348; CHECK-NOV-NEXT:    mv s0, a1
3349; CHECK-NOV-NEXT:    fmv.w.x fa0, a0
3350; CHECK-NOV-NEXT:    call __extendhfsf2
3351; CHECK-NOV-NEXT:    call __fixunssfti
3352; CHECK-NOV-NEXT:    mv s1, a0
3353; CHECK-NOV-NEXT:    mv s2, a1
3354; CHECK-NOV-NEXT:    fmv.w.x fa0, s0
3355; CHECK-NOV-NEXT:    call __extendhfsf2
3356; CHECK-NOV-NEXT:    call __fixunssfti
3357; CHECK-NOV-NEXT:    snez a1, a1
3358; CHECK-NOV-NEXT:    snez a2, s2
3359; CHECK-NOV-NEXT:    addi a2, a2, -1
3360; CHECK-NOV-NEXT:    addi a1, a1, -1
3361; CHECK-NOV-NEXT:    and a2, a2, s1
3362; CHECK-NOV-NEXT:    and a1, a1, a0
3363; CHECK-NOV-NEXT:    mv a0, a2
3364; CHECK-NOV-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
3365; CHECK-NOV-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
3366; CHECK-NOV-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
3367; CHECK-NOV-NEXT:    ld s2, 0(sp) # 8-byte Folded Reload
3368; CHECK-NOV-NEXT:    .cfi_restore ra
3369; CHECK-NOV-NEXT:    .cfi_restore s0
3370; CHECK-NOV-NEXT:    .cfi_restore s1
3371; CHECK-NOV-NEXT:    .cfi_restore s2
3372; CHECK-NOV-NEXT:    addi sp, sp, 32
3373; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 0
3374; CHECK-NOV-NEXT:    ret
3375;
3376; CHECK-V-LABEL: utesth_f16i64:
3377; CHECK-V:       # %bb.0: # %entry
3378; CHECK-V-NEXT:    addi sp, sp, -32
3379; CHECK-V-NEXT:    .cfi_def_cfa_offset 32
3380; CHECK-V-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
3381; CHECK-V-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
3382; CHECK-V-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
3383; CHECK-V-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
3384; CHECK-V-NEXT:    .cfi_offset ra, -8
3385; CHECK-V-NEXT:    .cfi_offset s0, -16
3386; CHECK-V-NEXT:    .cfi_offset s1, -24
3387; CHECK-V-NEXT:    .cfi_offset s2, -32
3388; CHECK-V-NEXT:    mv s0, a1
3389; CHECK-V-NEXT:    fmv.w.x fa0, a0
3390; CHECK-V-NEXT:    call __extendhfsf2
3391; CHECK-V-NEXT:    call __fixunssfti
3392; CHECK-V-NEXT:    mv s1, a0
3393; CHECK-V-NEXT:    mv s2, a1
3394; CHECK-V-NEXT:    fmv.w.x fa0, s0
3395; CHECK-V-NEXT:    call __extendhfsf2
3396; CHECK-V-NEXT:    call __fixunssfti
3397; CHECK-V-NEXT:    snez a1, a1
3398; CHECK-V-NEXT:    snez a2, s2
3399; CHECK-V-NEXT:    addi a2, a2, -1
3400; CHECK-V-NEXT:    addi a1, a1, -1
3401; CHECK-V-NEXT:    and a2, a2, s1
3402; CHECK-V-NEXT:    and a0, a1, a0
3403; CHECK-V-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
3404; CHECK-V-NEXT:    vmv.s.x v9, a0
3405; CHECK-V-NEXT:    vmv.s.x v8, a2
3406; CHECK-V-NEXT:    vslideup.vi v8, v9, 1
3407; CHECK-V-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
3408; CHECK-V-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
3409; CHECK-V-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
3410; CHECK-V-NEXT:    ld s2, 0(sp) # 8-byte Folded Reload
3411; CHECK-V-NEXT:    .cfi_restore ra
3412; CHECK-V-NEXT:    .cfi_restore s0
3413; CHECK-V-NEXT:    .cfi_restore s1
3414; CHECK-V-NEXT:    .cfi_restore s2
3415; CHECK-V-NEXT:    addi sp, sp, 32
3416; CHECK-V-NEXT:    .cfi_def_cfa_offset 0
3417; CHECK-V-NEXT:    ret
3418entry:
3419  %conv = fptoui <2 x half> %x to <2 x i128>
3420  %0 = icmp ult <2 x i128> %conv, <i128 18446744073709551616, i128 18446744073709551616>
3421  %spec.store.select = select <2 x i1> %0, <2 x i128> %conv, <2 x i128> <i128 18446744073709551616, i128 18446744073709551616>
3422  %conv6 = trunc <2 x i128> %spec.store.select to <2 x i64>
3423  ret <2 x i64> %conv6
3424}
3425
3426define <2 x i64> @ustest_f16i64(<2 x half> %x) {
3427; CHECK-NOV-LABEL: ustest_f16i64:
3428; CHECK-NOV:       # %bb.0: # %entry
3429; CHECK-NOV-NEXT:    addi sp, sp, -32
3430; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 32
3431; CHECK-NOV-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
3432; CHECK-NOV-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
3433; CHECK-NOV-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
3434; CHECK-NOV-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
3435; CHECK-NOV-NEXT:    .cfi_offset ra, -8
3436; CHECK-NOV-NEXT:    .cfi_offset s0, -16
3437; CHECK-NOV-NEXT:    .cfi_offset s1, -24
3438; CHECK-NOV-NEXT:    .cfi_offset s2, -32
3439; CHECK-NOV-NEXT:    mv s2, a1
3440; CHECK-NOV-NEXT:    fmv.w.x fa0, a0
3441; CHECK-NOV-NEXT:    call __extendhfsf2
3442; CHECK-NOV-NEXT:    call __fixsfti
3443; CHECK-NOV-NEXT:    mv s0, a0
3444; CHECK-NOV-NEXT:    mv s1, a1
3445; CHECK-NOV-NEXT:    fmv.w.x fa0, s2
3446; CHECK-NOV-NEXT:    call __extendhfsf2
3447; CHECK-NOV-NEXT:    call __fixsfti
3448; CHECK-NOV-NEXT:    mv a2, s1
3449; CHECK-NOV-NEXT:    blez s1, .LBB26_2
3450; CHECK-NOV-NEXT:  # %bb.1: # %entry
3451; CHECK-NOV-NEXT:    li a2, 1
3452; CHECK-NOV-NEXT:  .LBB26_2: # %entry
3453; CHECK-NOV-NEXT:    slti a3, a1, 1
3454; CHECK-NOV-NEXT:    slti a4, s1, 1
3455; CHECK-NOV-NEXT:    blez a1, .LBB26_4
3456; CHECK-NOV-NEXT:  # %bb.3: # %entry
3457; CHECK-NOV-NEXT:    li a1, 1
3458; CHECK-NOV-NEXT:  .LBB26_4: # %entry
3459; CHECK-NOV-NEXT:    neg a4, a4
3460; CHECK-NOV-NEXT:    neg a3, a3
3461; CHECK-NOV-NEXT:    and a3, a3, a0
3462; CHECK-NOV-NEXT:    beqz a1, .LBB26_7
3463; CHECK-NOV-NEXT:  # %bb.5: # %entry
3464; CHECK-NOV-NEXT:    sgtz a0, a1
3465; CHECK-NOV-NEXT:    and a1, a4, s0
3466; CHECK-NOV-NEXT:    bnez a2, .LBB26_8
3467; CHECK-NOV-NEXT:  .LBB26_6:
3468; CHECK-NOV-NEXT:    snez a2, a1
3469; CHECK-NOV-NEXT:    j .LBB26_9
3470; CHECK-NOV-NEXT:  .LBB26_7:
3471; CHECK-NOV-NEXT:    snez a0, a3
3472; CHECK-NOV-NEXT:    and a1, a4, s0
3473; CHECK-NOV-NEXT:    beqz a2, .LBB26_6
3474; CHECK-NOV-NEXT:  .LBB26_8: # %entry
3475; CHECK-NOV-NEXT:    sgtz a2, a2
3476; CHECK-NOV-NEXT:  .LBB26_9: # %entry
3477; CHECK-NOV-NEXT:    neg a2, a2
3478; CHECK-NOV-NEXT:    neg a4, a0
3479; CHECK-NOV-NEXT:    and a0, a2, a1
3480; CHECK-NOV-NEXT:    and a1, a4, a3
3481; CHECK-NOV-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
3482; CHECK-NOV-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
3483; CHECK-NOV-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
3484; CHECK-NOV-NEXT:    ld s2, 0(sp) # 8-byte Folded Reload
3485; CHECK-NOV-NEXT:    .cfi_restore ra
3486; CHECK-NOV-NEXT:    .cfi_restore s0
3487; CHECK-NOV-NEXT:    .cfi_restore s1
3488; CHECK-NOV-NEXT:    .cfi_restore s2
3489; CHECK-NOV-NEXT:    addi sp, sp, 32
3490; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 0
3491; CHECK-NOV-NEXT:    ret
3492;
3493; CHECK-V-LABEL: ustest_f16i64:
3494; CHECK-V:       # %bb.0: # %entry
3495; CHECK-V-NEXT:    addi sp, sp, -32
3496; CHECK-V-NEXT:    .cfi_def_cfa_offset 32
3497; CHECK-V-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
3498; CHECK-V-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
3499; CHECK-V-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
3500; CHECK-V-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
3501; CHECK-V-NEXT:    .cfi_offset ra, -8
3502; CHECK-V-NEXT:    .cfi_offset s0, -16
3503; CHECK-V-NEXT:    .cfi_offset s1, -24
3504; CHECK-V-NEXT:    .cfi_offset s2, -32
3505; CHECK-V-NEXT:    mv s2, a1
3506; CHECK-V-NEXT:    fmv.w.x fa0, a0
3507; CHECK-V-NEXT:    call __extendhfsf2
3508; CHECK-V-NEXT:    call __fixsfti
3509; CHECK-V-NEXT:    mv s0, a0
3510; CHECK-V-NEXT:    mv s1, a1
3511; CHECK-V-NEXT:    fmv.w.x fa0, s2
3512; CHECK-V-NEXT:    call __extendhfsf2
3513; CHECK-V-NEXT:    call __fixsfti
3514; CHECK-V-NEXT:    mv a2, s1
3515; CHECK-V-NEXT:    blez s1, .LBB26_2
3516; CHECK-V-NEXT:  # %bb.1: # %entry
3517; CHECK-V-NEXT:    li a2, 1
3518; CHECK-V-NEXT:  .LBB26_2: # %entry
3519; CHECK-V-NEXT:    slti a4, a1, 1
3520; CHECK-V-NEXT:    slti a3, s1, 1
3521; CHECK-V-NEXT:    blez a1, .LBB26_4
3522; CHECK-V-NEXT:  # %bb.3: # %entry
3523; CHECK-V-NEXT:    li a1, 1
3524; CHECK-V-NEXT:  .LBB26_4: # %entry
3525; CHECK-V-NEXT:    neg a3, a3
3526; CHECK-V-NEXT:    neg a4, a4
3527; CHECK-V-NEXT:    and a0, a4, a0
3528; CHECK-V-NEXT:    beqz a1, .LBB26_7
3529; CHECK-V-NEXT:  # %bb.5: # %entry
3530; CHECK-V-NEXT:    sgtz a1, a1
3531; CHECK-V-NEXT:    and a3, a3, s0
3532; CHECK-V-NEXT:    bnez a2, .LBB26_8
3533; CHECK-V-NEXT:  .LBB26_6:
3534; CHECK-V-NEXT:    snez a2, a3
3535; CHECK-V-NEXT:    j .LBB26_9
3536; CHECK-V-NEXT:  .LBB26_7:
3537; CHECK-V-NEXT:    snez a1, a0
3538; CHECK-V-NEXT:    and a3, a3, s0
3539; CHECK-V-NEXT:    beqz a2, .LBB26_6
3540; CHECK-V-NEXT:  .LBB26_8: # %entry
3541; CHECK-V-NEXT:    sgtz a2, a2
3542; CHECK-V-NEXT:  .LBB26_9: # %entry
3543; CHECK-V-NEXT:    neg a2, a2
3544; CHECK-V-NEXT:    neg a1, a1
3545; CHECK-V-NEXT:    and a2, a2, a3
3546; CHECK-V-NEXT:    and a0, a1, a0
3547; CHECK-V-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
3548; CHECK-V-NEXT:    vmv.s.x v9, a0
3549; CHECK-V-NEXT:    vmv.s.x v8, a2
3550; CHECK-V-NEXT:    vslideup.vi v8, v9, 1
3551; CHECK-V-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
3552; CHECK-V-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
3553; CHECK-V-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
3554; CHECK-V-NEXT:    ld s2, 0(sp) # 8-byte Folded Reload
3555; CHECK-V-NEXT:    .cfi_restore ra
3556; CHECK-V-NEXT:    .cfi_restore s0
3557; CHECK-V-NEXT:    .cfi_restore s1
3558; CHECK-V-NEXT:    .cfi_restore s2
3559; CHECK-V-NEXT:    addi sp, sp, 32
3560; CHECK-V-NEXT:    .cfi_def_cfa_offset 0
3561; CHECK-V-NEXT:    ret
3562entry:
3563  %conv = fptosi <2 x half> %x to <2 x i128>
3564  %0 = icmp slt <2 x i128> %conv, <i128 18446744073709551616, i128 18446744073709551616>
3565  %spec.store.select = select <2 x i1> %0, <2 x i128> %conv, <2 x i128> <i128 18446744073709551616, i128 18446744073709551616>
3566  %1 = icmp sgt <2 x i128> %spec.store.select, zeroinitializer
3567  %spec.store.select7 = select <2 x i1> %1, <2 x i128> %spec.store.select, <2 x i128> zeroinitializer
3568  %conv6 = trunc <2 x i128> %spec.store.select7 to <2 x i64>
3569  ret <2 x i64> %conv6
3570}
3571
3572
3573
3574; i32 saturate
3575
3576define <2 x i32> @stest_f64i32_mm(<2 x double> %x) {
3577; CHECK-NOV-LABEL: stest_f64i32_mm:
3578; CHECK-NOV:       # %bb.0: # %entry
3579; CHECK-NOV-NEXT:    fcvt.l.d a1, fa1, rtz
3580; CHECK-NOV-NEXT:    lui a2, 524288
3581; CHECK-NOV-NEXT:    addiw a3, a2, -1
3582; CHECK-NOV-NEXT:    fcvt.l.d a0, fa0, rtz
3583; CHECK-NOV-NEXT:    bge a1, a3, .LBB27_5
3584; CHECK-NOV-NEXT:  # %bb.1: # %entry
3585; CHECK-NOV-NEXT:    bge a0, a3, .LBB27_6
3586; CHECK-NOV-NEXT:  .LBB27_2: # %entry
3587; CHECK-NOV-NEXT:    bge a2, a0, .LBB27_7
3588; CHECK-NOV-NEXT:  .LBB27_3: # %entry
3589; CHECK-NOV-NEXT:    bge a2, a1, .LBB27_8
3590; CHECK-NOV-NEXT:  .LBB27_4: # %entry
3591; CHECK-NOV-NEXT:    ret
3592; CHECK-NOV-NEXT:  .LBB27_5: # %entry
3593; CHECK-NOV-NEXT:    mv a1, a3
3594; CHECK-NOV-NEXT:    blt a0, a3, .LBB27_2
3595; CHECK-NOV-NEXT:  .LBB27_6: # %entry
3596; CHECK-NOV-NEXT:    mv a0, a3
3597; CHECK-NOV-NEXT:    blt a2, a3, .LBB27_3
3598; CHECK-NOV-NEXT:  .LBB27_7: # %entry
3599; CHECK-NOV-NEXT:    lui a0, 524288
3600; CHECK-NOV-NEXT:    blt a2, a1, .LBB27_4
3601; CHECK-NOV-NEXT:  .LBB27_8: # %entry
3602; CHECK-NOV-NEXT:    lui a1, 524288
3603; CHECK-NOV-NEXT:    ret
3604;
3605; CHECK-V-LABEL: stest_f64i32_mm:
3606; CHECK-V:       # %bb.0: # %entry
3607; CHECK-V-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
3608; CHECK-V-NEXT:    vfcvt.rtz.x.f.v v8, v8
3609; CHECK-V-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
3610; CHECK-V-NEXT:    vnclip.wi v8, v8, 0
3611; CHECK-V-NEXT:    ret
3612entry:
3613  %conv = fptosi <2 x double> %x to <2 x i64>
3614  %spec.store.select = call <2 x i64> @llvm.smin.v2i64(<2 x i64> %conv, <2 x i64> <i64 2147483647, i64 2147483647>)
3615  %spec.store.select7 = call <2 x i64> @llvm.smax.v2i64(<2 x i64> %spec.store.select, <2 x i64> <i64 -2147483648, i64 -2147483648>)
3616  %conv6 = trunc <2 x i64> %spec.store.select7 to <2 x i32>
3617  ret <2 x i32> %conv6
3618}
3619
3620define <2 x i32> @utest_f64i32_mm(<2 x double> %x) {
3621; CHECK-NOV-LABEL: utest_f64i32_mm:
3622; CHECK-NOV:       # %bb.0: # %entry
3623; CHECK-NOV-NEXT:    fcvt.lu.d a0, fa0, rtz
3624; CHECK-NOV-NEXT:    li a2, -1
3625; CHECK-NOV-NEXT:    srli a2, a2, 32
3626; CHECK-NOV-NEXT:    fcvt.lu.d a1, fa1, rtz
3627; CHECK-NOV-NEXT:    bgeu a0, a2, .LBB28_3
3628; CHECK-NOV-NEXT:  # %bb.1: # %entry
3629; CHECK-NOV-NEXT:    bgeu a1, a2, .LBB28_4
3630; CHECK-NOV-NEXT:  .LBB28_2: # %entry
3631; CHECK-NOV-NEXT:    ret
3632; CHECK-NOV-NEXT:  .LBB28_3: # %entry
3633; CHECK-NOV-NEXT:    mv a0, a2
3634; CHECK-NOV-NEXT:    bltu a1, a2, .LBB28_2
3635; CHECK-NOV-NEXT:  .LBB28_4: # %entry
3636; CHECK-NOV-NEXT:    mv a1, a2
3637; CHECK-NOV-NEXT:    ret
3638;
3639; CHECK-V-LABEL: utest_f64i32_mm:
3640; CHECK-V:       # %bb.0: # %entry
3641; CHECK-V-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
3642; CHECK-V-NEXT:    vfcvt.rtz.xu.f.v v8, v8
3643; CHECK-V-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
3644; CHECK-V-NEXT:    vnclipu.wi v8, v8, 0
3645; CHECK-V-NEXT:    ret
3646entry:
3647  %conv = fptoui <2 x double> %x to <2 x i64>
3648  %spec.store.select = call <2 x i64> @llvm.umin.v2i64(<2 x i64> %conv, <2 x i64> <i64 4294967295, i64 4294967295>)
3649  %conv6 = trunc <2 x i64> %spec.store.select to <2 x i32>
3650  ret <2 x i32> %conv6
3651}
3652
3653define <2 x i32> @ustest_f64i32_mm(<2 x double> %x) {
3654; CHECK-NOV-LABEL: ustest_f64i32_mm:
3655; CHECK-NOV:       # %bb.0: # %entry
3656; CHECK-NOV-NEXT:    fcvt.l.d a1, fa1, rtz
3657; CHECK-NOV-NEXT:    li a2, -1
3658; CHECK-NOV-NEXT:    srli a2, a2, 32
3659; CHECK-NOV-NEXT:    fcvt.l.d a0, fa0, rtz
3660; CHECK-NOV-NEXT:    blt a1, a2, .LBB29_2
3661; CHECK-NOV-NEXT:  # %bb.1: # %entry
3662; CHECK-NOV-NEXT:    mv a1, a2
3663; CHECK-NOV-NEXT:  .LBB29_2: # %entry
3664; CHECK-NOV-NEXT:    blt a0, a2, .LBB29_4
3665; CHECK-NOV-NEXT:  # %bb.3: # %entry
3666; CHECK-NOV-NEXT:    mv a0, a2
3667; CHECK-NOV-NEXT:  .LBB29_4: # %entry
3668; CHECK-NOV-NEXT:    sgtz a2, a0
3669; CHECK-NOV-NEXT:    neg a2, a2
3670; CHECK-NOV-NEXT:    and a0, a2, a0
3671; CHECK-NOV-NEXT:    sgtz a2, a1
3672; CHECK-NOV-NEXT:    neg a2, a2
3673; CHECK-NOV-NEXT:    and a1, a2, a1
3674; CHECK-NOV-NEXT:    ret
3675;
3676; CHECK-V-LABEL: ustest_f64i32_mm:
3677; CHECK-V:       # %bb.0: # %entry
3678; CHECK-V-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
3679; CHECK-V-NEXT:    vfcvt.rtz.x.f.v v8, v8
3680; CHECK-V-NEXT:    vmax.vx v8, v8, zero
3681; CHECK-V-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
3682; CHECK-V-NEXT:    vnclipu.wi v8, v8, 0
3683; CHECK-V-NEXT:    ret
3684entry:
3685  %conv = fptosi <2 x double> %x to <2 x i64>
3686  %spec.store.select = call <2 x i64> @llvm.smin.v2i64(<2 x i64> %conv, <2 x i64> <i64 4294967295, i64 4294967295>)
3687  %spec.store.select7 = call <2 x i64> @llvm.smax.v2i64(<2 x i64> %spec.store.select, <2 x i64> zeroinitializer)
3688  %conv6 = trunc <2 x i64> %spec.store.select7 to <2 x i32>
3689  ret <2 x i32> %conv6
3690}
3691
3692define <4 x i32> @stest_f32i32_mm(<4 x float> %x) {
3693; CHECK-NOV-LABEL: stest_f32i32_mm:
3694; CHECK-NOV:       # %bb.0: # %entry
3695; CHECK-NOV-NEXT:    fcvt.l.s a1, fa3, rtz
3696; CHECK-NOV-NEXT:    lui a3, 524288
3697; CHECK-NOV-NEXT:    addiw a6, a3, -1
3698; CHECK-NOV-NEXT:    fcvt.l.s a2, fa2, rtz
3699; CHECK-NOV-NEXT:    bge a1, a6, .LBB30_10
3700; CHECK-NOV-NEXT:  # %bb.1: # %entry
3701; CHECK-NOV-NEXT:    fcvt.l.s a4, fa1, rtz
3702; CHECK-NOV-NEXT:    bge a2, a6, .LBB30_11
3703; CHECK-NOV-NEXT:  .LBB30_2: # %entry
3704; CHECK-NOV-NEXT:    fcvt.l.s a5, fa0, rtz
3705; CHECK-NOV-NEXT:    bge a4, a6, .LBB30_12
3706; CHECK-NOV-NEXT:  .LBB30_3: # %entry
3707; CHECK-NOV-NEXT:    bge a5, a6, .LBB30_13
3708; CHECK-NOV-NEXT:  .LBB30_4: # %entry
3709; CHECK-NOV-NEXT:    bge a3, a5, .LBB30_14
3710; CHECK-NOV-NEXT:  .LBB30_5: # %entry
3711; CHECK-NOV-NEXT:    bge a3, a4, .LBB30_15
3712; CHECK-NOV-NEXT:  .LBB30_6: # %entry
3713; CHECK-NOV-NEXT:    bge a3, a2, .LBB30_16
3714; CHECK-NOV-NEXT:  .LBB30_7: # %entry
3715; CHECK-NOV-NEXT:    blt a3, a1, .LBB30_9
3716; CHECK-NOV-NEXT:  .LBB30_8: # %entry
3717; CHECK-NOV-NEXT:    lui a1, 524288
3718; CHECK-NOV-NEXT:  .LBB30_9: # %entry
3719; CHECK-NOV-NEXT:    sw a5, 0(a0)
3720; CHECK-NOV-NEXT:    sw a4, 4(a0)
3721; CHECK-NOV-NEXT:    sw a2, 8(a0)
3722; CHECK-NOV-NEXT:    sw a1, 12(a0)
3723; CHECK-NOV-NEXT:    ret
3724; CHECK-NOV-NEXT:  .LBB30_10: # %entry
3725; CHECK-NOV-NEXT:    mv a1, a6
3726; CHECK-NOV-NEXT:    fcvt.l.s a4, fa1, rtz
3727; CHECK-NOV-NEXT:    blt a2, a6, .LBB30_2
3728; CHECK-NOV-NEXT:  .LBB30_11: # %entry
3729; CHECK-NOV-NEXT:    mv a2, a6
3730; CHECK-NOV-NEXT:    fcvt.l.s a5, fa0, rtz
3731; CHECK-NOV-NEXT:    blt a4, a6, .LBB30_3
3732; CHECK-NOV-NEXT:  .LBB30_12: # %entry
3733; CHECK-NOV-NEXT:    mv a4, a6
3734; CHECK-NOV-NEXT:    blt a5, a6, .LBB30_4
3735; CHECK-NOV-NEXT:  .LBB30_13: # %entry
3736; CHECK-NOV-NEXT:    mv a5, a6
3737; CHECK-NOV-NEXT:    blt a3, a6, .LBB30_5
3738; CHECK-NOV-NEXT:  .LBB30_14: # %entry
3739; CHECK-NOV-NEXT:    lui a5, 524288
3740; CHECK-NOV-NEXT:    blt a3, a4, .LBB30_6
3741; CHECK-NOV-NEXT:  .LBB30_15: # %entry
3742; CHECK-NOV-NEXT:    lui a4, 524288
3743; CHECK-NOV-NEXT:    blt a3, a2, .LBB30_7
3744; CHECK-NOV-NEXT:  .LBB30_16: # %entry
3745; CHECK-NOV-NEXT:    lui a2, 524288
3746; CHECK-NOV-NEXT:    bge a3, a1, .LBB30_8
3747; CHECK-NOV-NEXT:    j .LBB30_9
3748;
3749; CHECK-V-LABEL: stest_f32i32_mm:
3750; CHECK-V:       # %bb.0: # %entry
3751; CHECK-V-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
3752; CHECK-V-NEXT:    vfwcvt.rtz.x.f.v v10, v8
3753; CHECK-V-NEXT:    vnclip.wi v8, v10, 0
3754; CHECK-V-NEXT:    ret
3755entry:
3756  %conv = fptosi <4 x float> %x to <4 x i64>
3757  %spec.store.select = call <4 x i64> @llvm.smin.v4i64(<4 x i64> %conv, <4 x i64> <i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647>)
3758  %spec.store.select7 = call <4 x i64> @llvm.smax.v4i64(<4 x i64> %spec.store.select, <4 x i64> <i64 -2147483648, i64 -2147483648, i64 -2147483648, i64 -2147483648>)
3759  %conv6 = trunc <4 x i64> %spec.store.select7 to <4 x i32>
3760  ret <4 x i32> %conv6
3761}
3762
3763define <4 x i32> @utest_f32i32_mm(<4 x float> %x) {
3764; CHECK-NOV-LABEL: utest_f32i32_mm:
3765; CHECK-NOV:       # %bb.0: # %entry
3766; CHECK-NOV-NEXT:    fcvt.lu.s a1, fa0, rtz
3767; CHECK-NOV-NEXT:    li a3, -1
3768; CHECK-NOV-NEXT:    srli a3, a3, 32
3769; CHECK-NOV-NEXT:    fcvt.lu.s a2, fa1, rtz
3770; CHECK-NOV-NEXT:    bgeu a1, a3, .LBB31_6
3771; CHECK-NOV-NEXT:  # %bb.1: # %entry
3772; CHECK-NOV-NEXT:    fcvt.lu.s a4, fa2, rtz
3773; CHECK-NOV-NEXT:    bgeu a2, a3, .LBB31_7
3774; CHECK-NOV-NEXT:  .LBB31_2: # %entry
3775; CHECK-NOV-NEXT:    fcvt.lu.s a5, fa3, rtz
3776; CHECK-NOV-NEXT:    bgeu a4, a3, .LBB31_8
3777; CHECK-NOV-NEXT:  .LBB31_3: # %entry
3778; CHECK-NOV-NEXT:    bltu a5, a3, .LBB31_5
3779; CHECK-NOV-NEXT:  .LBB31_4: # %entry
3780; CHECK-NOV-NEXT:    mv a5, a3
3781; CHECK-NOV-NEXT:  .LBB31_5: # %entry
3782; CHECK-NOV-NEXT:    sw a1, 0(a0)
3783; CHECK-NOV-NEXT:    sw a2, 4(a0)
3784; CHECK-NOV-NEXT:    sw a4, 8(a0)
3785; CHECK-NOV-NEXT:    sw a5, 12(a0)
3786; CHECK-NOV-NEXT:    ret
3787; CHECK-NOV-NEXT:  .LBB31_6: # %entry
3788; CHECK-NOV-NEXT:    mv a1, a3
3789; CHECK-NOV-NEXT:    fcvt.lu.s a4, fa2, rtz
3790; CHECK-NOV-NEXT:    bltu a2, a3, .LBB31_2
3791; CHECK-NOV-NEXT:  .LBB31_7: # %entry
3792; CHECK-NOV-NEXT:    mv a2, a3
3793; CHECK-NOV-NEXT:    fcvt.lu.s a5, fa3, rtz
3794; CHECK-NOV-NEXT:    bltu a4, a3, .LBB31_3
3795; CHECK-NOV-NEXT:  .LBB31_8: # %entry
3796; CHECK-NOV-NEXT:    mv a4, a3
3797; CHECK-NOV-NEXT:    bgeu a5, a3, .LBB31_4
3798; CHECK-NOV-NEXT:    j .LBB31_5
3799;
3800; CHECK-V-LABEL: utest_f32i32_mm:
3801; CHECK-V:       # %bb.0: # %entry
3802; CHECK-V-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
3803; CHECK-V-NEXT:    vfwcvt.rtz.xu.f.v v10, v8
3804; CHECK-V-NEXT:    vnclipu.wi v8, v10, 0
3805; CHECK-V-NEXT:    ret
3806entry:
3807  %conv = fptoui <4 x float> %x to <4 x i64>
3808  %spec.store.select = call <4 x i64> @llvm.umin.v4i64(<4 x i64> %conv, <4 x i64> <i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295>)
3809  %conv6 = trunc <4 x i64> %spec.store.select to <4 x i32>
3810  ret <4 x i32> %conv6
3811}
3812
3813define <4 x i32> @ustest_f32i32_mm(<4 x float> %x) {
3814; CHECK-NOV-LABEL: ustest_f32i32_mm:
3815; CHECK-NOV:       # %bb.0: # %entry
3816; CHECK-NOV-NEXT:    fcvt.l.s a1, fa3, rtz
3817; CHECK-NOV-NEXT:    li a3, -1
3818; CHECK-NOV-NEXT:    srli a3, a3, 32
3819; CHECK-NOV-NEXT:    fcvt.l.s a2, fa2, rtz
3820; CHECK-NOV-NEXT:    bge a1, a3, .LBB32_6
3821; CHECK-NOV-NEXT:  # %bb.1: # %entry
3822; CHECK-NOV-NEXT:    fcvt.l.s a4, fa1, rtz
3823; CHECK-NOV-NEXT:    bge a2, a3, .LBB32_7
3824; CHECK-NOV-NEXT:  .LBB32_2: # %entry
3825; CHECK-NOV-NEXT:    fcvt.l.s a5, fa0, rtz
3826; CHECK-NOV-NEXT:    bge a4, a3, .LBB32_8
3827; CHECK-NOV-NEXT:  .LBB32_3: # %entry
3828; CHECK-NOV-NEXT:    blt a5, a3, .LBB32_5
3829; CHECK-NOV-NEXT:  .LBB32_4: # %entry
3830; CHECK-NOV-NEXT:    mv a5, a3
3831; CHECK-NOV-NEXT:  .LBB32_5: # %entry
3832; CHECK-NOV-NEXT:    sgtz a3, a5
3833; CHECK-NOV-NEXT:    negw a3, a3
3834; CHECK-NOV-NEXT:    and a3, a3, a5
3835; CHECK-NOV-NEXT:    sgtz a5, a4
3836; CHECK-NOV-NEXT:    negw a5, a5
3837; CHECK-NOV-NEXT:    and a4, a5, a4
3838; CHECK-NOV-NEXT:    sgtz a5, a2
3839; CHECK-NOV-NEXT:    negw a5, a5
3840; CHECK-NOV-NEXT:    and a2, a5, a2
3841; CHECK-NOV-NEXT:    sgtz a5, a1
3842; CHECK-NOV-NEXT:    negw a5, a5
3843; CHECK-NOV-NEXT:    and a1, a5, a1
3844; CHECK-NOV-NEXT:    sw a3, 0(a0)
3845; CHECK-NOV-NEXT:    sw a4, 4(a0)
3846; CHECK-NOV-NEXT:    sw a2, 8(a0)
3847; CHECK-NOV-NEXT:    sw a1, 12(a0)
3848; CHECK-NOV-NEXT:    ret
3849; CHECK-NOV-NEXT:  .LBB32_6: # %entry
3850; CHECK-NOV-NEXT:    mv a1, a3
3851; CHECK-NOV-NEXT:    fcvt.l.s a4, fa1, rtz
3852; CHECK-NOV-NEXT:    blt a2, a3, .LBB32_2
3853; CHECK-NOV-NEXT:  .LBB32_7: # %entry
3854; CHECK-NOV-NEXT:    mv a2, a3
3855; CHECK-NOV-NEXT:    fcvt.l.s a5, fa0, rtz
3856; CHECK-NOV-NEXT:    blt a4, a3, .LBB32_3
3857; CHECK-NOV-NEXT:  .LBB32_8: # %entry
3858; CHECK-NOV-NEXT:    mv a4, a3
3859; CHECK-NOV-NEXT:    bge a5, a3, .LBB32_4
3860; CHECK-NOV-NEXT:    j .LBB32_5
3861;
3862; CHECK-V-LABEL: ustest_f32i32_mm:
3863; CHECK-V:       # %bb.0: # %entry
3864; CHECK-V-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
3865; CHECK-V-NEXT:    vfwcvt.rtz.x.f.v v10, v8
3866; CHECK-V-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
3867; CHECK-V-NEXT:    vmax.vx v10, v10, zero
3868; CHECK-V-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
3869; CHECK-V-NEXT:    vnclipu.wi v8, v10, 0
3870; CHECK-V-NEXT:    ret
3871entry:
3872  %conv = fptosi <4 x float> %x to <4 x i64>
3873  %spec.store.select = call <4 x i64> @llvm.smin.v4i64(<4 x i64> %conv, <4 x i64> <i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295>)
3874  %spec.store.select7 = call <4 x i64> @llvm.smax.v4i64(<4 x i64> %spec.store.select, <4 x i64> zeroinitializer)
3875  %conv6 = trunc <4 x i64> %spec.store.select7 to <4 x i32>
3876  ret <4 x i32> %conv6
3877}
3878
3879define <4 x i32> @stest_f16i32_mm(<4 x half> %x) {
3880; CHECK-NOV-LABEL: stest_f16i32_mm:
3881; CHECK-NOV:       # %bb.0: # %entry
3882; CHECK-NOV-NEXT:    addi sp, sp, -64
3883; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 64
3884; CHECK-NOV-NEXT:    sd ra, 56(sp) # 8-byte Folded Spill
3885; CHECK-NOV-NEXT:    sd s0, 48(sp) # 8-byte Folded Spill
3886; CHECK-NOV-NEXT:    sd s1, 40(sp) # 8-byte Folded Spill
3887; CHECK-NOV-NEXT:    sd s2, 32(sp) # 8-byte Folded Spill
3888; CHECK-NOV-NEXT:    sd s3, 24(sp) # 8-byte Folded Spill
3889; CHECK-NOV-NEXT:    fsd fs0, 16(sp) # 8-byte Folded Spill
3890; CHECK-NOV-NEXT:    fsd fs1, 8(sp) # 8-byte Folded Spill
3891; CHECK-NOV-NEXT:    fsd fs2, 0(sp) # 8-byte Folded Spill
3892; CHECK-NOV-NEXT:    .cfi_offset ra, -8
3893; CHECK-NOV-NEXT:    .cfi_offset s0, -16
3894; CHECK-NOV-NEXT:    .cfi_offset s1, -24
3895; CHECK-NOV-NEXT:    .cfi_offset s2, -32
3896; CHECK-NOV-NEXT:    .cfi_offset s3, -40
3897; CHECK-NOV-NEXT:    .cfi_offset fs0, -48
3898; CHECK-NOV-NEXT:    .cfi_offset fs1, -56
3899; CHECK-NOV-NEXT:    .cfi_offset fs2, -64
3900; CHECK-NOV-NEXT:    .cfi_remember_state
3901; CHECK-NOV-NEXT:    lhu s1, 0(a1)
3902; CHECK-NOV-NEXT:    lhu s2, 8(a1)
3903; CHECK-NOV-NEXT:    lhu a2, 16(a1)
3904; CHECK-NOV-NEXT:    lhu s3, 24(a1)
3905; CHECK-NOV-NEXT:    mv s0, a0
3906; CHECK-NOV-NEXT:    fmv.w.x fa0, a2
3907; CHECK-NOV-NEXT:    call __extendhfsf2
3908; CHECK-NOV-NEXT:    fmv.s fs2, fa0
3909; CHECK-NOV-NEXT:    fmv.w.x fa0, s2
3910; CHECK-NOV-NEXT:    call __extendhfsf2
3911; CHECK-NOV-NEXT:    fmv.s fs1, fa0
3912; CHECK-NOV-NEXT:    fmv.w.x fa0, s1
3913; CHECK-NOV-NEXT:    call __extendhfsf2
3914; CHECK-NOV-NEXT:    fmv.s fs0, fa0
3915; CHECK-NOV-NEXT:    fmv.w.x fa0, s3
3916; CHECK-NOV-NEXT:    fcvt.l.s s1, fs2, rtz
3917; CHECK-NOV-NEXT:    call __extendhfsf2
3918; CHECK-NOV-NEXT:    fcvt.l.s a0, fa0, rtz
3919; CHECK-NOV-NEXT:    lui a1, 524288
3920; CHECK-NOV-NEXT:    addiw a4, a1, -1
3921; CHECK-NOV-NEXT:    bge a0, a4, .LBB33_10
3922; CHECK-NOV-NEXT:  # %bb.1: # %entry
3923; CHECK-NOV-NEXT:    fcvt.l.s a2, fs1, rtz
3924; CHECK-NOV-NEXT:    bge s1, a4, .LBB33_11
3925; CHECK-NOV-NEXT:  .LBB33_2: # %entry
3926; CHECK-NOV-NEXT:    fcvt.l.s a3, fs0, rtz
3927; CHECK-NOV-NEXT:    bge a2, a4, .LBB33_12
3928; CHECK-NOV-NEXT:  .LBB33_3: # %entry
3929; CHECK-NOV-NEXT:    bge a3, a4, .LBB33_13
3930; CHECK-NOV-NEXT:  .LBB33_4: # %entry
3931; CHECK-NOV-NEXT:    bge a1, a3, .LBB33_14
3932; CHECK-NOV-NEXT:  .LBB33_5: # %entry
3933; CHECK-NOV-NEXT:    bge a1, a2, .LBB33_15
3934; CHECK-NOV-NEXT:  .LBB33_6: # %entry
3935; CHECK-NOV-NEXT:    bge a1, s1, .LBB33_16
3936; CHECK-NOV-NEXT:  .LBB33_7: # %entry
3937; CHECK-NOV-NEXT:    blt a1, a0, .LBB33_9
3938; CHECK-NOV-NEXT:  .LBB33_8: # %entry
3939; CHECK-NOV-NEXT:    lui a0, 524288
3940; CHECK-NOV-NEXT:  .LBB33_9: # %entry
3941; CHECK-NOV-NEXT:    sw a3, 0(s0)
3942; CHECK-NOV-NEXT:    sw a2, 4(s0)
3943; CHECK-NOV-NEXT:    sw s1, 8(s0)
3944; CHECK-NOV-NEXT:    sw a0, 12(s0)
3945; CHECK-NOV-NEXT:    ld ra, 56(sp) # 8-byte Folded Reload
3946; CHECK-NOV-NEXT:    ld s0, 48(sp) # 8-byte Folded Reload
3947; CHECK-NOV-NEXT:    ld s1, 40(sp) # 8-byte Folded Reload
3948; CHECK-NOV-NEXT:    ld s2, 32(sp) # 8-byte Folded Reload
3949; CHECK-NOV-NEXT:    ld s3, 24(sp) # 8-byte Folded Reload
3950; CHECK-NOV-NEXT:    fld fs0, 16(sp) # 8-byte Folded Reload
3951; CHECK-NOV-NEXT:    fld fs1, 8(sp) # 8-byte Folded Reload
3952; CHECK-NOV-NEXT:    fld fs2, 0(sp) # 8-byte Folded Reload
3953; CHECK-NOV-NEXT:    .cfi_restore ra
3954; CHECK-NOV-NEXT:    .cfi_restore s0
3955; CHECK-NOV-NEXT:    .cfi_restore s1
3956; CHECK-NOV-NEXT:    .cfi_restore s2
3957; CHECK-NOV-NEXT:    .cfi_restore s3
3958; CHECK-NOV-NEXT:    .cfi_restore fs0
3959; CHECK-NOV-NEXT:    .cfi_restore fs1
3960; CHECK-NOV-NEXT:    .cfi_restore fs2
3961; CHECK-NOV-NEXT:    addi sp, sp, 64
3962; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 0
3963; CHECK-NOV-NEXT:    ret
3964; CHECK-NOV-NEXT:  .LBB33_10: # %entry
3965; CHECK-NOV-NEXT:    .cfi_restore_state
3966; CHECK-NOV-NEXT:    mv a0, a4
3967; CHECK-NOV-NEXT:    fcvt.l.s a2, fs1, rtz
3968; CHECK-NOV-NEXT:    blt s1, a4, .LBB33_2
3969; CHECK-NOV-NEXT:  .LBB33_11: # %entry
3970; CHECK-NOV-NEXT:    mv s1, a4
3971; CHECK-NOV-NEXT:    fcvt.l.s a3, fs0, rtz
3972; CHECK-NOV-NEXT:    blt a2, a4, .LBB33_3
3973; CHECK-NOV-NEXT:  .LBB33_12: # %entry
3974; CHECK-NOV-NEXT:    mv a2, a4
3975; CHECK-NOV-NEXT:    blt a3, a4, .LBB33_4
3976; CHECK-NOV-NEXT:  .LBB33_13: # %entry
3977; CHECK-NOV-NEXT:    mv a3, a4
3978; CHECK-NOV-NEXT:    blt a1, a4, .LBB33_5
3979; CHECK-NOV-NEXT:  .LBB33_14: # %entry
3980; CHECK-NOV-NEXT:    lui a3, 524288
3981; CHECK-NOV-NEXT:    blt a1, a2, .LBB33_6
3982; CHECK-NOV-NEXT:  .LBB33_15: # %entry
3983; CHECK-NOV-NEXT:    lui a2, 524288
3984; CHECK-NOV-NEXT:    blt a1, s1, .LBB33_7
3985; CHECK-NOV-NEXT:  .LBB33_16: # %entry
3986; CHECK-NOV-NEXT:    lui s1, 524288
3987; CHECK-NOV-NEXT:    bge a1, a0, .LBB33_8
3988; CHECK-NOV-NEXT:    j .LBB33_9
3989;
3990; CHECK-V-LABEL: stest_f16i32_mm:
3991; CHECK-V:       # %bb.0: # %entry
3992; CHECK-V-NEXT:    addi sp, sp, -48
3993; CHECK-V-NEXT:    .cfi_def_cfa_offset 48
3994; CHECK-V-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
3995; CHECK-V-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
3996; CHECK-V-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
3997; CHECK-V-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
3998; CHECK-V-NEXT:    .cfi_offset ra, -8
3999; CHECK-V-NEXT:    .cfi_offset s0, -16
4000; CHECK-V-NEXT:    .cfi_offset s1, -24
4001; CHECK-V-NEXT:    .cfi_offset s2, -32
4002; CHECK-V-NEXT:    csrr a1, vlenb
4003; CHECK-V-NEXT:    slli a2, a1, 1
4004; CHECK-V-NEXT:    add a1, a2, a1
4005; CHECK-V-NEXT:    sub sp, sp, a1
4006; CHECK-V-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 3 * vlenb
4007; CHECK-V-NEXT:    lhu s0, 0(a0)
4008; CHECK-V-NEXT:    lhu s1, 8(a0)
4009; CHECK-V-NEXT:    lhu s2, 16(a0)
4010; CHECK-V-NEXT:    lhu a0, 24(a0)
4011; CHECK-V-NEXT:    fmv.w.x fa0, a0
4012; CHECK-V-NEXT:    call __extendhfsf2
4013; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
4014; CHECK-V-NEXT:    fmv.w.x fa0, s2
4015; CHECK-V-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
4016; CHECK-V-NEXT:    vmv.s.x v8, a0
4017; CHECK-V-NEXT:    addi a0, sp, 16
4018; CHECK-V-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
4019; CHECK-V-NEXT:    call __extendhfsf2
4020; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
4021; CHECK-V-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
4022; CHECK-V-NEXT:    vmv.s.x v8, a0
4023; CHECK-V-NEXT:    addi a0, sp, 16
4024; CHECK-V-NEXT:    vl1r.v v9, (a0) # Unknown-size Folded Reload
4025; CHECK-V-NEXT:    vslideup.vi v8, v9, 1
4026; CHECK-V-NEXT:    csrr a0, vlenb
4027; CHECK-V-NEXT:    add a0, sp, a0
4028; CHECK-V-NEXT:    addi a0, a0, 16
4029; CHECK-V-NEXT:    vs2r.v v8, (a0) # Unknown-size Folded Spill
4030; CHECK-V-NEXT:    fmv.w.x fa0, s1
4031; CHECK-V-NEXT:    call __extendhfsf2
4032; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
4033; CHECK-V-NEXT:    fmv.w.x fa0, s0
4034; CHECK-V-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
4035; CHECK-V-NEXT:    vmv.s.x v8, a0
4036; CHECK-V-NEXT:    addi a0, sp, 16
4037; CHECK-V-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
4038; CHECK-V-NEXT:    call __extendhfsf2
4039; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
4040; CHECK-V-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
4041; CHECK-V-NEXT:    vmv.s.x v10, a0
4042; CHECK-V-NEXT:    addi a0, sp, 16
4043; CHECK-V-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
4044; CHECK-V-NEXT:    vslideup.vi v10, v8, 1
4045; CHECK-V-NEXT:    csrr a0, vlenb
4046; CHECK-V-NEXT:    add a0, sp, a0
4047; CHECK-V-NEXT:    addi a0, a0, 16
4048; CHECK-V-NEXT:    vl2r.v v8, (a0) # Unknown-size Folded Reload
4049; CHECK-V-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
4050; CHECK-V-NEXT:    vslideup.vi v10, v8, 2
4051; CHECK-V-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
4052; CHECK-V-NEXT:    vnclip.wi v8, v10, 0
4053; CHECK-V-NEXT:    csrr a0, vlenb
4054; CHECK-V-NEXT:    slli a1, a0, 1
4055; CHECK-V-NEXT:    add a0, a1, a0
4056; CHECK-V-NEXT:    add sp, sp, a0
4057; CHECK-V-NEXT:    .cfi_def_cfa sp, 48
4058; CHECK-V-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
4059; CHECK-V-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
4060; CHECK-V-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
4061; CHECK-V-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
4062; CHECK-V-NEXT:    .cfi_restore ra
4063; CHECK-V-NEXT:    .cfi_restore s0
4064; CHECK-V-NEXT:    .cfi_restore s1
4065; CHECK-V-NEXT:    .cfi_restore s2
4066; CHECK-V-NEXT:    addi sp, sp, 48
4067; CHECK-V-NEXT:    .cfi_def_cfa_offset 0
4068; CHECK-V-NEXT:    ret
4069entry:
4070  %conv = fptosi <4 x half> %x to <4 x i64>
4071  %spec.store.select = call <4 x i64> @llvm.smin.v4i64(<4 x i64> %conv, <4 x i64> <i64 2147483647, i64 2147483647, i64 2147483647, i64 2147483647>)
4072  %spec.store.select7 = call <4 x i64> @llvm.smax.v4i64(<4 x i64> %spec.store.select, <4 x i64> <i64 -2147483648, i64 -2147483648, i64 -2147483648, i64 -2147483648>)
4073  %conv6 = trunc <4 x i64> %spec.store.select7 to <4 x i32>
4074  ret <4 x i32> %conv6
4075}
4076
4077define <4 x i32> @utesth_f16i32_mm(<4 x half> %x) {
4078; CHECK-NOV-LABEL: utesth_f16i32_mm:
4079; CHECK-NOV:       # %bb.0: # %entry
4080; CHECK-NOV-NEXT:    addi sp, sp, -64
4081; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 64
4082; CHECK-NOV-NEXT:    sd ra, 56(sp) # 8-byte Folded Spill
4083; CHECK-NOV-NEXT:    sd s0, 48(sp) # 8-byte Folded Spill
4084; CHECK-NOV-NEXT:    sd s1, 40(sp) # 8-byte Folded Spill
4085; CHECK-NOV-NEXT:    sd s2, 32(sp) # 8-byte Folded Spill
4086; CHECK-NOV-NEXT:    sd s3, 24(sp) # 8-byte Folded Spill
4087; CHECK-NOV-NEXT:    fsd fs0, 16(sp) # 8-byte Folded Spill
4088; CHECK-NOV-NEXT:    fsd fs1, 8(sp) # 8-byte Folded Spill
4089; CHECK-NOV-NEXT:    fsd fs2, 0(sp) # 8-byte Folded Spill
4090; CHECK-NOV-NEXT:    .cfi_offset ra, -8
4091; CHECK-NOV-NEXT:    .cfi_offset s0, -16
4092; CHECK-NOV-NEXT:    .cfi_offset s1, -24
4093; CHECK-NOV-NEXT:    .cfi_offset s2, -32
4094; CHECK-NOV-NEXT:    .cfi_offset s3, -40
4095; CHECK-NOV-NEXT:    .cfi_offset fs0, -48
4096; CHECK-NOV-NEXT:    .cfi_offset fs1, -56
4097; CHECK-NOV-NEXT:    .cfi_offset fs2, -64
4098; CHECK-NOV-NEXT:    .cfi_remember_state
4099; CHECK-NOV-NEXT:    lhu s1, 0(a1)
4100; CHECK-NOV-NEXT:    lhu a2, 8(a1)
4101; CHECK-NOV-NEXT:    lhu s2, 16(a1)
4102; CHECK-NOV-NEXT:    lhu s3, 24(a1)
4103; CHECK-NOV-NEXT:    mv s0, a0
4104; CHECK-NOV-NEXT:    fmv.w.x fa0, a2
4105; CHECK-NOV-NEXT:    call __extendhfsf2
4106; CHECK-NOV-NEXT:    fmv.s fs2, fa0
4107; CHECK-NOV-NEXT:    fmv.w.x fa0, s2
4108; CHECK-NOV-NEXT:    call __extendhfsf2
4109; CHECK-NOV-NEXT:    fmv.s fs1, fa0
4110; CHECK-NOV-NEXT:    fmv.w.x fa0, s3
4111; CHECK-NOV-NEXT:    call __extendhfsf2
4112; CHECK-NOV-NEXT:    fmv.s fs0, fa0
4113; CHECK-NOV-NEXT:    fmv.w.x fa0, s1
4114; CHECK-NOV-NEXT:    fcvt.lu.s s1, fs2, rtz
4115; CHECK-NOV-NEXT:    call __extendhfsf2
4116; CHECK-NOV-NEXT:    fcvt.lu.s a0, fa0, rtz
4117; CHECK-NOV-NEXT:    li a1, -1
4118; CHECK-NOV-NEXT:    srli a1, a1, 32
4119; CHECK-NOV-NEXT:    bgeu a0, a1, .LBB34_6
4120; CHECK-NOV-NEXT:  # %bb.1: # %entry
4121; CHECK-NOV-NEXT:    fcvt.lu.s a2, fs1, rtz
4122; CHECK-NOV-NEXT:    bgeu s1, a1, .LBB34_7
4123; CHECK-NOV-NEXT:  .LBB34_2: # %entry
4124; CHECK-NOV-NEXT:    fcvt.lu.s a3, fs0, rtz
4125; CHECK-NOV-NEXT:    bgeu a2, a1, .LBB34_8
4126; CHECK-NOV-NEXT:  .LBB34_3: # %entry
4127; CHECK-NOV-NEXT:    bltu a3, a1, .LBB34_5
4128; CHECK-NOV-NEXT:  .LBB34_4: # %entry
4129; CHECK-NOV-NEXT:    mv a3, a1
4130; CHECK-NOV-NEXT:  .LBB34_5: # %entry
4131; CHECK-NOV-NEXT:    sw a0, 0(s0)
4132; CHECK-NOV-NEXT:    sw s1, 4(s0)
4133; CHECK-NOV-NEXT:    sw a2, 8(s0)
4134; CHECK-NOV-NEXT:    sw a3, 12(s0)
4135; CHECK-NOV-NEXT:    ld ra, 56(sp) # 8-byte Folded Reload
4136; CHECK-NOV-NEXT:    ld s0, 48(sp) # 8-byte Folded Reload
4137; CHECK-NOV-NEXT:    ld s1, 40(sp) # 8-byte Folded Reload
4138; CHECK-NOV-NEXT:    ld s2, 32(sp) # 8-byte Folded Reload
4139; CHECK-NOV-NEXT:    ld s3, 24(sp) # 8-byte Folded Reload
4140; CHECK-NOV-NEXT:    fld fs0, 16(sp) # 8-byte Folded Reload
4141; CHECK-NOV-NEXT:    fld fs1, 8(sp) # 8-byte Folded Reload
4142; CHECK-NOV-NEXT:    fld fs2, 0(sp) # 8-byte Folded Reload
4143; CHECK-NOV-NEXT:    .cfi_restore ra
4144; CHECK-NOV-NEXT:    .cfi_restore s0
4145; CHECK-NOV-NEXT:    .cfi_restore s1
4146; CHECK-NOV-NEXT:    .cfi_restore s2
4147; CHECK-NOV-NEXT:    .cfi_restore s3
4148; CHECK-NOV-NEXT:    .cfi_restore fs0
4149; CHECK-NOV-NEXT:    .cfi_restore fs1
4150; CHECK-NOV-NEXT:    .cfi_restore fs2
4151; CHECK-NOV-NEXT:    addi sp, sp, 64
4152; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 0
4153; CHECK-NOV-NEXT:    ret
4154; CHECK-NOV-NEXT:  .LBB34_6: # %entry
4155; CHECK-NOV-NEXT:    .cfi_restore_state
4156; CHECK-NOV-NEXT:    mv a0, a1
4157; CHECK-NOV-NEXT:    fcvt.lu.s a2, fs1, rtz
4158; CHECK-NOV-NEXT:    bltu s1, a1, .LBB34_2
4159; CHECK-NOV-NEXT:  .LBB34_7: # %entry
4160; CHECK-NOV-NEXT:    mv s1, a1
4161; CHECK-NOV-NEXT:    fcvt.lu.s a3, fs0, rtz
4162; CHECK-NOV-NEXT:    bltu a2, a1, .LBB34_3
4163; CHECK-NOV-NEXT:  .LBB34_8: # %entry
4164; CHECK-NOV-NEXT:    mv a2, a1
4165; CHECK-NOV-NEXT:    bgeu a3, a1, .LBB34_4
4166; CHECK-NOV-NEXT:    j .LBB34_5
4167;
4168; CHECK-V-LABEL: utesth_f16i32_mm:
4169; CHECK-V:       # %bb.0: # %entry
4170; CHECK-V-NEXT:    addi sp, sp, -48
4171; CHECK-V-NEXT:    .cfi_def_cfa_offset 48
4172; CHECK-V-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
4173; CHECK-V-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
4174; CHECK-V-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
4175; CHECK-V-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
4176; CHECK-V-NEXT:    .cfi_offset ra, -8
4177; CHECK-V-NEXT:    .cfi_offset s0, -16
4178; CHECK-V-NEXT:    .cfi_offset s1, -24
4179; CHECK-V-NEXT:    .cfi_offset s2, -32
4180; CHECK-V-NEXT:    csrr a1, vlenb
4181; CHECK-V-NEXT:    slli a2, a1, 1
4182; CHECK-V-NEXT:    add a1, a2, a1
4183; CHECK-V-NEXT:    sub sp, sp, a1
4184; CHECK-V-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 3 * vlenb
4185; CHECK-V-NEXT:    lhu s0, 0(a0)
4186; CHECK-V-NEXT:    lhu s1, 8(a0)
4187; CHECK-V-NEXT:    lhu s2, 16(a0)
4188; CHECK-V-NEXT:    lhu a0, 24(a0)
4189; CHECK-V-NEXT:    fmv.w.x fa0, a0
4190; CHECK-V-NEXT:    call __extendhfsf2
4191; CHECK-V-NEXT:    fcvt.lu.s a0, fa0, rtz
4192; CHECK-V-NEXT:    fmv.w.x fa0, s2
4193; CHECK-V-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
4194; CHECK-V-NEXT:    vmv.s.x v8, a0
4195; CHECK-V-NEXT:    addi a0, sp, 16
4196; CHECK-V-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
4197; CHECK-V-NEXT:    call __extendhfsf2
4198; CHECK-V-NEXT:    fcvt.lu.s a0, fa0, rtz
4199; CHECK-V-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
4200; CHECK-V-NEXT:    vmv.s.x v8, a0
4201; CHECK-V-NEXT:    addi a0, sp, 16
4202; CHECK-V-NEXT:    vl1r.v v9, (a0) # Unknown-size Folded Reload
4203; CHECK-V-NEXT:    vslideup.vi v8, v9, 1
4204; CHECK-V-NEXT:    csrr a0, vlenb
4205; CHECK-V-NEXT:    add a0, sp, a0
4206; CHECK-V-NEXT:    addi a0, a0, 16
4207; CHECK-V-NEXT:    vs2r.v v8, (a0) # Unknown-size Folded Spill
4208; CHECK-V-NEXT:    fmv.w.x fa0, s1
4209; CHECK-V-NEXT:    call __extendhfsf2
4210; CHECK-V-NEXT:    fcvt.lu.s a0, fa0, rtz
4211; CHECK-V-NEXT:    fmv.w.x fa0, s0
4212; CHECK-V-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
4213; CHECK-V-NEXT:    vmv.s.x v8, a0
4214; CHECK-V-NEXT:    addi a0, sp, 16
4215; CHECK-V-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
4216; CHECK-V-NEXT:    call __extendhfsf2
4217; CHECK-V-NEXT:    fcvt.lu.s a0, fa0, rtz
4218; CHECK-V-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
4219; CHECK-V-NEXT:    vmv.s.x v10, a0
4220; CHECK-V-NEXT:    addi a0, sp, 16
4221; CHECK-V-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
4222; CHECK-V-NEXT:    vslideup.vi v10, v8, 1
4223; CHECK-V-NEXT:    csrr a0, vlenb
4224; CHECK-V-NEXT:    add a0, sp, a0
4225; CHECK-V-NEXT:    addi a0, a0, 16
4226; CHECK-V-NEXT:    vl2r.v v8, (a0) # Unknown-size Folded Reload
4227; CHECK-V-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
4228; CHECK-V-NEXT:    vslideup.vi v10, v8, 2
4229; CHECK-V-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
4230; CHECK-V-NEXT:    vnclipu.wi v8, v10, 0
4231; CHECK-V-NEXT:    csrr a0, vlenb
4232; CHECK-V-NEXT:    slli a1, a0, 1
4233; CHECK-V-NEXT:    add a0, a1, a0
4234; CHECK-V-NEXT:    add sp, sp, a0
4235; CHECK-V-NEXT:    .cfi_def_cfa sp, 48
4236; CHECK-V-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
4237; CHECK-V-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
4238; CHECK-V-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
4239; CHECK-V-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
4240; CHECK-V-NEXT:    .cfi_restore ra
4241; CHECK-V-NEXT:    .cfi_restore s0
4242; CHECK-V-NEXT:    .cfi_restore s1
4243; CHECK-V-NEXT:    .cfi_restore s2
4244; CHECK-V-NEXT:    addi sp, sp, 48
4245; CHECK-V-NEXT:    .cfi_def_cfa_offset 0
4246; CHECK-V-NEXT:    ret
4247entry:
4248  %conv = fptoui <4 x half> %x to <4 x i64>
4249  %spec.store.select = call <4 x i64> @llvm.umin.v4i64(<4 x i64> %conv, <4 x i64> <i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295>)
4250  %conv6 = trunc <4 x i64> %spec.store.select to <4 x i32>
4251  ret <4 x i32> %conv6
4252}
4253
4254define <4 x i32> @ustest_f16i32_mm(<4 x half> %x) {
4255; CHECK-NOV-LABEL: ustest_f16i32_mm:
4256; CHECK-NOV:       # %bb.0: # %entry
4257; CHECK-NOV-NEXT:    addi sp, sp, -64
4258; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 64
4259; CHECK-NOV-NEXT:    sd ra, 56(sp) # 8-byte Folded Spill
4260; CHECK-NOV-NEXT:    sd s0, 48(sp) # 8-byte Folded Spill
4261; CHECK-NOV-NEXT:    sd s1, 40(sp) # 8-byte Folded Spill
4262; CHECK-NOV-NEXT:    sd s2, 32(sp) # 8-byte Folded Spill
4263; CHECK-NOV-NEXT:    sd s3, 24(sp) # 8-byte Folded Spill
4264; CHECK-NOV-NEXT:    fsd fs0, 16(sp) # 8-byte Folded Spill
4265; CHECK-NOV-NEXT:    fsd fs1, 8(sp) # 8-byte Folded Spill
4266; CHECK-NOV-NEXT:    fsd fs2, 0(sp) # 8-byte Folded Spill
4267; CHECK-NOV-NEXT:    .cfi_offset ra, -8
4268; CHECK-NOV-NEXT:    .cfi_offset s0, -16
4269; CHECK-NOV-NEXT:    .cfi_offset s1, -24
4270; CHECK-NOV-NEXT:    .cfi_offset s2, -32
4271; CHECK-NOV-NEXT:    .cfi_offset s3, -40
4272; CHECK-NOV-NEXT:    .cfi_offset fs0, -48
4273; CHECK-NOV-NEXT:    .cfi_offset fs1, -56
4274; CHECK-NOV-NEXT:    .cfi_offset fs2, -64
4275; CHECK-NOV-NEXT:    .cfi_remember_state
4276; CHECK-NOV-NEXT:    lhu s1, 0(a1)
4277; CHECK-NOV-NEXT:    lhu s2, 8(a1)
4278; CHECK-NOV-NEXT:    lhu a2, 16(a1)
4279; CHECK-NOV-NEXT:    lhu s3, 24(a1)
4280; CHECK-NOV-NEXT:    mv s0, a0
4281; CHECK-NOV-NEXT:    fmv.w.x fa0, a2
4282; CHECK-NOV-NEXT:    call __extendhfsf2
4283; CHECK-NOV-NEXT:    fmv.s fs2, fa0
4284; CHECK-NOV-NEXT:    fmv.w.x fa0, s2
4285; CHECK-NOV-NEXT:    call __extendhfsf2
4286; CHECK-NOV-NEXT:    fmv.s fs1, fa0
4287; CHECK-NOV-NEXT:    fmv.w.x fa0, s1
4288; CHECK-NOV-NEXT:    call __extendhfsf2
4289; CHECK-NOV-NEXT:    fmv.s fs0, fa0
4290; CHECK-NOV-NEXT:    fmv.w.x fa0, s3
4291; CHECK-NOV-NEXT:    fcvt.l.s s1, fs2, rtz
4292; CHECK-NOV-NEXT:    call __extendhfsf2
4293; CHECK-NOV-NEXT:    fcvt.l.s a0, fa0, rtz
4294; CHECK-NOV-NEXT:    li a2, -1
4295; CHECK-NOV-NEXT:    srli a2, a2, 32
4296; CHECK-NOV-NEXT:    bge a0, a2, .LBB35_6
4297; CHECK-NOV-NEXT:  # %bb.1: # %entry
4298; CHECK-NOV-NEXT:    fcvt.l.s a1, fs1, rtz
4299; CHECK-NOV-NEXT:    bge s1, a2, .LBB35_7
4300; CHECK-NOV-NEXT:  .LBB35_2: # %entry
4301; CHECK-NOV-NEXT:    fcvt.l.s a3, fs0, rtz
4302; CHECK-NOV-NEXT:    bge a1, a2, .LBB35_8
4303; CHECK-NOV-NEXT:  .LBB35_3: # %entry
4304; CHECK-NOV-NEXT:    blt a3, a2, .LBB35_5
4305; CHECK-NOV-NEXT:  .LBB35_4: # %entry
4306; CHECK-NOV-NEXT:    mv a3, a2
4307; CHECK-NOV-NEXT:  .LBB35_5: # %entry
4308; CHECK-NOV-NEXT:    sgtz a2, a3
4309; CHECK-NOV-NEXT:    negw a2, a2
4310; CHECK-NOV-NEXT:    and a2, a2, a3
4311; CHECK-NOV-NEXT:    sgtz a3, a1
4312; CHECK-NOV-NEXT:    negw a3, a3
4313; CHECK-NOV-NEXT:    and a1, a3, a1
4314; CHECK-NOV-NEXT:    sgtz a3, s1
4315; CHECK-NOV-NEXT:    negw a3, a3
4316; CHECK-NOV-NEXT:    and a3, a3, s1
4317; CHECK-NOV-NEXT:    sgtz a4, a0
4318; CHECK-NOV-NEXT:    negw a4, a4
4319; CHECK-NOV-NEXT:    and a0, a4, a0
4320; CHECK-NOV-NEXT:    sw a2, 0(s0)
4321; CHECK-NOV-NEXT:    sw a1, 4(s0)
4322; CHECK-NOV-NEXT:    sw a3, 8(s0)
4323; CHECK-NOV-NEXT:    sw a0, 12(s0)
4324; CHECK-NOV-NEXT:    ld ra, 56(sp) # 8-byte Folded Reload
4325; CHECK-NOV-NEXT:    ld s0, 48(sp) # 8-byte Folded Reload
4326; CHECK-NOV-NEXT:    ld s1, 40(sp) # 8-byte Folded Reload
4327; CHECK-NOV-NEXT:    ld s2, 32(sp) # 8-byte Folded Reload
4328; CHECK-NOV-NEXT:    ld s3, 24(sp) # 8-byte Folded Reload
4329; CHECK-NOV-NEXT:    fld fs0, 16(sp) # 8-byte Folded Reload
4330; CHECK-NOV-NEXT:    fld fs1, 8(sp) # 8-byte Folded Reload
4331; CHECK-NOV-NEXT:    fld fs2, 0(sp) # 8-byte Folded Reload
4332; CHECK-NOV-NEXT:    .cfi_restore ra
4333; CHECK-NOV-NEXT:    .cfi_restore s0
4334; CHECK-NOV-NEXT:    .cfi_restore s1
4335; CHECK-NOV-NEXT:    .cfi_restore s2
4336; CHECK-NOV-NEXT:    .cfi_restore s3
4337; CHECK-NOV-NEXT:    .cfi_restore fs0
4338; CHECK-NOV-NEXT:    .cfi_restore fs1
4339; CHECK-NOV-NEXT:    .cfi_restore fs2
4340; CHECK-NOV-NEXT:    addi sp, sp, 64
4341; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 0
4342; CHECK-NOV-NEXT:    ret
4343; CHECK-NOV-NEXT:  .LBB35_6: # %entry
4344; CHECK-NOV-NEXT:    .cfi_restore_state
4345; CHECK-NOV-NEXT:    mv a0, a2
4346; CHECK-NOV-NEXT:    fcvt.l.s a1, fs1, rtz
4347; CHECK-NOV-NEXT:    blt s1, a2, .LBB35_2
4348; CHECK-NOV-NEXT:  .LBB35_7: # %entry
4349; CHECK-NOV-NEXT:    mv s1, a2
4350; CHECK-NOV-NEXT:    fcvt.l.s a3, fs0, rtz
4351; CHECK-NOV-NEXT:    blt a1, a2, .LBB35_3
4352; CHECK-NOV-NEXT:  .LBB35_8: # %entry
4353; CHECK-NOV-NEXT:    mv a1, a2
4354; CHECK-NOV-NEXT:    bge a3, a2, .LBB35_4
4355; CHECK-NOV-NEXT:    j .LBB35_5
4356;
4357; CHECK-V-LABEL: ustest_f16i32_mm:
4358; CHECK-V:       # %bb.0: # %entry
4359; CHECK-V-NEXT:    addi sp, sp, -48
4360; CHECK-V-NEXT:    .cfi_def_cfa_offset 48
4361; CHECK-V-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
4362; CHECK-V-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
4363; CHECK-V-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
4364; CHECK-V-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
4365; CHECK-V-NEXT:    .cfi_offset ra, -8
4366; CHECK-V-NEXT:    .cfi_offset s0, -16
4367; CHECK-V-NEXT:    .cfi_offset s1, -24
4368; CHECK-V-NEXT:    .cfi_offset s2, -32
4369; CHECK-V-NEXT:    csrr a1, vlenb
4370; CHECK-V-NEXT:    slli a2, a1, 1
4371; CHECK-V-NEXT:    add a1, a2, a1
4372; CHECK-V-NEXT:    sub sp, sp, a1
4373; CHECK-V-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 3 * vlenb
4374; CHECK-V-NEXT:    lhu s0, 0(a0)
4375; CHECK-V-NEXT:    lhu s1, 8(a0)
4376; CHECK-V-NEXT:    lhu s2, 16(a0)
4377; CHECK-V-NEXT:    lhu a0, 24(a0)
4378; CHECK-V-NEXT:    fmv.w.x fa0, a0
4379; CHECK-V-NEXT:    call __extendhfsf2
4380; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
4381; CHECK-V-NEXT:    fmv.w.x fa0, s2
4382; CHECK-V-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
4383; CHECK-V-NEXT:    vmv.s.x v8, a0
4384; CHECK-V-NEXT:    addi a0, sp, 16
4385; CHECK-V-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
4386; CHECK-V-NEXT:    call __extendhfsf2
4387; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
4388; CHECK-V-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
4389; CHECK-V-NEXT:    vmv.s.x v8, a0
4390; CHECK-V-NEXT:    addi a0, sp, 16
4391; CHECK-V-NEXT:    vl1r.v v9, (a0) # Unknown-size Folded Reload
4392; CHECK-V-NEXT:    vslideup.vi v8, v9, 1
4393; CHECK-V-NEXT:    csrr a0, vlenb
4394; CHECK-V-NEXT:    add a0, sp, a0
4395; CHECK-V-NEXT:    addi a0, a0, 16
4396; CHECK-V-NEXT:    vs2r.v v8, (a0) # Unknown-size Folded Spill
4397; CHECK-V-NEXT:    fmv.w.x fa0, s1
4398; CHECK-V-NEXT:    call __extendhfsf2
4399; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
4400; CHECK-V-NEXT:    fmv.w.x fa0, s0
4401; CHECK-V-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
4402; CHECK-V-NEXT:    vmv.s.x v8, a0
4403; CHECK-V-NEXT:    addi a0, sp, 16
4404; CHECK-V-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
4405; CHECK-V-NEXT:    call __extendhfsf2
4406; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
4407; CHECK-V-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
4408; CHECK-V-NEXT:    vmv.s.x v8, a0
4409; CHECK-V-NEXT:    addi a0, sp, 16
4410; CHECK-V-NEXT:    vl1r.v v9, (a0) # Unknown-size Folded Reload
4411; CHECK-V-NEXT:    vslideup.vi v8, v9, 1
4412; CHECK-V-NEXT:    csrr a0, vlenb
4413; CHECK-V-NEXT:    add a0, sp, a0
4414; CHECK-V-NEXT:    addi a0, a0, 16
4415; CHECK-V-NEXT:    vl2r.v v10, (a0) # Unknown-size Folded Reload
4416; CHECK-V-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
4417; CHECK-V-NEXT:    vslideup.vi v8, v10, 2
4418; CHECK-V-NEXT:    vmax.vx v10, v8, zero
4419; CHECK-V-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
4420; CHECK-V-NEXT:    vnclipu.wi v8, v10, 0
4421; CHECK-V-NEXT:    csrr a0, vlenb
4422; CHECK-V-NEXT:    slli a1, a0, 1
4423; CHECK-V-NEXT:    add a0, a1, a0
4424; CHECK-V-NEXT:    add sp, sp, a0
4425; CHECK-V-NEXT:    .cfi_def_cfa sp, 48
4426; CHECK-V-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
4427; CHECK-V-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
4428; CHECK-V-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
4429; CHECK-V-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
4430; CHECK-V-NEXT:    .cfi_restore ra
4431; CHECK-V-NEXT:    .cfi_restore s0
4432; CHECK-V-NEXT:    .cfi_restore s1
4433; CHECK-V-NEXT:    .cfi_restore s2
4434; CHECK-V-NEXT:    addi sp, sp, 48
4435; CHECK-V-NEXT:    .cfi_def_cfa_offset 0
4436; CHECK-V-NEXT:    ret
4437entry:
4438  %conv = fptosi <4 x half> %x to <4 x i64>
4439  %spec.store.select = call <4 x i64> @llvm.smin.v4i64(<4 x i64> %conv, <4 x i64> <i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295>)
4440  %spec.store.select7 = call <4 x i64> @llvm.smax.v4i64(<4 x i64> %spec.store.select, <4 x i64> zeroinitializer)
4441  %conv6 = trunc <4 x i64> %spec.store.select7 to <4 x i32>
4442  ret <4 x i32> %conv6
4443}
4444
4445; i16 saturate
4446
4447define <2 x i16> @stest_f64i16_mm(<2 x double> %x) {
4448; CHECK-NOV-LABEL: stest_f64i16_mm:
4449; CHECK-NOV:       # %bb.0: # %entry
4450; CHECK-NOV-NEXT:    fcvt.w.d a1, fa1, rtz
4451; CHECK-NOV-NEXT:    lui a2, 8
4452; CHECK-NOV-NEXT:    addiw a2, a2, -1
4453; CHECK-NOV-NEXT:    fcvt.w.d a0, fa0, rtz
4454; CHECK-NOV-NEXT:    bge a1, a2, .LBB36_5
4455; CHECK-NOV-NEXT:  # %bb.1: # %entry
4456; CHECK-NOV-NEXT:    bge a0, a2, .LBB36_6
4457; CHECK-NOV-NEXT:  .LBB36_2: # %entry
4458; CHECK-NOV-NEXT:    lui a2, 1048568
4459; CHECK-NOV-NEXT:    bge a2, a0, .LBB36_7
4460; CHECK-NOV-NEXT:  .LBB36_3: # %entry
4461; CHECK-NOV-NEXT:    bge a2, a1, .LBB36_8
4462; CHECK-NOV-NEXT:  .LBB36_4: # %entry
4463; CHECK-NOV-NEXT:    ret
4464; CHECK-NOV-NEXT:  .LBB36_5: # %entry
4465; CHECK-NOV-NEXT:    mv a1, a2
4466; CHECK-NOV-NEXT:    blt a0, a2, .LBB36_2
4467; CHECK-NOV-NEXT:  .LBB36_6: # %entry
4468; CHECK-NOV-NEXT:    mv a0, a2
4469; CHECK-NOV-NEXT:    lui a2, 1048568
4470; CHECK-NOV-NEXT:    blt a2, a0, .LBB36_3
4471; CHECK-NOV-NEXT:  .LBB36_7: # %entry
4472; CHECK-NOV-NEXT:    lui a0, 1048568
4473; CHECK-NOV-NEXT:    blt a2, a1, .LBB36_4
4474; CHECK-NOV-NEXT:  .LBB36_8: # %entry
4475; CHECK-NOV-NEXT:    lui a1, 1048568
4476; CHECK-NOV-NEXT:    ret
4477;
4478; CHECK-V-LABEL: stest_f64i16_mm:
4479; CHECK-V:       # %bb.0: # %entry
4480; CHECK-V-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
4481; CHECK-V-NEXT:    vfncvt.rtz.x.f.w v9, v8
4482; CHECK-V-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
4483; CHECK-V-NEXT:    vnclip.wi v8, v9, 0
4484; CHECK-V-NEXT:    ret
4485entry:
4486  %conv = fptosi <2 x double> %x to <2 x i32>
4487  %spec.store.select = call <2 x i32> @llvm.smin.v2i32(<2 x i32> %conv, <2 x i32> <i32 32767, i32 32767>)
4488  %spec.store.select7 = call <2 x i32> @llvm.smax.v2i32(<2 x i32> %spec.store.select, <2 x i32> <i32 -32768, i32 -32768>)
4489  %conv6 = trunc <2 x i32> %spec.store.select7 to <2 x i16>
4490  ret <2 x i16> %conv6
4491}
4492
4493define <2 x i16> @utest_f64i16_mm(<2 x double> %x) {
4494; CHECK-NOV-LABEL: utest_f64i16_mm:
4495; CHECK-NOV:       # %bb.0: # %entry
4496; CHECK-NOV-NEXT:    fcvt.wu.d a0, fa0, rtz
4497; CHECK-NOV-NEXT:    lui a2, 16
4498; CHECK-NOV-NEXT:    addiw a2, a2, -1
4499; CHECK-NOV-NEXT:    fcvt.wu.d a1, fa1, rtz
4500; CHECK-NOV-NEXT:    bgeu a0, a2, .LBB37_3
4501; CHECK-NOV-NEXT:  # %bb.1: # %entry
4502; CHECK-NOV-NEXT:    bgeu a1, a2, .LBB37_4
4503; CHECK-NOV-NEXT:  .LBB37_2: # %entry
4504; CHECK-NOV-NEXT:    ret
4505; CHECK-NOV-NEXT:  .LBB37_3: # %entry
4506; CHECK-NOV-NEXT:    mv a0, a2
4507; CHECK-NOV-NEXT:    bltu a1, a2, .LBB37_2
4508; CHECK-NOV-NEXT:  .LBB37_4: # %entry
4509; CHECK-NOV-NEXT:    mv a1, a2
4510; CHECK-NOV-NEXT:    ret
4511;
4512; CHECK-V-LABEL: utest_f64i16_mm:
4513; CHECK-V:       # %bb.0: # %entry
4514; CHECK-V-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
4515; CHECK-V-NEXT:    vfncvt.rtz.xu.f.w v9, v8
4516; CHECK-V-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
4517; CHECK-V-NEXT:    vnclipu.wi v8, v9, 0
4518; CHECK-V-NEXT:    ret
4519entry:
4520  %conv = fptoui <2 x double> %x to <2 x i32>
4521  %spec.store.select = call <2 x i32> @llvm.umin.v2i32(<2 x i32> %conv, <2 x i32> <i32 65535, i32 65535>)
4522  %conv6 = trunc <2 x i32> %spec.store.select to <2 x i16>
4523  ret <2 x i16> %conv6
4524}
4525
4526define <2 x i16> @ustest_f64i16_mm(<2 x double> %x) {
4527; CHECK-NOV-LABEL: ustest_f64i16_mm:
4528; CHECK-NOV:       # %bb.0: # %entry
4529; CHECK-NOV-NEXT:    fcvt.w.d a1, fa1, rtz
4530; CHECK-NOV-NEXT:    lui a2, 16
4531; CHECK-NOV-NEXT:    addiw a2, a2, -1
4532; CHECK-NOV-NEXT:    fcvt.w.d a0, fa0, rtz
4533; CHECK-NOV-NEXT:    blt a1, a2, .LBB38_2
4534; CHECK-NOV-NEXT:  # %bb.1: # %entry
4535; CHECK-NOV-NEXT:    mv a1, a2
4536; CHECK-NOV-NEXT:  .LBB38_2: # %entry
4537; CHECK-NOV-NEXT:    blt a0, a2, .LBB38_4
4538; CHECK-NOV-NEXT:  # %bb.3: # %entry
4539; CHECK-NOV-NEXT:    mv a0, a2
4540; CHECK-NOV-NEXT:  .LBB38_4: # %entry
4541; CHECK-NOV-NEXT:    sgtz a2, a0
4542; CHECK-NOV-NEXT:    neg a2, a2
4543; CHECK-NOV-NEXT:    and a0, a2, a0
4544; CHECK-NOV-NEXT:    sgtz a2, a1
4545; CHECK-NOV-NEXT:    neg a2, a2
4546; CHECK-NOV-NEXT:    and a1, a2, a1
4547; CHECK-NOV-NEXT:    ret
4548;
4549; CHECK-V-LABEL: ustest_f64i16_mm:
4550; CHECK-V:       # %bb.0: # %entry
4551; CHECK-V-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
4552; CHECK-V-NEXT:    vfncvt.rtz.x.f.w v9, v8
4553; CHECK-V-NEXT:    vmax.vx v8, v9, zero
4554; CHECK-V-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
4555; CHECK-V-NEXT:    vnclipu.wi v8, v8, 0
4556; CHECK-V-NEXT:    ret
4557entry:
4558  %conv = fptosi <2 x double> %x to <2 x i32>
4559  %spec.store.select = call <2 x i32> @llvm.smin.v2i32(<2 x i32> %conv, <2 x i32> <i32 65535, i32 65535>)
4560  %spec.store.select7 = call <2 x i32> @llvm.smax.v2i32(<2 x i32> %spec.store.select, <2 x i32> zeroinitializer)
4561  %conv6 = trunc <2 x i32> %spec.store.select7 to <2 x i16>
4562  ret <2 x i16> %conv6
4563}
4564
4565define <4 x i16> @stest_f32i16_mm(<4 x float> %x) {
4566; CHECK-NOV-LABEL: stest_f32i16_mm:
4567; CHECK-NOV:       # %bb.0: # %entry
4568; CHECK-NOV-NEXT:    fcvt.w.s a1, fa3, rtz
4569; CHECK-NOV-NEXT:    lui a5, 8
4570; CHECK-NOV-NEXT:    addiw a5, a5, -1
4571; CHECK-NOV-NEXT:    fcvt.w.s a2, fa2, rtz
4572; CHECK-NOV-NEXT:    bge a1, a5, .LBB39_10
4573; CHECK-NOV-NEXT:  # %bb.1: # %entry
4574; CHECK-NOV-NEXT:    fcvt.w.s a3, fa1, rtz
4575; CHECK-NOV-NEXT:    bge a2, a5, .LBB39_11
4576; CHECK-NOV-NEXT:  .LBB39_2: # %entry
4577; CHECK-NOV-NEXT:    fcvt.w.s a4, fa0, rtz
4578; CHECK-NOV-NEXT:    bge a3, a5, .LBB39_12
4579; CHECK-NOV-NEXT:  .LBB39_3: # %entry
4580; CHECK-NOV-NEXT:    bge a4, a5, .LBB39_13
4581; CHECK-NOV-NEXT:  .LBB39_4: # %entry
4582; CHECK-NOV-NEXT:    lui a5, 1048568
4583; CHECK-NOV-NEXT:    bge a5, a4, .LBB39_14
4584; CHECK-NOV-NEXT:  .LBB39_5: # %entry
4585; CHECK-NOV-NEXT:    bge a5, a3, .LBB39_15
4586; CHECK-NOV-NEXT:  .LBB39_6: # %entry
4587; CHECK-NOV-NEXT:    bge a5, a2, .LBB39_16
4588; CHECK-NOV-NEXT:  .LBB39_7: # %entry
4589; CHECK-NOV-NEXT:    blt a5, a1, .LBB39_9
4590; CHECK-NOV-NEXT:  .LBB39_8: # %entry
4591; CHECK-NOV-NEXT:    lui a1, 1048568
4592; CHECK-NOV-NEXT:  .LBB39_9: # %entry
4593; CHECK-NOV-NEXT:    sh a4, 0(a0)
4594; CHECK-NOV-NEXT:    sh a3, 2(a0)
4595; CHECK-NOV-NEXT:    sh a2, 4(a0)
4596; CHECK-NOV-NEXT:    sh a1, 6(a0)
4597; CHECK-NOV-NEXT:    ret
4598; CHECK-NOV-NEXT:  .LBB39_10: # %entry
4599; CHECK-NOV-NEXT:    mv a1, a5
4600; CHECK-NOV-NEXT:    fcvt.w.s a3, fa1, rtz
4601; CHECK-NOV-NEXT:    blt a2, a5, .LBB39_2
4602; CHECK-NOV-NEXT:  .LBB39_11: # %entry
4603; CHECK-NOV-NEXT:    mv a2, a5
4604; CHECK-NOV-NEXT:    fcvt.w.s a4, fa0, rtz
4605; CHECK-NOV-NEXT:    blt a3, a5, .LBB39_3
4606; CHECK-NOV-NEXT:  .LBB39_12: # %entry
4607; CHECK-NOV-NEXT:    mv a3, a5
4608; CHECK-NOV-NEXT:    blt a4, a5, .LBB39_4
4609; CHECK-NOV-NEXT:  .LBB39_13: # %entry
4610; CHECK-NOV-NEXT:    mv a4, a5
4611; CHECK-NOV-NEXT:    lui a5, 1048568
4612; CHECK-NOV-NEXT:    blt a5, a4, .LBB39_5
4613; CHECK-NOV-NEXT:  .LBB39_14: # %entry
4614; CHECK-NOV-NEXT:    lui a4, 1048568
4615; CHECK-NOV-NEXT:    blt a5, a3, .LBB39_6
4616; CHECK-NOV-NEXT:  .LBB39_15: # %entry
4617; CHECK-NOV-NEXT:    lui a3, 1048568
4618; CHECK-NOV-NEXT:    blt a5, a2, .LBB39_7
4619; CHECK-NOV-NEXT:  .LBB39_16: # %entry
4620; CHECK-NOV-NEXT:    lui a2, 1048568
4621; CHECK-NOV-NEXT:    bge a5, a1, .LBB39_8
4622; CHECK-NOV-NEXT:    j .LBB39_9
4623;
4624; CHECK-V-LABEL: stest_f32i16_mm:
4625; CHECK-V:       # %bb.0: # %entry
4626; CHECK-V-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
4627; CHECK-V-NEXT:    vfcvt.rtz.x.f.v v8, v8
4628; CHECK-V-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
4629; CHECK-V-NEXT:    vnclip.wi v8, v8, 0
4630; CHECK-V-NEXT:    ret
4631entry:
4632  %conv = fptosi <4 x float> %x to <4 x i32>
4633  %spec.store.select = call <4 x i32> @llvm.smin.v4i32(<4 x i32> %conv, <4 x i32> <i32 32767, i32 32767, i32 32767, i32 32767>)
4634  %spec.store.select7 = call <4 x i32> @llvm.smax.v4i32(<4 x i32> %spec.store.select, <4 x i32> <i32 -32768, i32 -32768, i32 -32768, i32 -32768>)
4635  %conv6 = trunc <4 x i32> %spec.store.select7 to <4 x i16>
4636  ret <4 x i16> %conv6
4637}
4638
4639define <4 x i16> @utest_f32i16_mm(<4 x float> %x) {
4640; CHECK-NOV-LABEL: utest_f32i16_mm:
4641; CHECK-NOV:       # %bb.0: # %entry
4642; CHECK-NOV-NEXT:    fcvt.wu.s a1, fa0, rtz
4643; CHECK-NOV-NEXT:    lui a3, 16
4644; CHECK-NOV-NEXT:    addiw a3, a3, -1
4645; CHECK-NOV-NEXT:    fcvt.wu.s a2, fa1, rtz
4646; CHECK-NOV-NEXT:    bgeu a1, a3, .LBB40_6
4647; CHECK-NOV-NEXT:  # %bb.1: # %entry
4648; CHECK-NOV-NEXT:    fcvt.wu.s a4, fa2, rtz
4649; CHECK-NOV-NEXT:    bgeu a2, a3, .LBB40_7
4650; CHECK-NOV-NEXT:  .LBB40_2: # %entry
4651; CHECK-NOV-NEXT:    fcvt.wu.s a5, fa3, rtz
4652; CHECK-NOV-NEXT:    bgeu a4, a3, .LBB40_8
4653; CHECK-NOV-NEXT:  .LBB40_3: # %entry
4654; CHECK-NOV-NEXT:    bltu a5, a3, .LBB40_5
4655; CHECK-NOV-NEXT:  .LBB40_4: # %entry
4656; CHECK-NOV-NEXT:    mv a5, a3
4657; CHECK-NOV-NEXT:  .LBB40_5: # %entry
4658; CHECK-NOV-NEXT:    sh a1, 0(a0)
4659; CHECK-NOV-NEXT:    sh a2, 2(a0)
4660; CHECK-NOV-NEXT:    sh a4, 4(a0)
4661; CHECK-NOV-NEXT:    sh a5, 6(a0)
4662; CHECK-NOV-NEXT:    ret
4663; CHECK-NOV-NEXT:  .LBB40_6: # %entry
4664; CHECK-NOV-NEXT:    mv a1, a3
4665; CHECK-NOV-NEXT:    fcvt.wu.s a4, fa2, rtz
4666; CHECK-NOV-NEXT:    bltu a2, a3, .LBB40_2
4667; CHECK-NOV-NEXT:  .LBB40_7: # %entry
4668; CHECK-NOV-NEXT:    mv a2, a3
4669; CHECK-NOV-NEXT:    fcvt.wu.s a5, fa3, rtz
4670; CHECK-NOV-NEXT:    bltu a4, a3, .LBB40_3
4671; CHECK-NOV-NEXT:  .LBB40_8: # %entry
4672; CHECK-NOV-NEXT:    mv a4, a3
4673; CHECK-NOV-NEXT:    bgeu a5, a3, .LBB40_4
4674; CHECK-NOV-NEXT:    j .LBB40_5
4675;
4676; CHECK-V-LABEL: utest_f32i16_mm:
4677; CHECK-V:       # %bb.0: # %entry
4678; CHECK-V-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
4679; CHECK-V-NEXT:    vfcvt.rtz.xu.f.v v8, v8
4680; CHECK-V-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
4681; CHECK-V-NEXT:    vnclipu.wi v8, v8, 0
4682; CHECK-V-NEXT:    ret
4683entry:
4684  %conv = fptoui <4 x float> %x to <4 x i32>
4685  %spec.store.select = call <4 x i32> @llvm.umin.v4i32(<4 x i32> %conv, <4 x i32> <i32 65535, i32 65535, i32 65535, i32 65535>)
4686  %conv6 = trunc <4 x i32> %spec.store.select to <4 x i16>
4687  ret <4 x i16> %conv6
4688}
4689
4690define <4 x i16> @ustest_f32i16_mm(<4 x float> %x) {
4691; CHECK-NOV-LABEL: ustest_f32i16_mm:
4692; CHECK-NOV:       # %bb.0: # %entry
4693; CHECK-NOV-NEXT:    fcvt.w.s a1, fa3, rtz
4694; CHECK-NOV-NEXT:    lui a3, 16
4695; CHECK-NOV-NEXT:    addiw a3, a3, -1
4696; CHECK-NOV-NEXT:    fcvt.w.s a2, fa2, rtz
4697; CHECK-NOV-NEXT:    bge a1, a3, .LBB41_6
4698; CHECK-NOV-NEXT:  # %bb.1: # %entry
4699; CHECK-NOV-NEXT:    fcvt.w.s a4, fa1, rtz
4700; CHECK-NOV-NEXT:    bge a2, a3, .LBB41_7
4701; CHECK-NOV-NEXT:  .LBB41_2: # %entry
4702; CHECK-NOV-NEXT:    fcvt.w.s a5, fa0, rtz
4703; CHECK-NOV-NEXT:    bge a4, a3, .LBB41_8
4704; CHECK-NOV-NEXT:  .LBB41_3: # %entry
4705; CHECK-NOV-NEXT:    blt a5, a3, .LBB41_5
4706; CHECK-NOV-NEXT:  .LBB41_4: # %entry
4707; CHECK-NOV-NEXT:    mv a5, a3
4708; CHECK-NOV-NEXT:  .LBB41_5: # %entry
4709; CHECK-NOV-NEXT:    sgtz a3, a5
4710; CHECK-NOV-NEXT:    negw a3, a3
4711; CHECK-NOV-NEXT:    and a3, a3, a5
4712; CHECK-NOV-NEXT:    sgtz a5, a4
4713; CHECK-NOV-NEXT:    negw a5, a5
4714; CHECK-NOV-NEXT:    and a4, a5, a4
4715; CHECK-NOV-NEXT:    sgtz a5, a2
4716; CHECK-NOV-NEXT:    negw a5, a5
4717; CHECK-NOV-NEXT:    and a2, a5, a2
4718; CHECK-NOV-NEXT:    sgtz a5, a1
4719; CHECK-NOV-NEXT:    negw a5, a5
4720; CHECK-NOV-NEXT:    and a1, a5, a1
4721; CHECK-NOV-NEXT:    sh a3, 0(a0)
4722; CHECK-NOV-NEXT:    sh a4, 2(a0)
4723; CHECK-NOV-NEXT:    sh a2, 4(a0)
4724; CHECK-NOV-NEXT:    sh a1, 6(a0)
4725; CHECK-NOV-NEXT:    ret
4726; CHECK-NOV-NEXT:  .LBB41_6: # %entry
4727; CHECK-NOV-NEXT:    mv a1, a3
4728; CHECK-NOV-NEXT:    fcvt.w.s a4, fa1, rtz
4729; CHECK-NOV-NEXT:    blt a2, a3, .LBB41_2
4730; CHECK-NOV-NEXT:  .LBB41_7: # %entry
4731; CHECK-NOV-NEXT:    mv a2, a3
4732; CHECK-NOV-NEXT:    fcvt.w.s a5, fa0, rtz
4733; CHECK-NOV-NEXT:    blt a4, a3, .LBB41_3
4734; CHECK-NOV-NEXT:  .LBB41_8: # %entry
4735; CHECK-NOV-NEXT:    mv a4, a3
4736; CHECK-NOV-NEXT:    bge a5, a3, .LBB41_4
4737; CHECK-NOV-NEXT:    j .LBB41_5
4738;
4739; CHECK-V-LABEL: ustest_f32i16_mm:
4740; CHECK-V:       # %bb.0: # %entry
4741; CHECK-V-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
4742; CHECK-V-NEXT:    vfcvt.rtz.x.f.v v8, v8
4743; CHECK-V-NEXT:    vmax.vx v8, v8, zero
4744; CHECK-V-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
4745; CHECK-V-NEXT:    vnclipu.wi v8, v8, 0
4746; CHECK-V-NEXT:    ret
4747entry:
4748  %conv = fptosi <4 x float> %x to <4 x i32>
4749  %spec.store.select = call <4 x i32> @llvm.smin.v4i32(<4 x i32> %conv, <4 x i32> <i32 65535, i32 65535, i32 65535, i32 65535>)
4750  %spec.store.select7 = call <4 x i32> @llvm.smax.v4i32(<4 x i32> %spec.store.select, <4 x i32> zeroinitializer)
4751  %conv6 = trunc <4 x i32> %spec.store.select7 to <4 x i16>
4752  ret <4 x i16> %conv6
4753}
4754
4755define <8 x i16> @stest_f16i16_mm(<8 x half> %x) {
4756; CHECK-NOV-LABEL: stest_f16i16_mm:
4757; CHECK-NOV:       # %bb.0: # %entry
4758; CHECK-NOV-NEXT:    addi sp, sp, -128
4759; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 128
4760; CHECK-NOV-NEXT:    sd ra, 120(sp) # 8-byte Folded Spill
4761; CHECK-NOV-NEXT:    sd s0, 112(sp) # 8-byte Folded Spill
4762; CHECK-NOV-NEXT:    sd s1, 104(sp) # 8-byte Folded Spill
4763; CHECK-NOV-NEXT:    sd s2, 96(sp) # 8-byte Folded Spill
4764; CHECK-NOV-NEXT:    sd s3, 88(sp) # 8-byte Folded Spill
4765; CHECK-NOV-NEXT:    sd s4, 80(sp) # 8-byte Folded Spill
4766; CHECK-NOV-NEXT:    sd s5, 72(sp) # 8-byte Folded Spill
4767; CHECK-NOV-NEXT:    sd s6, 64(sp) # 8-byte Folded Spill
4768; CHECK-NOV-NEXT:    sd s7, 56(sp) # 8-byte Folded Spill
4769; CHECK-NOV-NEXT:    fsd fs0, 48(sp) # 8-byte Folded Spill
4770; CHECK-NOV-NEXT:    fsd fs1, 40(sp) # 8-byte Folded Spill
4771; CHECK-NOV-NEXT:    fsd fs2, 32(sp) # 8-byte Folded Spill
4772; CHECK-NOV-NEXT:    fsd fs3, 24(sp) # 8-byte Folded Spill
4773; CHECK-NOV-NEXT:    fsd fs4, 16(sp) # 8-byte Folded Spill
4774; CHECK-NOV-NEXT:    fsd fs5, 8(sp) # 8-byte Folded Spill
4775; CHECK-NOV-NEXT:    fsd fs6, 0(sp) # 8-byte Folded Spill
4776; CHECK-NOV-NEXT:    .cfi_offset ra, -8
4777; CHECK-NOV-NEXT:    .cfi_offset s0, -16
4778; CHECK-NOV-NEXT:    .cfi_offset s1, -24
4779; CHECK-NOV-NEXT:    .cfi_offset s2, -32
4780; CHECK-NOV-NEXT:    .cfi_offset s3, -40
4781; CHECK-NOV-NEXT:    .cfi_offset s4, -48
4782; CHECK-NOV-NEXT:    .cfi_offset s5, -56
4783; CHECK-NOV-NEXT:    .cfi_offset s6, -64
4784; CHECK-NOV-NEXT:    .cfi_offset s7, -72
4785; CHECK-NOV-NEXT:    .cfi_offset fs0, -80
4786; CHECK-NOV-NEXT:    .cfi_offset fs1, -88
4787; CHECK-NOV-NEXT:    .cfi_offset fs2, -96
4788; CHECK-NOV-NEXT:    .cfi_offset fs3, -104
4789; CHECK-NOV-NEXT:    .cfi_offset fs4, -112
4790; CHECK-NOV-NEXT:    .cfi_offset fs5, -120
4791; CHECK-NOV-NEXT:    .cfi_offset fs6, -128
4792; CHECK-NOV-NEXT:    .cfi_remember_state
4793; CHECK-NOV-NEXT:    lhu s1, 32(a1)
4794; CHECK-NOV-NEXT:    lhu s2, 40(a1)
4795; CHECK-NOV-NEXT:    lhu a2, 48(a1)
4796; CHECK-NOV-NEXT:    lhu s3, 56(a1)
4797; CHECK-NOV-NEXT:    lhu s4, 0(a1)
4798; CHECK-NOV-NEXT:    lhu s5, 8(a1)
4799; CHECK-NOV-NEXT:    lhu s6, 16(a1)
4800; CHECK-NOV-NEXT:    lhu s7, 24(a1)
4801; CHECK-NOV-NEXT:    mv s0, a0
4802; CHECK-NOV-NEXT:    fmv.w.x fa0, a2
4803; CHECK-NOV-NEXT:    call __extendhfsf2
4804; CHECK-NOV-NEXT:    fmv.s fs6, fa0
4805; CHECK-NOV-NEXT:    fmv.w.x fa0, s2
4806; CHECK-NOV-NEXT:    call __extendhfsf2
4807; CHECK-NOV-NEXT:    fmv.s fs5, fa0
4808; CHECK-NOV-NEXT:    fmv.w.x fa0, s1
4809; CHECK-NOV-NEXT:    call __extendhfsf2
4810; CHECK-NOV-NEXT:    fmv.s fs4, fa0
4811; CHECK-NOV-NEXT:    fmv.w.x fa0, s7
4812; CHECK-NOV-NEXT:    call __extendhfsf2
4813; CHECK-NOV-NEXT:    fmv.s fs3, fa0
4814; CHECK-NOV-NEXT:    fmv.w.x fa0, s6
4815; CHECK-NOV-NEXT:    call __extendhfsf2
4816; CHECK-NOV-NEXT:    fmv.s fs2, fa0
4817; CHECK-NOV-NEXT:    fmv.w.x fa0, s5
4818; CHECK-NOV-NEXT:    call __extendhfsf2
4819; CHECK-NOV-NEXT:    fmv.s fs1, fa0
4820; CHECK-NOV-NEXT:    fmv.w.x fa0, s4
4821; CHECK-NOV-NEXT:    call __extendhfsf2
4822; CHECK-NOV-NEXT:    fmv.s fs0, fa0
4823; CHECK-NOV-NEXT:    fmv.w.x fa0, s3
4824; CHECK-NOV-NEXT:    fcvt.l.s s1, fs6, rtz
4825; CHECK-NOV-NEXT:    call __extendhfsf2
4826; CHECK-NOV-NEXT:    fcvt.l.s a0, fa0, rtz
4827; CHECK-NOV-NEXT:    lui a7, 8
4828; CHECK-NOV-NEXT:    addiw a7, a7, -1
4829; CHECK-NOV-NEXT:    bge a0, a7, .LBB42_18
4830; CHECK-NOV-NEXT:  # %bb.1: # %entry
4831; CHECK-NOV-NEXT:    fcvt.l.s a1, fs5, rtz
4832; CHECK-NOV-NEXT:    bge s1, a7, .LBB42_19
4833; CHECK-NOV-NEXT:  .LBB42_2: # %entry
4834; CHECK-NOV-NEXT:    fcvt.l.s a3, fs4, rtz
4835; CHECK-NOV-NEXT:    bge a1, a7, .LBB42_20
4836; CHECK-NOV-NEXT:  .LBB42_3: # %entry
4837; CHECK-NOV-NEXT:    fcvt.l.s a2, fs3, rtz
4838; CHECK-NOV-NEXT:    bge a3, a7, .LBB42_21
4839; CHECK-NOV-NEXT:  .LBB42_4: # %entry
4840; CHECK-NOV-NEXT:    fcvt.l.s a4, fs2, rtz
4841; CHECK-NOV-NEXT:    bge a2, a7, .LBB42_22
4842; CHECK-NOV-NEXT:  .LBB42_5: # %entry
4843; CHECK-NOV-NEXT:    fcvt.l.s a5, fs1, rtz
4844; CHECK-NOV-NEXT:    bge a4, a7, .LBB42_23
4845; CHECK-NOV-NEXT:  .LBB42_6: # %entry
4846; CHECK-NOV-NEXT:    fcvt.l.s a6, fs0, rtz
4847; CHECK-NOV-NEXT:    bge a5, a7, .LBB42_24
4848; CHECK-NOV-NEXT:  .LBB42_7: # %entry
4849; CHECK-NOV-NEXT:    bge a6, a7, .LBB42_25
4850; CHECK-NOV-NEXT:  .LBB42_8: # %entry
4851; CHECK-NOV-NEXT:    lui a7, 1048568
4852; CHECK-NOV-NEXT:    bge a7, a6, .LBB42_26
4853; CHECK-NOV-NEXT:  .LBB42_9: # %entry
4854; CHECK-NOV-NEXT:    bge a7, a5, .LBB42_27
4855; CHECK-NOV-NEXT:  .LBB42_10: # %entry
4856; CHECK-NOV-NEXT:    bge a7, a4, .LBB42_28
4857; CHECK-NOV-NEXT:  .LBB42_11: # %entry
4858; CHECK-NOV-NEXT:    bge a7, a2, .LBB42_29
4859; CHECK-NOV-NEXT:  .LBB42_12: # %entry
4860; CHECK-NOV-NEXT:    bge a7, a3, .LBB42_30
4861; CHECK-NOV-NEXT:  .LBB42_13: # %entry
4862; CHECK-NOV-NEXT:    bge a7, a1, .LBB42_31
4863; CHECK-NOV-NEXT:  .LBB42_14: # %entry
4864; CHECK-NOV-NEXT:    bge a7, s1, .LBB42_32
4865; CHECK-NOV-NEXT:  .LBB42_15: # %entry
4866; CHECK-NOV-NEXT:    blt a7, a0, .LBB42_17
4867; CHECK-NOV-NEXT:  .LBB42_16: # %entry
4868; CHECK-NOV-NEXT:    lui a0, 1048568
4869; CHECK-NOV-NEXT:  .LBB42_17: # %entry
4870; CHECK-NOV-NEXT:    sh a3, 8(s0)
4871; CHECK-NOV-NEXT:    sh a1, 10(s0)
4872; CHECK-NOV-NEXT:    sh s1, 12(s0)
4873; CHECK-NOV-NEXT:    sh a0, 14(s0)
4874; CHECK-NOV-NEXT:    sh a6, 0(s0)
4875; CHECK-NOV-NEXT:    sh a5, 2(s0)
4876; CHECK-NOV-NEXT:    sh a4, 4(s0)
4877; CHECK-NOV-NEXT:    sh a2, 6(s0)
4878; CHECK-NOV-NEXT:    ld ra, 120(sp) # 8-byte Folded Reload
4879; CHECK-NOV-NEXT:    ld s0, 112(sp) # 8-byte Folded Reload
4880; CHECK-NOV-NEXT:    ld s1, 104(sp) # 8-byte Folded Reload
4881; CHECK-NOV-NEXT:    ld s2, 96(sp) # 8-byte Folded Reload
4882; CHECK-NOV-NEXT:    ld s3, 88(sp) # 8-byte Folded Reload
4883; CHECK-NOV-NEXT:    ld s4, 80(sp) # 8-byte Folded Reload
4884; CHECK-NOV-NEXT:    ld s5, 72(sp) # 8-byte Folded Reload
4885; CHECK-NOV-NEXT:    ld s6, 64(sp) # 8-byte Folded Reload
4886; CHECK-NOV-NEXT:    ld s7, 56(sp) # 8-byte Folded Reload
4887; CHECK-NOV-NEXT:    fld fs0, 48(sp) # 8-byte Folded Reload
4888; CHECK-NOV-NEXT:    fld fs1, 40(sp) # 8-byte Folded Reload
4889; CHECK-NOV-NEXT:    fld fs2, 32(sp) # 8-byte Folded Reload
4890; CHECK-NOV-NEXT:    fld fs3, 24(sp) # 8-byte Folded Reload
4891; CHECK-NOV-NEXT:    fld fs4, 16(sp) # 8-byte Folded Reload
4892; CHECK-NOV-NEXT:    fld fs5, 8(sp) # 8-byte Folded Reload
4893; CHECK-NOV-NEXT:    fld fs6, 0(sp) # 8-byte Folded Reload
4894; CHECK-NOV-NEXT:    .cfi_restore ra
4895; CHECK-NOV-NEXT:    .cfi_restore s0
4896; CHECK-NOV-NEXT:    .cfi_restore s1
4897; CHECK-NOV-NEXT:    .cfi_restore s2
4898; CHECK-NOV-NEXT:    .cfi_restore s3
4899; CHECK-NOV-NEXT:    .cfi_restore s4
4900; CHECK-NOV-NEXT:    .cfi_restore s5
4901; CHECK-NOV-NEXT:    .cfi_restore s6
4902; CHECK-NOV-NEXT:    .cfi_restore s7
4903; CHECK-NOV-NEXT:    .cfi_restore fs0
4904; CHECK-NOV-NEXT:    .cfi_restore fs1
4905; CHECK-NOV-NEXT:    .cfi_restore fs2
4906; CHECK-NOV-NEXT:    .cfi_restore fs3
4907; CHECK-NOV-NEXT:    .cfi_restore fs4
4908; CHECK-NOV-NEXT:    .cfi_restore fs5
4909; CHECK-NOV-NEXT:    .cfi_restore fs6
4910; CHECK-NOV-NEXT:    addi sp, sp, 128
4911; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 0
4912; CHECK-NOV-NEXT:    ret
4913; CHECK-NOV-NEXT:  .LBB42_18: # %entry
4914; CHECK-NOV-NEXT:    .cfi_restore_state
4915; CHECK-NOV-NEXT:    mv a0, a7
4916; CHECK-NOV-NEXT:    fcvt.l.s a1, fs5, rtz
4917; CHECK-NOV-NEXT:    blt s1, a7, .LBB42_2
4918; CHECK-NOV-NEXT:  .LBB42_19: # %entry
4919; CHECK-NOV-NEXT:    mv s1, a7
4920; CHECK-NOV-NEXT:    fcvt.l.s a3, fs4, rtz
4921; CHECK-NOV-NEXT:    blt a1, a7, .LBB42_3
4922; CHECK-NOV-NEXT:  .LBB42_20: # %entry
4923; CHECK-NOV-NEXT:    mv a1, a7
4924; CHECK-NOV-NEXT:    fcvt.l.s a2, fs3, rtz
4925; CHECK-NOV-NEXT:    blt a3, a7, .LBB42_4
4926; CHECK-NOV-NEXT:  .LBB42_21: # %entry
4927; CHECK-NOV-NEXT:    mv a3, a7
4928; CHECK-NOV-NEXT:    fcvt.l.s a4, fs2, rtz
4929; CHECK-NOV-NEXT:    blt a2, a7, .LBB42_5
4930; CHECK-NOV-NEXT:  .LBB42_22: # %entry
4931; CHECK-NOV-NEXT:    mv a2, a7
4932; CHECK-NOV-NEXT:    fcvt.l.s a5, fs1, rtz
4933; CHECK-NOV-NEXT:    blt a4, a7, .LBB42_6
4934; CHECK-NOV-NEXT:  .LBB42_23: # %entry
4935; CHECK-NOV-NEXT:    mv a4, a7
4936; CHECK-NOV-NEXT:    fcvt.l.s a6, fs0, rtz
4937; CHECK-NOV-NEXT:    blt a5, a7, .LBB42_7
4938; CHECK-NOV-NEXT:  .LBB42_24: # %entry
4939; CHECK-NOV-NEXT:    mv a5, a7
4940; CHECK-NOV-NEXT:    blt a6, a7, .LBB42_8
4941; CHECK-NOV-NEXT:  .LBB42_25: # %entry
4942; CHECK-NOV-NEXT:    mv a6, a7
4943; CHECK-NOV-NEXT:    lui a7, 1048568
4944; CHECK-NOV-NEXT:    blt a7, a6, .LBB42_9
4945; CHECK-NOV-NEXT:  .LBB42_26: # %entry
4946; CHECK-NOV-NEXT:    lui a6, 1048568
4947; CHECK-NOV-NEXT:    blt a7, a5, .LBB42_10
4948; CHECK-NOV-NEXT:  .LBB42_27: # %entry
4949; CHECK-NOV-NEXT:    lui a5, 1048568
4950; CHECK-NOV-NEXT:    blt a7, a4, .LBB42_11
4951; CHECK-NOV-NEXT:  .LBB42_28: # %entry
4952; CHECK-NOV-NEXT:    lui a4, 1048568
4953; CHECK-NOV-NEXT:    blt a7, a2, .LBB42_12
4954; CHECK-NOV-NEXT:  .LBB42_29: # %entry
4955; CHECK-NOV-NEXT:    lui a2, 1048568
4956; CHECK-NOV-NEXT:    blt a7, a3, .LBB42_13
4957; CHECK-NOV-NEXT:  .LBB42_30: # %entry
4958; CHECK-NOV-NEXT:    lui a3, 1048568
4959; CHECK-NOV-NEXT:    blt a7, a1, .LBB42_14
4960; CHECK-NOV-NEXT:  .LBB42_31: # %entry
4961; CHECK-NOV-NEXT:    lui a1, 1048568
4962; CHECK-NOV-NEXT:    blt a7, s1, .LBB42_15
4963; CHECK-NOV-NEXT:  .LBB42_32: # %entry
4964; CHECK-NOV-NEXT:    lui s1, 1048568
4965; CHECK-NOV-NEXT:    bge a7, a0, .LBB42_16
4966; CHECK-NOV-NEXT:    j .LBB42_17
4967;
4968; CHECK-V-LABEL: stest_f16i16_mm:
4969; CHECK-V:       # %bb.0: # %entry
4970; CHECK-V-NEXT:    addi sp, sp, -80
4971; CHECK-V-NEXT:    .cfi_def_cfa_offset 80
4972; CHECK-V-NEXT:    sd ra, 72(sp) # 8-byte Folded Spill
4973; CHECK-V-NEXT:    sd s0, 64(sp) # 8-byte Folded Spill
4974; CHECK-V-NEXT:    sd s1, 56(sp) # 8-byte Folded Spill
4975; CHECK-V-NEXT:    sd s2, 48(sp) # 8-byte Folded Spill
4976; CHECK-V-NEXT:    sd s3, 40(sp) # 8-byte Folded Spill
4977; CHECK-V-NEXT:    sd s4, 32(sp) # 8-byte Folded Spill
4978; CHECK-V-NEXT:    sd s5, 24(sp) # 8-byte Folded Spill
4979; CHECK-V-NEXT:    sd s6, 16(sp) # 8-byte Folded Spill
4980; CHECK-V-NEXT:    .cfi_offset ra, -8
4981; CHECK-V-NEXT:    .cfi_offset s0, -16
4982; CHECK-V-NEXT:    .cfi_offset s1, -24
4983; CHECK-V-NEXT:    .cfi_offset s2, -32
4984; CHECK-V-NEXT:    .cfi_offset s3, -40
4985; CHECK-V-NEXT:    .cfi_offset s4, -48
4986; CHECK-V-NEXT:    .cfi_offset s5, -56
4987; CHECK-V-NEXT:    .cfi_offset s6, -64
4988; CHECK-V-NEXT:    csrr a1, vlenb
4989; CHECK-V-NEXT:    slli a1, a1, 2
4990; CHECK-V-NEXT:    sub sp, sp, a1
4991; CHECK-V-NEXT:    .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xd0, 0x00, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 80 + 4 * vlenb
4992; CHECK-V-NEXT:    lhu s0, 0(a0)
4993; CHECK-V-NEXT:    lhu s1, 8(a0)
4994; CHECK-V-NEXT:    lhu s2, 16(a0)
4995; CHECK-V-NEXT:    lhu s3, 24(a0)
4996; CHECK-V-NEXT:    lhu s4, 32(a0)
4997; CHECK-V-NEXT:    lhu s5, 40(a0)
4998; CHECK-V-NEXT:    lhu s6, 48(a0)
4999; CHECK-V-NEXT:    lhu a0, 56(a0)
5000; CHECK-V-NEXT:    fmv.w.x fa0, a0
5001; CHECK-V-NEXT:    call __extendhfsf2
5002; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
5003; CHECK-V-NEXT:    fmv.w.x fa0, s6
5004; CHECK-V-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
5005; CHECK-V-NEXT:    vmv.s.x v8, a0
5006; CHECK-V-NEXT:    csrr a0, vlenb
5007; CHECK-V-NEXT:    slli a0, a0, 1
5008; CHECK-V-NEXT:    add a0, sp, a0
5009; CHECK-V-NEXT:    addi a0, a0, 16
5010; CHECK-V-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
5011; CHECK-V-NEXT:    call __extendhfsf2
5012; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
5013; CHECK-V-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
5014; CHECK-V-NEXT:    vmv.s.x v8, a0
5015; CHECK-V-NEXT:    csrr a0, vlenb
5016; CHECK-V-NEXT:    slli a0, a0, 1
5017; CHECK-V-NEXT:    add a0, sp, a0
5018; CHECK-V-NEXT:    addi a0, a0, 16
5019; CHECK-V-NEXT:    vl1r.v v9, (a0) # Unknown-size Folded Reload
5020; CHECK-V-NEXT:    vslideup.vi v8, v9, 1
5021; CHECK-V-NEXT:    csrr a0, vlenb
5022; CHECK-V-NEXT:    add a0, sp, a0
5023; CHECK-V-NEXT:    addi a0, a0, 16
5024; CHECK-V-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
5025; CHECK-V-NEXT:    fmv.w.x fa0, s5
5026; CHECK-V-NEXT:    call __extendhfsf2
5027; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
5028; CHECK-V-NEXT:    fmv.w.x fa0, s4
5029; CHECK-V-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
5030; CHECK-V-NEXT:    vmv.s.x v8, a0
5031; CHECK-V-NEXT:    csrr a0, vlenb
5032; CHECK-V-NEXT:    slli a0, a0, 1
5033; CHECK-V-NEXT:    add a0, sp, a0
5034; CHECK-V-NEXT:    addi a0, a0, 16
5035; CHECK-V-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
5036; CHECK-V-NEXT:    call __extendhfsf2
5037; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
5038; CHECK-V-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
5039; CHECK-V-NEXT:    vmv.s.x v8, a0
5040; CHECK-V-NEXT:    csrr a0, vlenb
5041; CHECK-V-NEXT:    slli a0, a0, 1
5042; CHECK-V-NEXT:    add a0, sp, a0
5043; CHECK-V-NEXT:    addi a0, a0, 16
5044; CHECK-V-NEXT:    vl1r.v v9, (a0) # Unknown-size Folded Reload
5045; CHECK-V-NEXT:    vslideup.vi v8, v9, 1
5046; CHECK-V-NEXT:    csrr a0, vlenb
5047; CHECK-V-NEXT:    add a0, sp, a0
5048; CHECK-V-NEXT:    addi a0, a0, 16
5049; CHECK-V-NEXT:    vl1r.v v9, (a0) # Unknown-size Folded Reload
5050; CHECK-V-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
5051; CHECK-V-NEXT:    vslideup.vi v8, v9, 2
5052; CHECK-V-NEXT:    csrr a0, vlenb
5053; CHECK-V-NEXT:    slli a0, a0, 1
5054; CHECK-V-NEXT:    add a0, sp, a0
5055; CHECK-V-NEXT:    addi a0, a0, 16
5056; CHECK-V-NEXT:    vs2r.v v8, (a0) # Unknown-size Folded Spill
5057; CHECK-V-NEXT:    fmv.w.x fa0, s3
5058; CHECK-V-NEXT:    call __extendhfsf2
5059; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
5060; CHECK-V-NEXT:    fmv.w.x fa0, s2
5061; CHECK-V-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
5062; CHECK-V-NEXT:    vmv.s.x v8, a0
5063; CHECK-V-NEXT:    addi a0, sp, 16
5064; CHECK-V-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
5065; CHECK-V-NEXT:    call __extendhfsf2
5066; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
5067; CHECK-V-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
5068; CHECK-V-NEXT:    vmv.s.x v8, a0
5069; CHECK-V-NEXT:    addi a0, sp, 16
5070; CHECK-V-NEXT:    vl1r.v v9, (a0) # Unknown-size Folded Reload
5071; CHECK-V-NEXT:    vslideup.vi v8, v9, 1
5072; CHECK-V-NEXT:    csrr a0, vlenb
5073; CHECK-V-NEXT:    add a0, sp, a0
5074; CHECK-V-NEXT:    addi a0, a0, 16
5075; CHECK-V-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
5076; CHECK-V-NEXT:    fmv.w.x fa0, s1
5077; CHECK-V-NEXT:    call __extendhfsf2
5078; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
5079; CHECK-V-NEXT:    fmv.w.x fa0, s0
5080; CHECK-V-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
5081; CHECK-V-NEXT:    vmv.s.x v8, a0
5082; CHECK-V-NEXT:    addi a0, sp, 16
5083; CHECK-V-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
5084; CHECK-V-NEXT:    call __extendhfsf2
5085; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
5086; CHECK-V-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
5087; CHECK-V-NEXT:    vmv.s.x v10, a0
5088; CHECK-V-NEXT:    addi a0, sp, 16
5089; CHECK-V-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
5090; CHECK-V-NEXT:    vslideup.vi v10, v8, 1
5091; CHECK-V-NEXT:    csrr a0, vlenb
5092; CHECK-V-NEXT:    add a0, sp, a0
5093; CHECK-V-NEXT:    addi a0, a0, 16
5094; CHECK-V-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
5095; CHECK-V-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
5096; CHECK-V-NEXT:    vslideup.vi v10, v8, 2
5097; CHECK-V-NEXT:    csrr a0, vlenb
5098; CHECK-V-NEXT:    slli a0, a0, 1
5099; CHECK-V-NEXT:    add a0, sp, a0
5100; CHECK-V-NEXT:    addi a0, a0, 16
5101; CHECK-V-NEXT:    vl2r.v v8, (a0) # Unknown-size Folded Reload
5102; CHECK-V-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
5103; CHECK-V-NEXT:    vslideup.vi v10, v8, 4
5104; CHECK-V-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
5105; CHECK-V-NEXT:    vnclip.wi v8, v10, 0
5106; CHECK-V-NEXT:    csrr a0, vlenb
5107; CHECK-V-NEXT:    slli a0, a0, 2
5108; CHECK-V-NEXT:    add sp, sp, a0
5109; CHECK-V-NEXT:    .cfi_def_cfa sp, 80
5110; CHECK-V-NEXT:    ld ra, 72(sp) # 8-byte Folded Reload
5111; CHECK-V-NEXT:    ld s0, 64(sp) # 8-byte Folded Reload
5112; CHECK-V-NEXT:    ld s1, 56(sp) # 8-byte Folded Reload
5113; CHECK-V-NEXT:    ld s2, 48(sp) # 8-byte Folded Reload
5114; CHECK-V-NEXT:    ld s3, 40(sp) # 8-byte Folded Reload
5115; CHECK-V-NEXT:    ld s4, 32(sp) # 8-byte Folded Reload
5116; CHECK-V-NEXT:    ld s5, 24(sp) # 8-byte Folded Reload
5117; CHECK-V-NEXT:    ld s6, 16(sp) # 8-byte Folded Reload
5118; CHECK-V-NEXT:    .cfi_restore ra
5119; CHECK-V-NEXT:    .cfi_restore s0
5120; CHECK-V-NEXT:    .cfi_restore s1
5121; CHECK-V-NEXT:    .cfi_restore s2
5122; CHECK-V-NEXT:    .cfi_restore s3
5123; CHECK-V-NEXT:    .cfi_restore s4
5124; CHECK-V-NEXT:    .cfi_restore s5
5125; CHECK-V-NEXT:    .cfi_restore s6
5126; CHECK-V-NEXT:    addi sp, sp, 80
5127; CHECK-V-NEXT:    .cfi_def_cfa_offset 0
5128; CHECK-V-NEXT:    ret
5129entry:
5130  %conv = fptosi <8 x half> %x to <8 x i32>
5131  %spec.store.select = call <8 x i32> @llvm.smin.v8i32(<8 x i32> %conv, <8 x i32> <i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767, i32 32767>)
5132  %spec.store.select7 = call <8 x i32> @llvm.smax.v8i32(<8 x i32> %spec.store.select, <8 x i32> <i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768, i32 -32768>)
5133  %conv6 = trunc <8 x i32> %spec.store.select7 to <8 x i16>
5134  ret <8 x i16> %conv6
5135}
5136
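; utesth_f16i16_mm: fptoui <8 x half> to <8 x i32>, clamp with @llvm.umin to 65535, then truncate to <8 x i16>;
; the CHECK-V lowering builds the vector from scalar __extendhfsf2/fcvt.lu.s results and narrows with vnclipu.wi.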
5137define <8 x i16> @utesth_f16i16_mm(<8 x half> %x) {
5138; CHECK-NOV-LABEL: utesth_f16i16_mm:
5139; CHECK-NOV:       # %bb.0: # %entry
5140; CHECK-NOV-NEXT:    addi sp, sp, -128
5141; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 128
5142; CHECK-NOV-NEXT:    sd ra, 120(sp) # 8-byte Folded Spill
5143; CHECK-NOV-NEXT:    sd s0, 112(sp) # 8-byte Folded Spill
5144; CHECK-NOV-NEXT:    sd s1, 104(sp) # 8-byte Folded Spill
5145; CHECK-NOV-NEXT:    sd s2, 96(sp) # 8-byte Folded Spill
5146; CHECK-NOV-NEXT:    sd s3, 88(sp) # 8-byte Folded Spill
5147; CHECK-NOV-NEXT:    sd s4, 80(sp) # 8-byte Folded Spill
5148; CHECK-NOV-NEXT:    sd s5, 72(sp) # 8-byte Folded Spill
5149; CHECK-NOV-NEXT:    sd s6, 64(sp) # 8-byte Folded Spill
5150; CHECK-NOV-NEXT:    sd s7, 56(sp) # 8-byte Folded Spill
5151; CHECK-NOV-NEXT:    fsd fs0, 48(sp) # 8-byte Folded Spill
5152; CHECK-NOV-NEXT:    fsd fs1, 40(sp) # 8-byte Folded Spill
5153; CHECK-NOV-NEXT:    fsd fs2, 32(sp) # 8-byte Folded Spill
5154; CHECK-NOV-NEXT:    fsd fs3, 24(sp) # 8-byte Folded Spill
5155; CHECK-NOV-NEXT:    fsd fs4, 16(sp) # 8-byte Folded Spill
5156; CHECK-NOV-NEXT:    fsd fs5, 8(sp) # 8-byte Folded Spill
5157; CHECK-NOV-NEXT:    fsd fs6, 0(sp) # 8-byte Folded Spill
5158; CHECK-NOV-NEXT:    .cfi_offset ra, -8
5159; CHECK-NOV-NEXT:    .cfi_offset s0, -16
5160; CHECK-NOV-NEXT:    .cfi_offset s1, -24
5161; CHECK-NOV-NEXT:    .cfi_offset s2, -32
5162; CHECK-NOV-NEXT:    .cfi_offset s3, -40
5163; CHECK-NOV-NEXT:    .cfi_offset s4, -48
5164; CHECK-NOV-NEXT:    .cfi_offset s5, -56
5165; CHECK-NOV-NEXT:    .cfi_offset s6, -64
5166; CHECK-NOV-NEXT:    .cfi_offset s7, -72
5167; CHECK-NOV-NEXT:    .cfi_offset fs0, -80
5168; CHECK-NOV-NEXT:    .cfi_offset fs1, -88
5169; CHECK-NOV-NEXT:    .cfi_offset fs2, -96
5170; CHECK-NOV-NEXT:    .cfi_offset fs3, -104
5171; CHECK-NOV-NEXT:    .cfi_offset fs4, -112
5172; CHECK-NOV-NEXT:    .cfi_offset fs5, -120
5173; CHECK-NOV-NEXT:    .cfi_offset fs6, -128
5174; CHECK-NOV-NEXT:    .cfi_remember_state
5175; CHECK-NOV-NEXT:    lhu s1, 32(a1)
5176; CHECK-NOV-NEXT:    lhu s2, 40(a1)
5177; CHECK-NOV-NEXT:    lhu s3, 48(a1)
5178; CHECK-NOV-NEXT:    lhu s4, 56(a1)
5179; CHECK-NOV-NEXT:    lhu s5, 0(a1)
5180; CHECK-NOV-NEXT:    lhu a2, 8(a1)
5181; CHECK-NOV-NEXT:    lhu s6, 16(a1)
5182; CHECK-NOV-NEXT:    lhu s7, 24(a1)
5183; CHECK-NOV-NEXT:    mv s0, a0
5184; CHECK-NOV-NEXT:    fmv.w.x fa0, a2
5185; CHECK-NOV-NEXT:    call __extendhfsf2
5186; CHECK-NOV-NEXT:    fmv.s fs6, fa0
5187; CHECK-NOV-NEXT:    fmv.w.x fa0, s6
5188; CHECK-NOV-NEXT:    call __extendhfsf2
5189; CHECK-NOV-NEXT:    fmv.s fs5, fa0
5190; CHECK-NOV-NEXT:    fmv.w.x fa0, s7
5191; CHECK-NOV-NEXT:    call __extendhfsf2
5192; CHECK-NOV-NEXT:    fmv.s fs4, fa0
5193; CHECK-NOV-NEXT:    fmv.w.x fa0, s1
5194; CHECK-NOV-NEXT:    call __extendhfsf2
5195; CHECK-NOV-NEXT:    fmv.s fs3, fa0
5196; CHECK-NOV-NEXT:    fmv.w.x fa0, s2
5197; CHECK-NOV-NEXT:    call __extendhfsf2
5198; CHECK-NOV-NEXT:    fmv.s fs2, fa0
5199; CHECK-NOV-NEXT:    fmv.w.x fa0, s3
5200; CHECK-NOV-NEXT:    call __extendhfsf2
5201; CHECK-NOV-NEXT:    fmv.s fs1, fa0
5202; CHECK-NOV-NEXT:    fmv.w.x fa0, s4
5203; CHECK-NOV-NEXT:    call __extendhfsf2
5204; CHECK-NOV-NEXT:    fmv.s fs0, fa0
5205; CHECK-NOV-NEXT:    fmv.w.x fa0, s5
5206; CHECK-NOV-NEXT:    fcvt.lu.s s1, fs6, rtz
5207; CHECK-NOV-NEXT:    call __extendhfsf2
5208; CHECK-NOV-NEXT:    fcvt.lu.s a0, fa0, rtz
5209; CHECK-NOV-NEXT:    lui a3, 16
5210; CHECK-NOV-NEXT:    addiw a3, a3, -1
5211; CHECK-NOV-NEXT:    bgeu a0, a3, .LBB43_10
5212; CHECK-NOV-NEXT:  # %bb.1: # %entry
5213; CHECK-NOV-NEXT:    fcvt.lu.s a1, fs5, rtz
5214; CHECK-NOV-NEXT:    bgeu s1, a3, .LBB43_11
5215; CHECK-NOV-NEXT:  .LBB43_2: # %entry
5216; CHECK-NOV-NEXT:    fcvt.lu.s a2, fs4, rtz
5217; CHECK-NOV-NEXT:    bgeu a1, a3, .LBB43_12
5218; CHECK-NOV-NEXT:  .LBB43_3: # %entry
5219; CHECK-NOV-NEXT:    fcvt.lu.s a4, fs3, rtz
5220; CHECK-NOV-NEXT:    bgeu a2, a3, .LBB43_13
5221; CHECK-NOV-NEXT:  .LBB43_4: # %entry
5222; CHECK-NOV-NEXT:    fcvt.lu.s a5, fs2, rtz
5223; CHECK-NOV-NEXT:    bgeu a4, a3, .LBB43_14
5224; CHECK-NOV-NEXT:  .LBB43_5: # %entry
5225; CHECK-NOV-NEXT:    fcvt.lu.s a6, fs1, rtz
5226; CHECK-NOV-NEXT:    bgeu a5, a3, .LBB43_15
5227; CHECK-NOV-NEXT:  .LBB43_6: # %entry
5228; CHECK-NOV-NEXT:    fcvt.lu.s a7, fs0, rtz
5229; CHECK-NOV-NEXT:    bgeu a6, a3, .LBB43_16
5230; CHECK-NOV-NEXT:  .LBB43_7: # %entry
5231; CHECK-NOV-NEXT:    bltu a7, a3, .LBB43_9
5232; CHECK-NOV-NEXT:  .LBB43_8: # %entry
5233; CHECK-NOV-NEXT:    mv a7, a3
5234; CHECK-NOV-NEXT:  .LBB43_9: # %entry
5235; CHECK-NOV-NEXT:    sh a4, 8(s0)
5236; CHECK-NOV-NEXT:    sh a5, 10(s0)
5237; CHECK-NOV-NEXT:    sh a6, 12(s0)
5238; CHECK-NOV-NEXT:    sh a7, 14(s0)
5239; CHECK-NOV-NEXT:    sh a0, 0(s0)
5240; CHECK-NOV-NEXT:    sh s1, 2(s0)
5241; CHECK-NOV-NEXT:    sh a1, 4(s0)
5242; CHECK-NOV-NEXT:    sh a2, 6(s0)
5243; CHECK-NOV-NEXT:    ld ra, 120(sp) # 8-byte Folded Reload
5244; CHECK-NOV-NEXT:    ld s0, 112(sp) # 8-byte Folded Reload
5245; CHECK-NOV-NEXT:    ld s1, 104(sp) # 8-byte Folded Reload
5246; CHECK-NOV-NEXT:    ld s2, 96(sp) # 8-byte Folded Reload
5247; CHECK-NOV-NEXT:    ld s3, 88(sp) # 8-byte Folded Reload
5248; CHECK-NOV-NEXT:    ld s4, 80(sp) # 8-byte Folded Reload
5249; CHECK-NOV-NEXT:    ld s5, 72(sp) # 8-byte Folded Reload
5250; CHECK-NOV-NEXT:    ld s6, 64(sp) # 8-byte Folded Reload
5251; CHECK-NOV-NEXT:    ld s7, 56(sp) # 8-byte Folded Reload
5252; CHECK-NOV-NEXT:    fld fs0, 48(sp) # 8-byte Folded Reload
5253; CHECK-NOV-NEXT:    fld fs1, 40(sp) # 8-byte Folded Reload
5254; CHECK-NOV-NEXT:    fld fs2, 32(sp) # 8-byte Folded Reload
5255; CHECK-NOV-NEXT:    fld fs3, 24(sp) # 8-byte Folded Reload
5256; CHECK-NOV-NEXT:    fld fs4, 16(sp) # 8-byte Folded Reload
5257; CHECK-NOV-NEXT:    fld fs5, 8(sp) # 8-byte Folded Reload
5258; CHECK-NOV-NEXT:    fld fs6, 0(sp) # 8-byte Folded Reload
5259; CHECK-NOV-NEXT:    .cfi_restore ra
5260; CHECK-NOV-NEXT:    .cfi_restore s0
5261; CHECK-NOV-NEXT:    .cfi_restore s1
5262; CHECK-NOV-NEXT:    .cfi_restore s2
5263; CHECK-NOV-NEXT:    .cfi_restore s3
5264; CHECK-NOV-NEXT:    .cfi_restore s4
5265; CHECK-NOV-NEXT:    .cfi_restore s5
5266; CHECK-NOV-NEXT:    .cfi_restore s6
5267; CHECK-NOV-NEXT:    .cfi_restore s7
5268; CHECK-NOV-NEXT:    .cfi_restore fs0
5269; CHECK-NOV-NEXT:    .cfi_restore fs1
5270; CHECK-NOV-NEXT:    .cfi_restore fs2
5271; CHECK-NOV-NEXT:    .cfi_restore fs3
5272; CHECK-NOV-NEXT:    .cfi_restore fs4
5273; CHECK-NOV-NEXT:    .cfi_restore fs5
5274; CHECK-NOV-NEXT:    .cfi_restore fs6
5275; CHECK-NOV-NEXT:    addi sp, sp, 128
5276; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 0
5277; CHECK-NOV-NEXT:    ret
5278; CHECK-NOV-NEXT:  .LBB43_10: # %entry
5279; CHECK-NOV-NEXT:    .cfi_restore_state
5280; CHECK-NOV-NEXT:    mv a0, a3
5281; CHECK-NOV-NEXT:    fcvt.lu.s a1, fs5, rtz
5282; CHECK-NOV-NEXT:    bltu s1, a3, .LBB43_2
5283; CHECK-NOV-NEXT:  .LBB43_11: # %entry
5284; CHECK-NOV-NEXT:    mv s1, a3
5285; CHECK-NOV-NEXT:    fcvt.lu.s a2, fs4, rtz
5286; CHECK-NOV-NEXT:    bltu a1, a3, .LBB43_3
5287; CHECK-NOV-NEXT:  .LBB43_12: # %entry
5288; CHECK-NOV-NEXT:    mv a1, a3
5289; CHECK-NOV-NEXT:    fcvt.lu.s a4, fs3, rtz
5290; CHECK-NOV-NEXT:    bltu a2, a3, .LBB43_4
5291; CHECK-NOV-NEXT:  .LBB43_13: # %entry
5292; CHECK-NOV-NEXT:    mv a2, a3
5293; CHECK-NOV-NEXT:    fcvt.lu.s a5, fs2, rtz
5294; CHECK-NOV-NEXT:    bltu a4, a3, .LBB43_5
5295; CHECK-NOV-NEXT:  .LBB43_14: # %entry
5296; CHECK-NOV-NEXT:    mv a4, a3
5297; CHECK-NOV-NEXT:    fcvt.lu.s a6, fs1, rtz
5298; CHECK-NOV-NEXT:    bltu a5, a3, .LBB43_6
5299; CHECK-NOV-NEXT:  .LBB43_15: # %entry
5300; CHECK-NOV-NEXT:    mv a5, a3
5301; CHECK-NOV-NEXT:    fcvt.lu.s a7, fs0, rtz
5302; CHECK-NOV-NEXT:    bltu a6, a3, .LBB43_7
5303; CHECK-NOV-NEXT:  .LBB43_16: # %entry
5304; CHECK-NOV-NEXT:    mv a6, a3
5305; CHECK-NOV-NEXT:    bgeu a7, a3, .LBB43_8
5306; CHECK-NOV-NEXT:    j .LBB43_9
5307;
5308; CHECK-V-LABEL: utesth_f16i16_mm:
5309; CHECK-V:       # %bb.0: # %entry
5310; CHECK-V-NEXT:    addi sp, sp, -80
5311; CHECK-V-NEXT:    .cfi_def_cfa_offset 80
5312; CHECK-V-NEXT:    sd ra, 72(sp) # 8-byte Folded Spill
5313; CHECK-V-NEXT:    sd s0, 64(sp) # 8-byte Folded Spill
5314; CHECK-V-NEXT:    sd s1, 56(sp) # 8-byte Folded Spill
5315; CHECK-V-NEXT:    sd s2, 48(sp) # 8-byte Folded Spill
5316; CHECK-V-NEXT:    sd s3, 40(sp) # 8-byte Folded Spill
5317; CHECK-V-NEXT:    sd s4, 32(sp) # 8-byte Folded Spill
5318; CHECK-V-NEXT:    sd s5, 24(sp) # 8-byte Folded Spill
5319; CHECK-V-NEXT:    sd s6, 16(sp) # 8-byte Folded Spill
5320; CHECK-V-NEXT:    .cfi_offset ra, -8
5321; CHECK-V-NEXT:    .cfi_offset s0, -16
5322; CHECK-V-NEXT:    .cfi_offset s1, -24
5323; CHECK-V-NEXT:    .cfi_offset s2, -32
5324; CHECK-V-NEXT:    .cfi_offset s3, -40
5325; CHECK-V-NEXT:    .cfi_offset s4, -48
5326; CHECK-V-NEXT:    .cfi_offset s5, -56
5327; CHECK-V-NEXT:    .cfi_offset s6, -64
5328; CHECK-V-NEXT:    csrr a1, vlenb
5329; CHECK-V-NEXT:    slli a1, a1, 2
5330; CHECK-V-NEXT:    sub sp, sp, a1
5331; CHECK-V-NEXT:    .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xd0, 0x00, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 80 + 4 * vlenb
5332; CHECK-V-NEXT:    lhu s0, 0(a0)
5333; CHECK-V-NEXT:    lhu s1, 8(a0)
5334; CHECK-V-NEXT:    lhu s2, 16(a0)
5335; CHECK-V-NEXT:    lhu s3, 24(a0)
5336; CHECK-V-NEXT:    lhu s4, 32(a0)
5337; CHECK-V-NEXT:    lhu s5, 40(a0)
5338; CHECK-V-NEXT:    lhu s6, 48(a0)
5339; CHECK-V-NEXT:    lhu a0, 56(a0)
5340; CHECK-V-NEXT:    fmv.w.x fa0, a0
5341; CHECK-V-NEXT:    call __extendhfsf2
5342; CHECK-V-NEXT:    fcvt.lu.s a0, fa0, rtz
5343; CHECK-V-NEXT:    fmv.w.x fa0, s6
5344; CHECK-V-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
5345; CHECK-V-NEXT:    vmv.s.x v8, a0
5346; CHECK-V-NEXT:    csrr a0, vlenb
5347; CHECK-V-NEXT:    slli a0, a0, 1
5348; CHECK-V-NEXT:    add a0, sp, a0
5349; CHECK-V-NEXT:    addi a0, a0, 16
5350; CHECK-V-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
5351; CHECK-V-NEXT:    call __extendhfsf2
5352; CHECK-V-NEXT:    fcvt.lu.s a0, fa0, rtz
5353; CHECK-V-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
5354; CHECK-V-NEXT:    vmv.s.x v8, a0
5355; CHECK-V-NEXT:    csrr a0, vlenb
5356; CHECK-V-NEXT:    slli a0, a0, 1
5357; CHECK-V-NEXT:    add a0, sp, a0
5358; CHECK-V-NEXT:    addi a0, a0, 16
5359; CHECK-V-NEXT:    vl1r.v v9, (a0) # Unknown-size Folded Reload
5360; CHECK-V-NEXT:    vslideup.vi v8, v9, 1
5361; CHECK-V-NEXT:    csrr a0, vlenb
5362; CHECK-V-NEXT:    add a0, sp, a0
5363; CHECK-V-NEXT:    addi a0, a0, 16
5364; CHECK-V-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
5365; CHECK-V-NEXT:    fmv.w.x fa0, s5
5366; CHECK-V-NEXT:    call __extendhfsf2
5367; CHECK-V-NEXT:    fcvt.lu.s a0, fa0, rtz
5368; CHECK-V-NEXT:    fmv.w.x fa0, s4
5369; CHECK-V-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
5370; CHECK-V-NEXT:    vmv.s.x v8, a0
5371; CHECK-V-NEXT:    csrr a0, vlenb
5372; CHECK-V-NEXT:    slli a0, a0, 1
5373; CHECK-V-NEXT:    add a0, sp, a0
5374; CHECK-V-NEXT:    addi a0, a0, 16
5375; CHECK-V-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
5376; CHECK-V-NEXT:    call __extendhfsf2
5377; CHECK-V-NEXT:    fcvt.lu.s a0, fa0, rtz
5378; CHECK-V-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
5379; CHECK-V-NEXT:    vmv.s.x v8, a0
5380; CHECK-V-NEXT:    csrr a0, vlenb
5381; CHECK-V-NEXT:    slli a0, a0, 1
5382; CHECK-V-NEXT:    add a0, sp, a0
5383; CHECK-V-NEXT:    addi a0, a0, 16
5384; CHECK-V-NEXT:    vl1r.v v9, (a0) # Unknown-size Folded Reload
5385; CHECK-V-NEXT:    vslideup.vi v8, v9, 1
5386; CHECK-V-NEXT:    csrr a0, vlenb
5387; CHECK-V-NEXT:    add a0, sp, a0
5388; CHECK-V-NEXT:    addi a0, a0, 16
5389; CHECK-V-NEXT:    vl1r.v v9, (a0) # Unknown-size Folded Reload
5390; CHECK-V-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
5391; CHECK-V-NEXT:    vslideup.vi v8, v9, 2
5392; CHECK-V-NEXT:    csrr a0, vlenb
5393; CHECK-V-NEXT:    slli a0, a0, 1
5394; CHECK-V-NEXT:    add a0, sp, a0
5395; CHECK-V-NEXT:    addi a0, a0, 16
5396; CHECK-V-NEXT:    vs2r.v v8, (a0) # Unknown-size Folded Spill
5397; CHECK-V-NEXT:    fmv.w.x fa0, s3
5398; CHECK-V-NEXT:    call __extendhfsf2
5399; CHECK-V-NEXT:    fcvt.lu.s a0, fa0, rtz
5400; CHECK-V-NEXT:    fmv.w.x fa0, s2
5401; CHECK-V-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
5402; CHECK-V-NEXT:    vmv.s.x v8, a0
5403; CHECK-V-NEXT:    addi a0, sp, 16
5404; CHECK-V-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
5405; CHECK-V-NEXT:    call __extendhfsf2
5406; CHECK-V-NEXT:    fcvt.lu.s a0, fa0, rtz
5407; CHECK-V-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
5408; CHECK-V-NEXT:    vmv.s.x v8, a0
5409; CHECK-V-NEXT:    addi a0, sp, 16
5410; CHECK-V-NEXT:    vl1r.v v9, (a0) # Unknown-size Folded Reload
5411; CHECK-V-NEXT:    vslideup.vi v8, v9, 1
5412; CHECK-V-NEXT:    csrr a0, vlenb
5413; CHECK-V-NEXT:    add a0, sp, a0
5414; CHECK-V-NEXT:    addi a0, a0, 16
5415; CHECK-V-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
5416; CHECK-V-NEXT:    fmv.w.x fa0, s1
5417; CHECK-V-NEXT:    call __extendhfsf2
5418; CHECK-V-NEXT:    fcvt.lu.s a0, fa0, rtz
5419; CHECK-V-NEXT:    fmv.w.x fa0, s0
5420; CHECK-V-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
5421; CHECK-V-NEXT:    vmv.s.x v8, a0
5422; CHECK-V-NEXT:    addi a0, sp, 16
5423; CHECK-V-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
5424; CHECK-V-NEXT:    call __extendhfsf2
5425; CHECK-V-NEXT:    fcvt.lu.s a0, fa0, rtz
5426; CHECK-V-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
5427; CHECK-V-NEXT:    vmv.s.x v10, a0
5428; CHECK-V-NEXT:    addi a0, sp, 16
5429; CHECK-V-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
5430; CHECK-V-NEXT:    vslideup.vi v10, v8, 1
5431; CHECK-V-NEXT:    csrr a0, vlenb
5432; CHECK-V-NEXT:    add a0, sp, a0
5433; CHECK-V-NEXT:    addi a0, a0, 16
5434; CHECK-V-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
5435; CHECK-V-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
5436; CHECK-V-NEXT:    vslideup.vi v10, v8, 2
5437; CHECK-V-NEXT:    csrr a0, vlenb
5438; CHECK-V-NEXT:    slli a0, a0, 1
5439; CHECK-V-NEXT:    add a0, sp, a0
5440; CHECK-V-NEXT:    addi a0, a0, 16
5441; CHECK-V-NEXT:    vl2r.v v8, (a0) # Unknown-size Folded Reload
5442; CHECK-V-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
5443; CHECK-V-NEXT:    vslideup.vi v10, v8, 4
5444; CHECK-V-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
5445; CHECK-V-NEXT:    vnclipu.wi v8, v10, 0
5446; CHECK-V-NEXT:    csrr a0, vlenb
5447; CHECK-V-NEXT:    slli a0, a0, 2
5448; CHECK-V-NEXT:    add sp, sp, a0
5449; CHECK-V-NEXT:    .cfi_def_cfa sp, 80
5450; CHECK-V-NEXT:    ld ra, 72(sp) # 8-byte Folded Reload
5451; CHECK-V-NEXT:    ld s0, 64(sp) # 8-byte Folded Reload
5452; CHECK-V-NEXT:    ld s1, 56(sp) # 8-byte Folded Reload
5453; CHECK-V-NEXT:    ld s2, 48(sp) # 8-byte Folded Reload
5454; CHECK-V-NEXT:    ld s3, 40(sp) # 8-byte Folded Reload
5455; CHECK-V-NEXT:    ld s4, 32(sp) # 8-byte Folded Reload
5456; CHECK-V-NEXT:    ld s5, 24(sp) # 8-byte Folded Reload
5457; CHECK-V-NEXT:    ld s6, 16(sp) # 8-byte Folded Reload
5458; CHECK-V-NEXT:    .cfi_restore ra
5459; CHECK-V-NEXT:    .cfi_restore s0
5460; CHECK-V-NEXT:    .cfi_restore s1
5461; CHECK-V-NEXT:    .cfi_restore s2
5462; CHECK-V-NEXT:    .cfi_restore s3
5463; CHECK-V-NEXT:    .cfi_restore s4
5464; CHECK-V-NEXT:    .cfi_restore s5
5465; CHECK-V-NEXT:    .cfi_restore s6
5466; CHECK-V-NEXT:    addi sp, sp, 80
5467; CHECK-V-NEXT:    .cfi_def_cfa_offset 0
5468; CHECK-V-NEXT:    ret
5469entry:
5470  %conv = fptoui <8 x half> %x to <8 x i32>
5471  %spec.store.select = call <8 x i32> @llvm.umin.v8i32(<8 x i32> %conv, <8 x i32> <i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535>)
5472  %conv6 = trunc <8 x i32> %spec.store.select to <8 x i16>
5473  ret <8 x i16> %conv6
5474}
5475
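; ustest_f16i16_mm: fptosi <8 x half> to <8 x i32>, clamp to [0, 65535] via @llvm.smin/@llvm.smax, then truncate
; to <8 x i16>; the CHECK-V lowering clamps with vmax.vx against zero before narrowing with vnclipu.wi.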
5476define <8 x i16> @ustest_f16i16_mm(<8 x half> %x) {
5477; CHECK-NOV-LABEL: ustest_f16i16_mm:
5478; CHECK-NOV:       # %bb.0: # %entry
5479; CHECK-NOV-NEXT:    addi sp, sp, -128
5480; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 128
5481; CHECK-NOV-NEXT:    sd ra, 120(sp) # 8-byte Folded Spill
5482; CHECK-NOV-NEXT:    sd s0, 112(sp) # 8-byte Folded Spill
5483; CHECK-NOV-NEXT:    sd s1, 104(sp) # 8-byte Folded Spill
5484; CHECK-NOV-NEXT:    sd s2, 96(sp) # 8-byte Folded Spill
5485; CHECK-NOV-NEXT:    sd s3, 88(sp) # 8-byte Folded Spill
5486; CHECK-NOV-NEXT:    sd s4, 80(sp) # 8-byte Folded Spill
5487; CHECK-NOV-NEXT:    sd s5, 72(sp) # 8-byte Folded Spill
5488; CHECK-NOV-NEXT:    sd s6, 64(sp) # 8-byte Folded Spill
5489; CHECK-NOV-NEXT:    sd s7, 56(sp) # 8-byte Folded Spill
5490; CHECK-NOV-NEXT:    fsd fs0, 48(sp) # 8-byte Folded Spill
5491; CHECK-NOV-NEXT:    fsd fs1, 40(sp) # 8-byte Folded Spill
5492; CHECK-NOV-NEXT:    fsd fs2, 32(sp) # 8-byte Folded Spill
5493; CHECK-NOV-NEXT:    fsd fs3, 24(sp) # 8-byte Folded Spill
5494; CHECK-NOV-NEXT:    fsd fs4, 16(sp) # 8-byte Folded Spill
5495; CHECK-NOV-NEXT:    fsd fs5, 8(sp) # 8-byte Folded Spill
5496; CHECK-NOV-NEXT:    fsd fs6, 0(sp) # 8-byte Folded Spill
5497; CHECK-NOV-NEXT:    .cfi_offset ra, -8
5498; CHECK-NOV-NEXT:    .cfi_offset s0, -16
5499; CHECK-NOV-NEXT:    .cfi_offset s1, -24
5500; CHECK-NOV-NEXT:    .cfi_offset s2, -32
5501; CHECK-NOV-NEXT:    .cfi_offset s3, -40
5502; CHECK-NOV-NEXT:    .cfi_offset s4, -48
5503; CHECK-NOV-NEXT:    .cfi_offset s5, -56
5504; CHECK-NOV-NEXT:    .cfi_offset s6, -64
5505; CHECK-NOV-NEXT:    .cfi_offset s7, -72
5506; CHECK-NOV-NEXT:    .cfi_offset fs0, -80
5507; CHECK-NOV-NEXT:    .cfi_offset fs1, -88
5508; CHECK-NOV-NEXT:    .cfi_offset fs2, -96
5509; CHECK-NOV-NEXT:    .cfi_offset fs3, -104
5510; CHECK-NOV-NEXT:    .cfi_offset fs4, -112
5511; CHECK-NOV-NEXT:    .cfi_offset fs5, -120
5512; CHECK-NOV-NEXT:    .cfi_offset fs6, -128
5513; CHECK-NOV-NEXT:    .cfi_remember_state
5514; CHECK-NOV-NEXT:    lhu s1, 32(a1)
5515; CHECK-NOV-NEXT:    lhu s2, 40(a1)
5516; CHECK-NOV-NEXT:    lhu a2, 48(a1)
5517; CHECK-NOV-NEXT:    lhu s3, 56(a1)
5518; CHECK-NOV-NEXT:    lhu s4, 0(a1)
5519; CHECK-NOV-NEXT:    lhu s5, 8(a1)
5520; CHECK-NOV-NEXT:    lhu s6, 16(a1)
5521; CHECK-NOV-NEXT:    lhu s7, 24(a1)
5522; CHECK-NOV-NEXT:    mv s0, a0
5523; CHECK-NOV-NEXT:    fmv.w.x fa0, a2
5524; CHECK-NOV-NEXT:    call __extendhfsf2
5525; CHECK-NOV-NEXT:    fmv.s fs6, fa0
5526; CHECK-NOV-NEXT:    fmv.w.x fa0, s2
5527; CHECK-NOV-NEXT:    call __extendhfsf2
5528; CHECK-NOV-NEXT:    fmv.s fs5, fa0
5529; CHECK-NOV-NEXT:    fmv.w.x fa0, s1
5530; CHECK-NOV-NEXT:    call __extendhfsf2
5531; CHECK-NOV-NEXT:    fmv.s fs4, fa0
5532; CHECK-NOV-NEXT:    fmv.w.x fa0, s7
5533; CHECK-NOV-NEXT:    call __extendhfsf2
5534; CHECK-NOV-NEXT:    fmv.s fs3, fa0
5535; CHECK-NOV-NEXT:    fmv.w.x fa0, s6
5536; CHECK-NOV-NEXT:    call __extendhfsf2
5537; CHECK-NOV-NEXT:    fmv.s fs2, fa0
5538; CHECK-NOV-NEXT:    fmv.w.x fa0, s5
5539; CHECK-NOV-NEXT:    call __extendhfsf2
5540; CHECK-NOV-NEXT:    fmv.s fs1, fa0
5541; CHECK-NOV-NEXT:    fmv.w.x fa0, s4
5542; CHECK-NOV-NEXT:    call __extendhfsf2
5543; CHECK-NOV-NEXT:    fmv.s fs0, fa0
5544; CHECK-NOV-NEXT:    fmv.w.x fa0, s3
5545; CHECK-NOV-NEXT:    fcvt.l.s s1, fs6, rtz
5546; CHECK-NOV-NEXT:    call __extendhfsf2
5547; CHECK-NOV-NEXT:    fcvt.l.s a0, fa0, rtz
5548; CHECK-NOV-NEXT:    lui a3, 16
5549; CHECK-NOV-NEXT:    addiw a3, a3, -1
5550; CHECK-NOV-NEXT:    bge a0, a3, .LBB44_10
5551; CHECK-NOV-NEXT:  # %bb.1: # %entry
5552; CHECK-NOV-NEXT:    fcvt.l.s a1, fs5, rtz
5553; CHECK-NOV-NEXT:    bge s1, a3, .LBB44_11
5554; CHECK-NOV-NEXT:  .LBB44_2: # %entry
5555; CHECK-NOV-NEXT:    fcvt.l.s a2, fs4, rtz
5556; CHECK-NOV-NEXT:    bge a1, a3, .LBB44_12
5557; CHECK-NOV-NEXT:  .LBB44_3: # %entry
5558; CHECK-NOV-NEXT:    fcvt.l.s a4, fs3, rtz
5559; CHECK-NOV-NEXT:    bge a2, a3, .LBB44_13
5560; CHECK-NOV-NEXT:  .LBB44_4: # %entry
5561; CHECK-NOV-NEXT:    fcvt.l.s a5, fs2, rtz
5562; CHECK-NOV-NEXT:    bge a4, a3, .LBB44_14
5563; CHECK-NOV-NEXT:  .LBB44_5: # %entry
5564; CHECK-NOV-NEXT:    fcvt.l.s a6, fs1, rtz
5565; CHECK-NOV-NEXT:    bge a5, a3, .LBB44_15
5566; CHECK-NOV-NEXT:  .LBB44_6: # %entry
5567; CHECK-NOV-NEXT:    fcvt.l.s a7, fs0, rtz
5568; CHECK-NOV-NEXT:    bge a6, a3, .LBB44_16
5569; CHECK-NOV-NEXT:  .LBB44_7: # %entry
5570; CHECK-NOV-NEXT:    blt a7, a3, .LBB44_9
5571; CHECK-NOV-NEXT:  .LBB44_8: # %entry
5572; CHECK-NOV-NEXT:    mv a7, a3
5573; CHECK-NOV-NEXT:  .LBB44_9: # %entry
5574; CHECK-NOV-NEXT:    sgtz a3, a7
5575; CHECK-NOV-NEXT:    negw a3, a3
5576; CHECK-NOV-NEXT:    and a3, a3, a7
5577; CHECK-NOV-NEXT:    sgtz a7, a6
5578; CHECK-NOV-NEXT:    negw a7, a7
5579; CHECK-NOV-NEXT:    and a6, a7, a6
5580; CHECK-NOV-NEXT:    sgtz a7, a5
5581; CHECK-NOV-NEXT:    negw a7, a7
5582; CHECK-NOV-NEXT:    and a5, a7, a5
5583; CHECK-NOV-NEXT:    sgtz a7, a4
5584; CHECK-NOV-NEXT:    negw a7, a7
5585; CHECK-NOV-NEXT:    and a4, a7, a4
5586; CHECK-NOV-NEXT:    sgtz a7, a2
5587; CHECK-NOV-NEXT:    negw a7, a7
5588; CHECK-NOV-NEXT:    and a2, a7, a2
5589; CHECK-NOV-NEXT:    sgtz a7, a1
5590; CHECK-NOV-NEXT:    negw a7, a7
5591; CHECK-NOV-NEXT:    and a1, a7, a1
5592; CHECK-NOV-NEXT:    sgtz a7, s1
5593; CHECK-NOV-NEXT:    negw a7, a7
5594; CHECK-NOV-NEXT:    and a7, a7, s1
5595; CHECK-NOV-NEXT:    sgtz t0, a0
5596; CHECK-NOV-NEXT:    negw t0, t0
5597; CHECK-NOV-NEXT:    and a0, t0, a0
5598; CHECK-NOV-NEXT:    sh a2, 8(s0)
5599; CHECK-NOV-NEXT:    sh a1, 10(s0)
5600; CHECK-NOV-NEXT:    sh a7, 12(s0)
5601; CHECK-NOV-NEXT:    sh a0, 14(s0)
5602; CHECK-NOV-NEXT:    sh a3, 0(s0)
5603; CHECK-NOV-NEXT:    sh a6, 2(s0)
5604; CHECK-NOV-NEXT:    sh a5, 4(s0)
5605; CHECK-NOV-NEXT:    sh a4, 6(s0)
5606; CHECK-NOV-NEXT:    ld ra, 120(sp) # 8-byte Folded Reload
5607; CHECK-NOV-NEXT:    ld s0, 112(sp) # 8-byte Folded Reload
5608; CHECK-NOV-NEXT:    ld s1, 104(sp) # 8-byte Folded Reload
5609; CHECK-NOV-NEXT:    ld s2, 96(sp) # 8-byte Folded Reload
5610; CHECK-NOV-NEXT:    ld s3, 88(sp) # 8-byte Folded Reload
5611; CHECK-NOV-NEXT:    ld s4, 80(sp) # 8-byte Folded Reload
5612; CHECK-NOV-NEXT:    ld s5, 72(sp) # 8-byte Folded Reload
5613; CHECK-NOV-NEXT:    ld s6, 64(sp) # 8-byte Folded Reload
5614; CHECK-NOV-NEXT:    ld s7, 56(sp) # 8-byte Folded Reload
5615; CHECK-NOV-NEXT:    fld fs0, 48(sp) # 8-byte Folded Reload
5616; CHECK-NOV-NEXT:    fld fs1, 40(sp) # 8-byte Folded Reload
5617; CHECK-NOV-NEXT:    fld fs2, 32(sp) # 8-byte Folded Reload
5618; CHECK-NOV-NEXT:    fld fs3, 24(sp) # 8-byte Folded Reload
5619; CHECK-NOV-NEXT:    fld fs4, 16(sp) # 8-byte Folded Reload
5620; CHECK-NOV-NEXT:    fld fs5, 8(sp) # 8-byte Folded Reload
5621; CHECK-NOV-NEXT:    fld fs6, 0(sp) # 8-byte Folded Reload
5622; CHECK-NOV-NEXT:    .cfi_restore ra
5623; CHECK-NOV-NEXT:    .cfi_restore s0
5624; CHECK-NOV-NEXT:    .cfi_restore s1
5625; CHECK-NOV-NEXT:    .cfi_restore s2
5626; CHECK-NOV-NEXT:    .cfi_restore s3
5627; CHECK-NOV-NEXT:    .cfi_restore s4
5628; CHECK-NOV-NEXT:    .cfi_restore s5
5629; CHECK-NOV-NEXT:    .cfi_restore s6
5630; CHECK-NOV-NEXT:    .cfi_restore s7
5631; CHECK-NOV-NEXT:    .cfi_restore fs0
5632; CHECK-NOV-NEXT:    .cfi_restore fs1
5633; CHECK-NOV-NEXT:    .cfi_restore fs2
5634; CHECK-NOV-NEXT:    .cfi_restore fs3
5635; CHECK-NOV-NEXT:    .cfi_restore fs4
5636; CHECK-NOV-NEXT:    .cfi_restore fs5
5637; CHECK-NOV-NEXT:    .cfi_restore fs6
5638; CHECK-NOV-NEXT:    addi sp, sp, 128
5639; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 0
5640; CHECK-NOV-NEXT:    ret
5641; CHECK-NOV-NEXT:  .LBB44_10: # %entry
5642; CHECK-NOV-NEXT:    .cfi_restore_state
5643; CHECK-NOV-NEXT:    mv a0, a3
5644; CHECK-NOV-NEXT:    fcvt.l.s a1, fs5, rtz
5645; CHECK-NOV-NEXT:    blt s1, a3, .LBB44_2
5646; CHECK-NOV-NEXT:  .LBB44_11: # %entry
5647; CHECK-NOV-NEXT:    mv s1, a3
5648; CHECK-NOV-NEXT:    fcvt.l.s a2, fs4, rtz
5649; CHECK-NOV-NEXT:    blt a1, a3, .LBB44_3
5650; CHECK-NOV-NEXT:  .LBB44_12: # %entry
5651; CHECK-NOV-NEXT:    mv a1, a3
5652; CHECK-NOV-NEXT:    fcvt.l.s a4, fs3, rtz
5653; CHECK-NOV-NEXT:    blt a2, a3, .LBB44_4
5654; CHECK-NOV-NEXT:  .LBB44_13: # %entry
5655; CHECK-NOV-NEXT:    mv a2, a3
5656; CHECK-NOV-NEXT:    fcvt.l.s a5, fs2, rtz
5657; CHECK-NOV-NEXT:    blt a4, a3, .LBB44_5
5658; CHECK-NOV-NEXT:  .LBB44_14: # %entry
5659; CHECK-NOV-NEXT:    mv a4, a3
5660; CHECK-NOV-NEXT:    fcvt.l.s a6, fs1, rtz
5661; CHECK-NOV-NEXT:    blt a5, a3, .LBB44_6
5662; CHECK-NOV-NEXT:  .LBB44_15: # %entry
5663; CHECK-NOV-NEXT:    mv a5, a3
5664; CHECK-NOV-NEXT:    fcvt.l.s a7, fs0, rtz
5665; CHECK-NOV-NEXT:    blt a6, a3, .LBB44_7
5666; CHECK-NOV-NEXT:  .LBB44_16: # %entry
5667; CHECK-NOV-NEXT:    mv a6, a3
5668; CHECK-NOV-NEXT:    bge a7, a3, .LBB44_8
5669; CHECK-NOV-NEXT:    j .LBB44_9
5670;
5671; CHECK-V-LABEL: ustest_f16i16_mm:
5672; CHECK-V:       # %bb.0: # %entry
5673; CHECK-V-NEXT:    addi sp, sp, -80
5674; CHECK-V-NEXT:    .cfi_def_cfa_offset 80
5675; CHECK-V-NEXT:    sd ra, 72(sp) # 8-byte Folded Spill
5676; CHECK-V-NEXT:    sd s0, 64(sp) # 8-byte Folded Spill
5677; CHECK-V-NEXT:    sd s1, 56(sp) # 8-byte Folded Spill
5678; CHECK-V-NEXT:    sd s2, 48(sp) # 8-byte Folded Spill
5679; CHECK-V-NEXT:    sd s3, 40(sp) # 8-byte Folded Spill
5680; CHECK-V-NEXT:    sd s4, 32(sp) # 8-byte Folded Spill
5681; CHECK-V-NEXT:    sd s5, 24(sp) # 8-byte Folded Spill
5682; CHECK-V-NEXT:    sd s6, 16(sp) # 8-byte Folded Spill
5683; CHECK-V-NEXT:    .cfi_offset ra, -8
5684; CHECK-V-NEXT:    .cfi_offset s0, -16
5685; CHECK-V-NEXT:    .cfi_offset s1, -24
5686; CHECK-V-NEXT:    .cfi_offset s2, -32
5687; CHECK-V-NEXT:    .cfi_offset s3, -40
5688; CHECK-V-NEXT:    .cfi_offset s4, -48
5689; CHECK-V-NEXT:    .cfi_offset s5, -56
5690; CHECK-V-NEXT:    .cfi_offset s6, -64
5691; CHECK-V-NEXT:    csrr a1, vlenb
5692; CHECK-V-NEXT:    slli a1, a1, 2
5693; CHECK-V-NEXT:    sub sp, sp, a1
5694; CHECK-V-NEXT:    .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xd0, 0x00, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 80 + 4 * vlenb
5695; CHECK-V-NEXT:    lhu s0, 0(a0)
5696; CHECK-V-NEXT:    lhu s1, 8(a0)
5697; CHECK-V-NEXT:    lhu s2, 16(a0)
5698; CHECK-V-NEXT:    lhu s3, 24(a0)
5699; CHECK-V-NEXT:    lhu s4, 32(a0)
5700; CHECK-V-NEXT:    lhu s5, 40(a0)
5701; CHECK-V-NEXT:    lhu s6, 48(a0)
5702; CHECK-V-NEXT:    lhu a0, 56(a0)
5703; CHECK-V-NEXT:    fmv.w.x fa0, a0
5704; CHECK-V-NEXT:    call __extendhfsf2
5705; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
5706; CHECK-V-NEXT:    fmv.w.x fa0, s6
5707; CHECK-V-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
5708; CHECK-V-NEXT:    vmv.s.x v8, a0
5709; CHECK-V-NEXT:    csrr a0, vlenb
5710; CHECK-V-NEXT:    slli a0, a0, 1
5711; CHECK-V-NEXT:    add a0, sp, a0
5712; CHECK-V-NEXT:    addi a0, a0, 16
5713; CHECK-V-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
5714; CHECK-V-NEXT:    call __extendhfsf2
5715; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
5716; CHECK-V-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
5717; CHECK-V-NEXT:    vmv.s.x v8, a0
5718; CHECK-V-NEXT:    csrr a0, vlenb
5719; CHECK-V-NEXT:    slli a0, a0, 1
5720; CHECK-V-NEXT:    add a0, sp, a0
5721; CHECK-V-NEXT:    addi a0, a0, 16
5722; CHECK-V-NEXT:    vl1r.v v9, (a0) # Unknown-size Folded Reload
5723; CHECK-V-NEXT:    vslideup.vi v8, v9, 1
5724; CHECK-V-NEXT:    csrr a0, vlenb
5725; CHECK-V-NEXT:    add a0, sp, a0
5726; CHECK-V-NEXT:    addi a0, a0, 16
5727; CHECK-V-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
5728; CHECK-V-NEXT:    fmv.w.x fa0, s5
5729; CHECK-V-NEXT:    call __extendhfsf2
5730; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
5731; CHECK-V-NEXT:    fmv.w.x fa0, s4
5732; CHECK-V-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
5733; CHECK-V-NEXT:    vmv.s.x v8, a0
5734; CHECK-V-NEXT:    csrr a0, vlenb
5735; CHECK-V-NEXT:    slli a0, a0, 1
5736; CHECK-V-NEXT:    add a0, sp, a0
5737; CHECK-V-NEXT:    addi a0, a0, 16
5738; CHECK-V-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
5739; CHECK-V-NEXT:    call __extendhfsf2
5740; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
5741; CHECK-V-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
5742; CHECK-V-NEXT:    vmv.s.x v8, a0
5743; CHECK-V-NEXT:    csrr a0, vlenb
5744; CHECK-V-NEXT:    slli a0, a0, 1
5745; CHECK-V-NEXT:    add a0, sp, a0
5746; CHECK-V-NEXT:    addi a0, a0, 16
5747; CHECK-V-NEXT:    vl1r.v v9, (a0) # Unknown-size Folded Reload
5748; CHECK-V-NEXT:    vslideup.vi v8, v9, 1
5749; CHECK-V-NEXT:    csrr a0, vlenb
5750; CHECK-V-NEXT:    add a0, sp, a0
5751; CHECK-V-NEXT:    addi a0, a0, 16
5752; CHECK-V-NEXT:    vl1r.v v9, (a0) # Unknown-size Folded Reload
5753; CHECK-V-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
5754; CHECK-V-NEXT:    vslideup.vi v8, v9, 2
5755; CHECK-V-NEXT:    csrr a0, vlenb
5756; CHECK-V-NEXT:    slli a0, a0, 1
5757; CHECK-V-NEXT:    add a0, sp, a0
5758; CHECK-V-NEXT:    addi a0, a0, 16
5759; CHECK-V-NEXT:    vs2r.v v8, (a0) # Unknown-size Folded Spill
5760; CHECK-V-NEXT:    fmv.w.x fa0, s3
5761; CHECK-V-NEXT:    call __extendhfsf2
5762; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
5763; CHECK-V-NEXT:    fmv.w.x fa0, s2
5764; CHECK-V-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
5765; CHECK-V-NEXT:    vmv.s.x v8, a0
5766; CHECK-V-NEXT:    addi a0, sp, 16
5767; CHECK-V-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
5768; CHECK-V-NEXT:    call __extendhfsf2
5769; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
5770; CHECK-V-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
5771; CHECK-V-NEXT:    vmv.s.x v8, a0
5772; CHECK-V-NEXT:    addi a0, sp, 16
5773; CHECK-V-NEXT:    vl1r.v v9, (a0) # Unknown-size Folded Reload
5774; CHECK-V-NEXT:    vslideup.vi v8, v9, 1
5775; CHECK-V-NEXT:    csrr a0, vlenb
5776; CHECK-V-NEXT:    add a0, sp, a0
5777; CHECK-V-NEXT:    addi a0, a0, 16
5778; CHECK-V-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
5779; CHECK-V-NEXT:    fmv.w.x fa0, s1
5780; CHECK-V-NEXT:    call __extendhfsf2
5781; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
5782; CHECK-V-NEXT:    fmv.w.x fa0, s0
5783; CHECK-V-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
5784; CHECK-V-NEXT:    vmv.s.x v8, a0
5785; CHECK-V-NEXT:    addi a0, sp, 16
5786; CHECK-V-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
5787; CHECK-V-NEXT:    call __extendhfsf2
5788; CHECK-V-NEXT:    fcvt.l.s a0, fa0, rtz
5789; CHECK-V-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
5790; CHECK-V-NEXT:    vmv.s.x v8, a0
5791; CHECK-V-NEXT:    addi a0, sp, 16
5792; CHECK-V-NEXT:    vl1r.v v9, (a0) # Unknown-size Folded Reload
5793; CHECK-V-NEXT:    vslideup.vi v8, v9, 1
5794; CHECK-V-NEXT:    csrr a0, vlenb
5795; CHECK-V-NEXT:    add a0, sp, a0
5796; CHECK-V-NEXT:    addi a0, a0, 16
5797; CHECK-V-NEXT:    vl1r.v v9, (a0) # Unknown-size Folded Reload
5798; CHECK-V-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
5799; CHECK-V-NEXT:    vslideup.vi v8, v9, 2
5800; CHECK-V-NEXT:    csrr a0, vlenb
5801; CHECK-V-NEXT:    slli a0, a0, 1
5802; CHECK-V-NEXT:    add a0, sp, a0
5803; CHECK-V-NEXT:    addi a0, a0, 16
5804; CHECK-V-NEXT:    vl2r.v v10, (a0) # Unknown-size Folded Reload
5805; CHECK-V-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
5806; CHECK-V-NEXT:    vslideup.vi v8, v10, 4
5807; CHECK-V-NEXT:    vmax.vx v10, v8, zero
5808; CHECK-V-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
5809; CHECK-V-NEXT:    vnclipu.wi v8, v10, 0
5810; CHECK-V-NEXT:    csrr a0, vlenb
5811; CHECK-V-NEXT:    slli a0, a0, 2
5812; CHECK-V-NEXT:    add sp, sp, a0
5813; CHECK-V-NEXT:    .cfi_def_cfa sp, 80
5814; CHECK-V-NEXT:    ld ra, 72(sp) # 8-byte Folded Reload
5815; CHECK-V-NEXT:    ld s0, 64(sp) # 8-byte Folded Reload
5816; CHECK-V-NEXT:    ld s1, 56(sp) # 8-byte Folded Reload
5817; CHECK-V-NEXT:    ld s2, 48(sp) # 8-byte Folded Reload
5818; CHECK-V-NEXT:    ld s3, 40(sp) # 8-byte Folded Reload
5819; CHECK-V-NEXT:    ld s4, 32(sp) # 8-byte Folded Reload
5820; CHECK-V-NEXT:    ld s5, 24(sp) # 8-byte Folded Reload
5821; CHECK-V-NEXT:    ld s6, 16(sp) # 8-byte Folded Reload
5822; CHECK-V-NEXT:    .cfi_restore ra
5823; CHECK-V-NEXT:    .cfi_restore s0
5824; CHECK-V-NEXT:    .cfi_restore s1
5825; CHECK-V-NEXT:    .cfi_restore s2
5826; CHECK-V-NEXT:    .cfi_restore s3
5827; CHECK-V-NEXT:    .cfi_restore s4
5828; CHECK-V-NEXT:    .cfi_restore s5
5829; CHECK-V-NEXT:    .cfi_restore s6
5830; CHECK-V-NEXT:    addi sp, sp, 80
5831; CHECK-V-NEXT:    .cfi_def_cfa_offset 0
5832; CHECK-V-NEXT:    ret
5833entry:
5834  %conv = fptosi <8 x half> %x to <8 x i32>
5835  %spec.store.select = call <8 x i32> @llvm.smin.v8i32(<8 x i32> %conv, <8 x i32> <i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535, i32 65535>)
5836  %spec.store.select7 = call <8 x i32> @llvm.smax.v8i32(<8 x i32> %spec.store.select, <8 x i32> zeroinitializer)
5837  %conv6 = trunc <8 x i32> %spec.store.select7 to <8 x i16>
5838  ret <8 x i16> %conv6
5839}
5840
5841; i64 saturate
5842
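; stest_f64i64_mm: fptosi <2 x double> to <2 x i128>, clamp to the signed i64 range via @llvm.smin/@llvm.smax,
; then truncate to <2 x i64>; the conversion is scalarized through __fixdfti in both lowerings.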
5843define <2 x i64> @stest_f64i64_mm(<2 x double> %x) {
5844; CHECK-NOV-LABEL: stest_f64i64_mm:
5845; CHECK-NOV:       # %bb.0: # %entry
5846; CHECK-NOV-NEXT:    addi sp, sp, -32
5847; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 32
5848; CHECK-NOV-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
5849; CHECK-NOV-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
5850; CHECK-NOV-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
5851; CHECK-NOV-NEXT:    fsd fs0, 0(sp) # 8-byte Folded Spill
5852; CHECK-NOV-NEXT:    .cfi_offset ra, -8
5853; CHECK-NOV-NEXT:    .cfi_offset s0, -16
5854; CHECK-NOV-NEXT:    .cfi_offset s1, -24
5855; CHECK-NOV-NEXT:    .cfi_offset fs0, -32
5856; CHECK-NOV-NEXT:    fmv.d fs0, fa1
5857; CHECK-NOV-NEXT:    call __fixdfti
5858; CHECK-NOV-NEXT:    mv s0, a0
5859; CHECK-NOV-NEXT:    mv s1, a1
5860; CHECK-NOV-NEXT:    fmv.d fa0, fs0
5861; CHECK-NOV-NEXT:    call __fixdfti
5862; CHECK-NOV-NEXT:    mv a2, a0
5863; CHECK-NOV-NEXT:    li a0, -1
5864; CHECK-NOV-NEXT:    srli a3, a0, 1
5865; CHECK-NOV-NEXT:    beqz a1, .LBB45_2
5866; CHECK-NOV-NEXT:  # %bb.1: # %entry
5867; CHECK-NOV-NEXT:    slti a4, a1, 0
5868; CHECK-NOV-NEXT:    beqz a4, .LBB45_3
5869; CHECK-NOV-NEXT:    j .LBB45_4
5870; CHECK-NOV-NEXT:  .LBB45_2:
5871; CHECK-NOV-NEXT:    sltu a4, a2, a3
5872; CHECK-NOV-NEXT:    bnez a4, .LBB45_4
5873; CHECK-NOV-NEXT:  .LBB45_3: # %entry
5874; CHECK-NOV-NEXT:    mv a2, a3
5875; CHECK-NOV-NEXT:  .LBB45_4: # %entry
5876; CHECK-NOV-NEXT:    beqz s1, .LBB45_6
5877; CHECK-NOV-NEXT:  # %bb.5: # %entry
5878; CHECK-NOV-NEXT:    slti a6, s1, 0
5879; CHECK-NOV-NEXT:    j .LBB45_7
5880; CHECK-NOV-NEXT:  .LBB45_6:
5881; CHECK-NOV-NEXT:    sltu a6, s0, a3
5882; CHECK-NOV-NEXT:  .LBB45_7: # %entry
5883; CHECK-NOV-NEXT:    neg a5, a6
5884; CHECK-NOV-NEXT:    and a5, a5, s1
5885; CHECK-NOV-NEXT:    bnez a6, .LBB45_9
5886; CHECK-NOV-NEXT:  # %bb.8: # %entry
5887; CHECK-NOV-NEXT:    mv s0, a3
5888; CHECK-NOV-NEXT:  .LBB45_9: # %entry
5889; CHECK-NOV-NEXT:    neg a4, a4
5890; CHECK-NOV-NEXT:    slli a3, a0, 63
5891; CHECK-NOV-NEXT:    beq a5, a0, .LBB45_11
5892; CHECK-NOV-NEXT:  # %bb.10: # %entry
5893; CHECK-NOV-NEXT:    slti a5, a5, 0
5894; CHECK-NOV-NEXT:    xori a5, a5, 1
5895; CHECK-NOV-NEXT:    and a1, a4, a1
5896; CHECK-NOV-NEXT:    beqz a5, .LBB45_12
5897; CHECK-NOV-NEXT:    j .LBB45_13
5898; CHECK-NOV-NEXT:  .LBB45_11:
5899; CHECK-NOV-NEXT:    sltu a5, a3, s0
5900; CHECK-NOV-NEXT:    and a1, a4, a1
5901; CHECK-NOV-NEXT:    bnez a5, .LBB45_13
5902; CHECK-NOV-NEXT:  .LBB45_12: # %entry
5903; CHECK-NOV-NEXT:    mv s0, a3
5904; CHECK-NOV-NEXT:  .LBB45_13: # %entry
5905; CHECK-NOV-NEXT:    beq a1, a0, .LBB45_15
5906; CHECK-NOV-NEXT:  # %bb.14: # %entry
5907; CHECK-NOV-NEXT:    slti a0, a1, 0
5908; CHECK-NOV-NEXT:    xori a0, a0, 1
5909; CHECK-NOV-NEXT:    beqz a0, .LBB45_16
5910; CHECK-NOV-NEXT:    j .LBB45_17
5911; CHECK-NOV-NEXT:  .LBB45_15:
5912; CHECK-NOV-NEXT:    sltu a0, a3, a2
5913; CHECK-NOV-NEXT:    bnez a0, .LBB45_17
5914; CHECK-NOV-NEXT:  .LBB45_16: # %entry
5915; CHECK-NOV-NEXT:    mv a2, a3
5916; CHECK-NOV-NEXT:  .LBB45_17: # %entry
5917; CHECK-NOV-NEXT:    mv a0, s0
5918; CHECK-NOV-NEXT:    mv a1, a2
5919; CHECK-NOV-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
5920; CHECK-NOV-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
5921; CHECK-NOV-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
5922; CHECK-NOV-NEXT:    fld fs0, 0(sp) # 8-byte Folded Reload
5923; CHECK-NOV-NEXT:    .cfi_restore ra
5924; CHECK-NOV-NEXT:    .cfi_restore s0
5925; CHECK-NOV-NEXT:    .cfi_restore s1
5926; CHECK-NOV-NEXT:    .cfi_restore fs0
5927; CHECK-NOV-NEXT:    addi sp, sp, 32
5928; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 0
5929; CHECK-NOV-NEXT:    ret
5930;
5931; CHECK-V-LABEL: stest_f64i64_mm:
5932; CHECK-V:       # %bb.0: # %entry
5933; CHECK-V-NEXT:    addi sp, sp, -64
5934; CHECK-V-NEXT:    .cfi_def_cfa_offset 64
5935; CHECK-V-NEXT:    sd ra, 56(sp) # 8-byte Folded Spill
5936; CHECK-V-NEXT:    sd s0, 48(sp) # 8-byte Folded Spill
5937; CHECK-V-NEXT:    sd s1, 40(sp) # 8-byte Folded Spill
5938; CHECK-V-NEXT:    .cfi_offset ra, -8
5939; CHECK-V-NEXT:    .cfi_offset s0, -16
5940; CHECK-V-NEXT:    .cfi_offset s1, -24
5941; CHECK-V-NEXT:    csrr a0, vlenb
5942; CHECK-V-NEXT:    sub sp, sp, a0
5943; CHECK-V-NEXT:    .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x01, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 64 + 1 * vlenb
5944; CHECK-V-NEXT:    addi a0, sp, 32
5945; CHECK-V-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
5946; CHECK-V-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
5947; CHECK-V-NEXT:    vslidedown.vi v9, v8, 1
5948; CHECK-V-NEXT:    vfmv.f.s fa0, v9
5949; CHECK-V-NEXT:    call __fixdfti
5950; CHECK-V-NEXT:    mv s0, a0
5951; CHECK-V-NEXT:    mv s1, a1
5952; CHECK-V-NEXT:    fld fa0, 32(sp) # 8-byte Folded Reload
5953; CHECK-V-NEXT:    call __fixdfti
5954; CHECK-V-NEXT:    li a2, -1
5955; CHECK-V-NEXT:    srli a3, a2, 1
5956; CHECK-V-NEXT:    beqz a1, .LBB45_2
5957; CHECK-V-NEXT:  # %bb.1: # %entry
5958; CHECK-V-NEXT:    slti a4, a1, 0
5959; CHECK-V-NEXT:    beqz a4, .LBB45_3
5960; CHECK-V-NEXT:    j .LBB45_4
5961; CHECK-V-NEXT:  .LBB45_2:
5962; CHECK-V-NEXT:    sltu a4, a0, a3
5963; CHECK-V-NEXT:    bnez a4, .LBB45_4
5964; CHECK-V-NEXT:  .LBB45_3: # %entry
5965; CHECK-V-NEXT:    mv a0, a3
5966; CHECK-V-NEXT:  .LBB45_4: # %entry
5967; CHECK-V-NEXT:    beqz s1, .LBB45_6
5968; CHECK-V-NEXT:  # %bb.5: # %entry
5969; CHECK-V-NEXT:    slti a6, s1, 0
5970; CHECK-V-NEXT:    j .LBB45_7
5971; CHECK-V-NEXT:  .LBB45_6:
5972; CHECK-V-NEXT:    sltu a6, s0, a3
5973; CHECK-V-NEXT:  .LBB45_7: # %entry
5974; CHECK-V-NEXT:    neg a5, a6
5975; CHECK-V-NEXT:    and a5, a5, s1
5976; CHECK-V-NEXT:    bnez a6, .LBB45_9
5977; CHECK-V-NEXT:  # %bb.8: # %entry
5978; CHECK-V-NEXT:    mv s0, a3
5979; CHECK-V-NEXT:  .LBB45_9: # %entry
5980; CHECK-V-NEXT:    neg a4, a4
5981; CHECK-V-NEXT:    slli a3, a2, 63
5982; CHECK-V-NEXT:    beq a5, a2, .LBB45_11
5983; CHECK-V-NEXT:  # %bb.10: # %entry
5984; CHECK-V-NEXT:    slti a5, a5, 0
5985; CHECK-V-NEXT:    xori a5, a5, 1
5986; CHECK-V-NEXT:    and a1, a4, a1
5987; CHECK-V-NEXT:    beqz a5, .LBB45_12
5988; CHECK-V-NEXT:    j .LBB45_13
5989; CHECK-V-NEXT:  .LBB45_11:
5990; CHECK-V-NEXT:    sltu a5, a3, s0
5991; CHECK-V-NEXT:    and a1, a4, a1
5992; CHECK-V-NEXT:    bnez a5, .LBB45_13
5993; CHECK-V-NEXT:  .LBB45_12: # %entry
5994; CHECK-V-NEXT:    mv s0, a3
5995; CHECK-V-NEXT:  .LBB45_13: # %entry
5996; CHECK-V-NEXT:    beq a1, a2, .LBB45_15
5997; CHECK-V-NEXT:  # %bb.14: # %entry
5998; CHECK-V-NEXT:    slti a1, a1, 0
5999; CHECK-V-NEXT:    xori a1, a1, 1
6000; CHECK-V-NEXT:    beqz a1, .LBB45_16
6001; CHECK-V-NEXT:    j .LBB45_17
6002; CHECK-V-NEXT:  .LBB45_15:
6003; CHECK-V-NEXT:    sltu a1, a3, a0
6004; CHECK-V-NEXT:    bnez a1, .LBB45_17
6005; CHECK-V-NEXT:  .LBB45_16: # %entry
6006; CHECK-V-NEXT:    mv a0, a3
6007; CHECK-V-NEXT:  .LBB45_17: # %entry
6008; CHECK-V-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
6009; CHECK-V-NEXT:    vmv.s.x v8, a0
6010; CHECK-V-NEXT:    vmv.s.x v9, s0
6011; CHECK-V-NEXT:    vslideup.vi v8, v9, 1
6012; CHECK-V-NEXT:    csrr a0, vlenb
6013; CHECK-V-NEXT:    add sp, sp, a0
6014; CHECK-V-NEXT:    .cfi_def_cfa sp, 64
6015; CHECK-V-NEXT:    ld ra, 56(sp) # 8-byte Folded Reload
6016; CHECK-V-NEXT:    ld s0, 48(sp) # 8-byte Folded Reload
6017; CHECK-V-NEXT:    ld s1, 40(sp) # 8-byte Folded Reload
6018; CHECK-V-NEXT:    .cfi_restore ra
6019; CHECK-V-NEXT:    .cfi_restore s0
6020; CHECK-V-NEXT:    .cfi_restore s1
6021; CHECK-V-NEXT:    addi sp, sp, 64
6022; CHECK-V-NEXT:    .cfi_def_cfa_offset 0
6023; CHECK-V-NEXT:    ret
6024entry:
6025  %conv = fptosi <2 x double> %x to <2 x i128>
6026  %spec.store.select = call <2 x i128> @llvm.smin.v2i128(<2 x i128> %conv, <2 x i128> <i128 9223372036854775807, i128 9223372036854775807>)
6027  %spec.store.select7 = call <2 x i128> @llvm.smax.v2i128(<2 x i128> %spec.store.select, <2 x i128> <i128 -9223372036854775808, i128 -9223372036854775808>)
6028  %conv6 = trunc <2 x i128> %spec.store.select7 to <2 x i64>
6029  ret <2 x i64> %conv6
6030}
6031
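; utest_f64i64_mm: fptoui <2 x double> to <2 x i128>, @llvm.umin with 18446744073709551616, then truncate to
; <2 x i64>; the conversion is scalarized through __fixunsdfti.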
6032define <2 x i64> @utest_f64i64_mm(<2 x double> %x) {
6033; CHECK-NOV-LABEL: utest_f64i64_mm:
6034; CHECK-NOV:       # %bb.0: # %entry
6035; CHECK-NOV-NEXT:    addi sp, sp, -32
6036; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 32
6037; CHECK-NOV-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
6038; CHECK-NOV-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
6039; CHECK-NOV-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
6040; CHECK-NOV-NEXT:    fsd fs0, 0(sp) # 8-byte Folded Spill
6041; CHECK-NOV-NEXT:    .cfi_offset ra, -8
6042; CHECK-NOV-NEXT:    .cfi_offset s0, -16
6043; CHECK-NOV-NEXT:    .cfi_offset s1, -24
6044; CHECK-NOV-NEXT:    .cfi_offset fs0, -32
6045; CHECK-NOV-NEXT:    fmv.d fs0, fa0
6046; CHECK-NOV-NEXT:    fmv.d fa0, fa1
6047; CHECK-NOV-NEXT:    call __fixunsdfti
6048; CHECK-NOV-NEXT:    mv s0, a0
6049; CHECK-NOV-NEXT:    mv s1, a1
6050; CHECK-NOV-NEXT:    fmv.d fa0, fs0
6051; CHECK-NOV-NEXT:    call __fixunsdfti
6052; CHECK-NOV-NEXT:    snez a1, a1
6053; CHECK-NOV-NEXT:    snez a2, s1
6054; CHECK-NOV-NEXT:    addi a1, a1, -1
6055; CHECK-NOV-NEXT:    addi a2, a2, -1
6056; CHECK-NOV-NEXT:    and a0, a1, a0
6057; CHECK-NOV-NEXT:    and a1, a2, s0
6058; CHECK-NOV-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
6059; CHECK-NOV-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
6060; CHECK-NOV-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
6061; CHECK-NOV-NEXT:    fld fs0, 0(sp) # 8-byte Folded Reload
6062; CHECK-NOV-NEXT:    .cfi_restore ra
6063; CHECK-NOV-NEXT:    .cfi_restore s0
6064; CHECK-NOV-NEXT:    .cfi_restore s1
6065; CHECK-NOV-NEXT:    .cfi_restore fs0
6066; CHECK-NOV-NEXT:    addi sp, sp, 32
6067; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 0
6068; CHECK-NOV-NEXT:    ret
6069;
6070; CHECK-V-LABEL: utest_f64i64_mm:
6071; CHECK-V:       # %bb.0: # %entry
6072; CHECK-V-NEXT:    addi sp, sp, -64
6073; CHECK-V-NEXT:    .cfi_def_cfa_offset 64
6074; CHECK-V-NEXT:    sd ra, 56(sp) # 8-byte Folded Spill
6075; CHECK-V-NEXT:    sd s0, 48(sp) # 8-byte Folded Spill
6076; CHECK-V-NEXT:    sd s1, 40(sp) # 8-byte Folded Spill
6077; CHECK-V-NEXT:    .cfi_offset ra, -8
6078; CHECK-V-NEXT:    .cfi_offset s0, -16
6079; CHECK-V-NEXT:    .cfi_offset s1, -24
6080; CHECK-V-NEXT:    csrr a0, vlenb
6081; CHECK-V-NEXT:    sub sp, sp, a0
6082; CHECK-V-NEXT:    .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x01, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 64 + 1 * vlenb
6083; CHECK-V-NEXT:    addi a0, sp, 32
6084; CHECK-V-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
6085; CHECK-V-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
6086; CHECK-V-NEXT:    vfmv.f.s fa0, v8
6087; CHECK-V-NEXT:    call __fixunsdfti
6088; CHECK-V-NEXT:    mv s0, a0
6089; CHECK-V-NEXT:    mv s1, a1
6090; CHECK-V-NEXT:    addi a0, sp, 32
6091; CHECK-V-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
6092; CHECK-V-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
6093; CHECK-V-NEXT:    vslidedown.vi v8, v8, 1
6094; CHECK-V-NEXT:    vfmv.f.s fa0, v8
6095; CHECK-V-NEXT:    call __fixunsdfti
6096; CHECK-V-NEXT:    snez a1, a1
6097; CHECK-V-NEXT:    snez a2, s1
6098; CHECK-V-NEXT:    addi a1, a1, -1
6099; CHECK-V-NEXT:    addi a2, a2, -1
6100; CHECK-V-NEXT:    and a0, a1, a0
6101; CHECK-V-NEXT:    and a2, a2, s0
6102; CHECK-V-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
6103; CHECK-V-NEXT:    vmv.s.x v8, a2
6104; CHECK-V-NEXT:    vmv.s.x v9, a0
6105; CHECK-V-NEXT:    vslideup.vi v8, v9, 1
6106; CHECK-V-NEXT:    csrr a0, vlenb
6107; CHECK-V-NEXT:    add sp, sp, a0
6108; CHECK-V-NEXT:    .cfi_def_cfa sp, 64
6109; CHECK-V-NEXT:    ld ra, 56(sp) # 8-byte Folded Reload
6110; CHECK-V-NEXT:    ld s0, 48(sp) # 8-byte Folded Reload
6111; CHECK-V-NEXT:    ld s1, 40(sp) # 8-byte Folded Reload
6112; CHECK-V-NEXT:    .cfi_restore ra
6113; CHECK-V-NEXT:    .cfi_restore s0
6114; CHECK-V-NEXT:    .cfi_restore s1
6115; CHECK-V-NEXT:    addi sp, sp, 64
6116; CHECK-V-NEXT:    .cfi_def_cfa_offset 0
6117; CHECK-V-NEXT:    ret
6118entry:
6119  %conv = fptoui <2 x double> %x to <2 x i128>
6120  %spec.store.select = call <2 x i128> @llvm.umin.v2i128(<2 x i128> %conv, <2 x i128> <i128 18446744073709551616, i128 18446744073709551616>)
6121  %conv6 = trunc <2 x i128> %spec.store.select to <2 x i64>
6122  ret <2 x i64> %conv6
6123}
6124
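; ustest_f64i64_mm: fptosi <2 x double> to <2 x i128>, @llvm.smin with 18446744073709551616 then @llvm.smax with
; zero, truncate to <2 x i64>; the conversion is scalarized through __fixdfti.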
6125define <2 x i64> @ustest_f64i64_mm(<2 x double> %x) {
6126; CHECK-NOV-LABEL: ustest_f64i64_mm:
6127; CHECK-NOV:       # %bb.0: # %entry
6128; CHECK-NOV-NEXT:    addi sp, sp, -32
6129; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 32
6130; CHECK-NOV-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
6131; CHECK-NOV-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
6132; CHECK-NOV-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
6133; CHECK-NOV-NEXT:    fsd fs0, 0(sp) # 8-byte Folded Spill
6134; CHECK-NOV-NEXT:    .cfi_offset ra, -8
6135; CHECK-NOV-NEXT:    .cfi_offset s0, -16
6136; CHECK-NOV-NEXT:    .cfi_offset s1, -24
6137; CHECK-NOV-NEXT:    .cfi_offset fs0, -32
6138; CHECK-NOV-NEXT:    fmv.d fs0, fa1
6139; CHECK-NOV-NEXT:    call __fixdfti
6140; CHECK-NOV-NEXT:    mv s0, a0
6141; CHECK-NOV-NEXT:    mv s1, a1
6142; CHECK-NOV-NEXT:    fmv.d fa0, fs0
6143; CHECK-NOV-NEXT:    call __fixdfti
6144; CHECK-NOV-NEXT:    mv a2, a1
6145; CHECK-NOV-NEXT:    blez a1, .LBB47_2
6146; CHECK-NOV-NEXT:  # %bb.1: # %entry
6147; CHECK-NOV-NEXT:    li a2, 1
6148; CHECK-NOV-NEXT:  .LBB47_2: # %entry
6149; CHECK-NOV-NEXT:    mv a3, s1
6150; CHECK-NOV-NEXT:    blez s1, .LBB47_4
6151; CHECK-NOV-NEXT:  # %bb.3: # %entry
6152; CHECK-NOV-NEXT:    li a3, 1
6153; CHECK-NOV-NEXT:  .LBB47_4: # %entry
6154; CHECK-NOV-NEXT:    slti a1, a1, 1
6155; CHECK-NOV-NEXT:    slti a4, s1, 1
6156; CHECK-NOV-NEXT:    slti a3, a3, 0
6157; CHECK-NOV-NEXT:    slti a2, a2, 0
6158; CHECK-NOV-NEXT:    neg a1, a1
6159; CHECK-NOV-NEXT:    neg a4, a4
6160; CHECK-NOV-NEXT:    addi a3, a3, -1
6161; CHECK-NOV-NEXT:    addi a2, a2, -1
6162; CHECK-NOV-NEXT:    and a1, a1, a0
6163; CHECK-NOV-NEXT:    and a0, a4, s0
6164; CHECK-NOV-NEXT:    and a0, a3, a0
6165; CHECK-NOV-NEXT:    and a1, a2, a1
6166; CHECK-NOV-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
6167; CHECK-NOV-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
6168; CHECK-NOV-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
6169; CHECK-NOV-NEXT:    fld fs0, 0(sp) # 8-byte Folded Reload
6170; CHECK-NOV-NEXT:    .cfi_restore ra
6171; CHECK-NOV-NEXT:    .cfi_restore s0
6172; CHECK-NOV-NEXT:    .cfi_restore s1
6173; CHECK-NOV-NEXT:    .cfi_restore fs0
6174; CHECK-NOV-NEXT:    addi sp, sp, 32
6175; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 0
6176; CHECK-NOV-NEXT:    ret
6177;
6178; CHECK-V-LABEL: ustest_f64i64_mm:
6179; CHECK-V:       # %bb.0: # %entry
6180; CHECK-V-NEXT:    addi sp, sp, -64
6181; CHECK-V-NEXT:    .cfi_def_cfa_offset 64
6182; CHECK-V-NEXT:    sd ra, 56(sp) # 8-byte Folded Spill
6183; CHECK-V-NEXT:    sd s0, 48(sp) # 8-byte Folded Spill
6184; CHECK-V-NEXT:    sd s1, 40(sp) # 8-byte Folded Spill
6185; CHECK-V-NEXT:    .cfi_offset ra, -8
6186; CHECK-V-NEXT:    .cfi_offset s0, -16
6187; CHECK-V-NEXT:    .cfi_offset s1, -24
6188; CHECK-V-NEXT:    csrr a0, vlenb
6189; CHECK-V-NEXT:    sub sp, sp, a0
6190; CHECK-V-NEXT:    .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x01, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 64 + 1 * vlenb
6191; CHECK-V-NEXT:    addi a0, sp, 32
6192; CHECK-V-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
6193; CHECK-V-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
6194; CHECK-V-NEXT:    vslidedown.vi v9, v8, 1
6195; CHECK-V-NEXT:    vfmv.f.s fa0, v9
6196; CHECK-V-NEXT:    call __fixdfti
6197; CHECK-V-NEXT:    mv s0, a0
6198; CHECK-V-NEXT:    mv s1, a1
6199; CHECK-V-NEXT:    fld fa0, 32(sp) # 8-byte Folded Reload
6200; CHECK-V-NEXT:    call __fixdfti
6201; CHECK-V-NEXT:    mv a2, a1
6202; CHECK-V-NEXT:    blez a1, .LBB47_2
6203; CHECK-V-NEXT:  # %bb.1: # %entry
6204; CHECK-V-NEXT:    li a2, 1
6205; CHECK-V-NEXT:  .LBB47_2: # %entry
6206; CHECK-V-NEXT:    mv a3, s1
6207; CHECK-V-NEXT:    blez s1, .LBB47_4
6208; CHECK-V-NEXT:  # %bb.3: # %entry
6209; CHECK-V-NEXT:    li a3, 1
6210; CHECK-V-NEXT:  .LBB47_4: # %entry
6211; CHECK-V-NEXT:    slti a1, a1, 1
6212; CHECK-V-NEXT:    slti a4, s1, 1
6213; CHECK-V-NEXT:    slti a3, a3, 0
6214; CHECK-V-NEXT:    slti a2, a2, 0
6215; CHECK-V-NEXT:    neg a1, a1
6216; CHECK-V-NEXT:    neg a4, a4
6217; CHECK-V-NEXT:    addi a3, a3, -1
6218; CHECK-V-NEXT:    addi a2, a2, -1
6219; CHECK-V-NEXT:    and a0, a1, a0
6220; CHECK-V-NEXT:    and a4, a4, s0
6221; CHECK-V-NEXT:    and a3, a3, a4
6222; CHECK-V-NEXT:    and a0, a2, a0
6223; CHECK-V-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
6224; CHECK-V-NEXT:    vmv.s.x v8, a0
6225; CHECK-V-NEXT:    vmv.s.x v9, a3
6226; CHECK-V-NEXT:    vslideup.vi v8, v9, 1
6227; CHECK-V-NEXT:    csrr a0, vlenb
6228; CHECK-V-NEXT:    add sp, sp, a0
6229; CHECK-V-NEXT:    .cfi_def_cfa sp, 64
6230; CHECK-V-NEXT:    ld ra, 56(sp) # 8-byte Folded Reload
6231; CHECK-V-NEXT:    ld s0, 48(sp) # 8-byte Folded Reload
6232; CHECK-V-NEXT:    ld s1, 40(sp) # 8-byte Folded Reload
6233; CHECK-V-NEXT:    .cfi_restore ra
6234; CHECK-V-NEXT:    .cfi_restore s0
6235; CHECK-V-NEXT:    .cfi_restore s1
6236; CHECK-V-NEXT:    addi sp, sp, 64
6237; CHECK-V-NEXT:    .cfi_def_cfa_offset 0
6238; CHECK-V-NEXT:    ret
6239entry:
6240  %conv = fptosi <2 x double> %x to <2 x i128>
6241  %spec.store.select = call <2 x i128> @llvm.smin.v2i128(<2 x i128> %conv, <2 x i128> <i128 18446744073709551616, i128 18446744073709551616>)
6242  %spec.store.select7 = call <2 x i128> @llvm.smax.v2i128(<2 x i128> %spec.store.select, <2 x i128> zeroinitializer)
6243  %conv6 = trunc <2 x i128> %spec.store.select7 to <2 x i64>
6244  ret <2 x i64> %conv6
6245}
6246
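; stest_f32i64_mm: same signed i64 clamp as stest_f64i64_mm, but starting from <2 x float> and scalarized
; through __fixsfti.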
6247define <2 x i64> @stest_f32i64_mm(<2 x float> %x) {
6248; CHECK-NOV-LABEL: stest_f32i64_mm:
6249; CHECK-NOV:       # %bb.0: # %entry
6250; CHECK-NOV-NEXT:    addi sp, sp, -32
6251; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 32
6252; CHECK-NOV-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
6253; CHECK-NOV-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
6254; CHECK-NOV-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
6255; CHECK-NOV-NEXT:    fsd fs0, 0(sp) # 8-byte Folded Spill
6256; CHECK-NOV-NEXT:    .cfi_offset ra, -8
6257; CHECK-NOV-NEXT:    .cfi_offset s0, -16
6258; CHECK-NOV-NEXT:    .cfi_offset s1, -24
6259; CHECK-NOV-NEXT:    .cfi_offset fs0, -32
6260; CHECK-NOV-NEXT:    fmv.s fs0, fa1
6261; CHECK-NOV-NEXT:    call __fixsfti
6262; CHECK-NOV-NEXT:    mv s0, a0
6263; CHECK-NOV-NEXT:    mv s1, a1
6264; CHECK-NOV-NEXT:    fmv.s fa0, fs0
6265; CHECK-NOV-NEXT:    call __fixsfti
6266; CHECK-NOV-NEXT:    mv a2, a0
6267; CHECK-NOV-NEXT:    li a0, -1
6268; CHECK-NOV-NEXT:    srli a3, a0, 1
6269; CHECK-NOV-NEXT:    beqz a1, .LBB48_2
6270; CHECK-NOV-NEXT:  # %bb.1: # %entry
6271; CHECK-NOV-NEXT:    slti a4, a1, 0
6272; CHECK-NOV-NEXT:    beqz a4, .LBB48_3
6273; CHECK-NOV-NEXT:    j .LBB48_4
6274; CHECK-NOV-NEXT:  .LBB48_2:
6275; CHECK-NOV-NEXT:    sltu a4, a2, a3
6276; CHECK-NOV-NEXT:    bnez a4, .LBB48_4
6277; CHECK-NOV-NEXT:  .LBB48_3: # %entry
6278; CHECK-NOV-NEXT:    mv a2, a3
6279; CHECK-NOV-NEXT:  .LBB48_4: # %entry
6280; CHECK-NOV-NEXT:    beqz s1, .LBB48_6
6281; CHECK-NOV-NEXT:  # %bb.5: # %entry
6282; CHECK-NOV-NEXT:    slti a6, s1, 0
6283; CHECK-NOV-NEXT:    j .LBB48_7
6284; CHECK-NOV-NEXT:  .LBB48_6:
6285; CHECK-NOV-NEXT:    sltu a6, s0, a3
6286; CHECK-NOV-NEXT:  .LBB48_7: # %entry
6287; CHECK-NOV-NEXT:    neg a5, a6
6288; CHECK-NOV-NEXT:    and a5, a5, s1
6289; CHECK-NOV-NEXT:    bnez a6, .LBB48_9
6290; CHECK-NOV-NEXT:  # %bb.8: # %entry
6291; CHECK-NOV-NEXT:    mv s0, a3
6292; CHECK-NOV-NEXT:  .LBB48_9: # %entry
6293; CHECK-NOV-NEXT:    neg a4, a4
6294; CHECK-NOV-NEXT:    slli a3, a0, 63
6295; CHECK-NOV-NEXT:    beq a5, a0, .LBB48_11
6296; CHECK-NOV-NEXT:  # %bb.10: # %entry
6297; CHECK-NOV-NEXT:    slti a5, a5, 0
6298; CHECK-NOV-NEXT:    xori a5, a5, 1
6299; CHECK-NOV-NEXT:    and a1, a4, a1
6300; CHECK-NOV-NEXT:    beqz a5, .LBB48_12
6301; CHECK-NOV-NEXT:    j .LBB48_13
6302; CHECK-NOV-NEXT:  .LBB48_11:
6303; CHECK-NOV-NEXT:    sltu a5, a3, s0
6304; CHECK-NOV-NEXT:    and a1, a4, a1
6305; CHECK-NOV-NEXT:    bnez a5, .LBB48_13
6306; CHECK-NOV-NEXT:  .LBB48_12: # %entry
6307; CHECK-NOV-NEXT:    mv s0, a3
6308; CHECK-NOV-NEXT:  .LBB48_13: # %entry
6309; CHECK-NOV-NEXT:    beq a1, a0, .LBB48_15
6310; CHECK-NOV-NEXT:  # %bb.14: # %entry
6311; CHECK-NOV-NEXT:    slti a0, a1, 0
6312; CHECK-NOV-NEXT:    xori a0, a0, 1
6313; CHECK-NOV-NEXT:    beqz a0, .LBB48_16
6314; CHECK-NOV-NEXT:    j .LBB48_17
6315; CHECK-NOV-NEXT:  .LBB48_15:
6316; CHECK-NOV-NEXT:    sltu a0, a3, a2
6317; CHECK-NOV-NEXT:    bnez a0, .LBB48_17
6318; CHECK-NOV-NEXT:  .LBB48_16: # %entry
6319; CHECK-NOV-NEXT:    mv a2, a3
6320; CHECK-NOV-NEXT:  .LBB48_17: # %entry
6321; CHECK-NOV-NEXT:    mv a0, s0
6322; CHECK-NOV-NEXT:    mv a1, a2
6323; CHECK-NOV-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
6324; CHECK-NOV-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
6325; CHECK-NOV-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
6326; CHECK-NOV-NEXT:    fld fs0, 0(sp) # 8-byte Folded Reload
6327; CHECK-NOV-NEXT:    .cfi_restore ra
6328; CHECK-NOV-NEXT:    .cfi_restore s0
6329; CHECK-NOV-NEXT:    .cfi_restore s1
6330; CHECK-NOV-NEXT:    .cfi_restore fs0
6331; CHECK-NOV-NEXT:    addi sp, sp, 32
6332; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 0
6333; CHECK-NOV-NEXT:    ret
6334;
6335; CHECK-V-LABEL: stest_f32i64_mm:
6336; CHECK-V:       # %bb.0: # %entry
6337; CHECK-V-NEXT:    addi sp, sp, -64
6338; CHECK-V-NEXT:    .cfi_def_cfa_offset 64
6339; CHECK-V-NEXT:    sd ra, 56(sp) # 8-byte Folded Spill
6340; CHECK-V-NEXT:    sd s0, 48(sp) # 8-byte Folded Spill
6341; CHECK-V-NEXT:    sd s1, 40(sp) # 8-byte Folded Spill
6342; CHECK-V-NEXT:    .cfi_offset ra, -8
6343; CHECK-V-NEXT:    .cfi_offset s0, -16
6344; CHECK-V-NEXT:    .cfi_offset s1, -24
6345; CHECK-V-NEXT:    csrr a0, vlenb
6346; CHECK-V-NEXT:    sub sp, sp, a0
6347; CHECK-V-NEXT:    .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x01, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 64 + 1 * vlenb
6348; CHECK-V-NEXT:    addi a0, sp, 32
6349; CHECK-V-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
6350; CHECK-V-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
6351; CHECK-V-NEXT:    vslidedown.vi v9, v8, 1
6352; CHECK-V-NEXT:    vfmv.f.s fa0, v9
6353; CHECK-V-NEXT:    call __fixsfti
6354; CHECK-V-NEXT:    mv s0, a0
6355; CHECK-V-NEXT:    mv s1, a1
6356; CHECK-V-NEXT:    flw fa0, 32(sp) # 8-byte Folded Reload
6357; CHECK-V-NEXT:    call __fixsfti
6358; CHECK-V-NEXT:    li a2, -1
6359; CHECK-V-NEXT:    srli a3, a2, 1
6360; CHECK-V-NEXT:    beqz a1, .LBB48_2
6361; CHECK-V-NEXT:  # %bb.1: # %entry
6362; CHECK-V-NEXT:    slti a4, a1, 0
6363; CHECK-V-NEXT:    beqz a4, .LBB48_3
6364; CHECK-V-NEXT:    j .LBB48_4
6365; CHECK-V-NEXT:  .LBB48_2:
6366; CHECK-V-NEXT:    sltu a4, a0, a3
6367; CHECK-V-NEXT:    bnez a4, .LBB48_4
6368; CHECK-V-NEXT:  .LBB48_3: # %entry
6369; CHECK-V-NEXT:    mv a0, a3
6370; CHECK-V-NEXT:  .LBB48_4: # %entry
6371; CHECK-V-NEXT:    beqz s1, .LBB48_6
6372; CHECK-V-NEXT:  # %bb.5: # %entry
6373; CHECK-V-NEXT:    slti a6, s1, 0
6374; CHECK-V-NEXT:    j .LBB48_7
6375; CHECK-V-NEXT:  .LBB48_6:
6376; CHECK-V-NEXT:    sltu a6, s0, a3
6377; CHECK-V-NEXT:  .LBB48_7: # %entry
6378; CHECK-V-NEXT:    neg a5, a6
6379; CHECK-V-NEXT:    and a5, a5, s1
6380; CHECK-V-NEXT:    bnez a6, .LBB48_9
6381; CHECK-V-NEXT:  # %bb.8: # %entry
6382; CHECK-V-NEXT:    mv s0, a3
6383; CHECK-V-NEXT:  .LBB48_9: # %entry
6384; CHECK-V-NEXT:    neg a4, a4
6385; CHECK-V-NEXT:    slli a3, a2, 63
6386; CHECK-V-NEXT:    beq a5, a2, .LBB48_11
6387; CHECK-V-NEXT:  # %bb.10: # %entry
6388; CHECK-V-NEXT:    slti a5, a5, 0
6389; CHECK-V-NEXT:    xori a5, a5, 1
6390; CHECK-V-NEXT:    and a1, a4, a1
6391; CHECK-V-NEXT:    beqz a5, .LBB48_12
6392; CHECK-V-NEXT:    j .LBB48_13
6393; CHECK-V-NEXT:  .LBB48_11:
6394; CHECK-V-NEXT:    sltu a5, a3, s0
6395; CHECK-V-NEXT:    and a1, a4, a1
6396; CHECK-V-NEXT:    bnez a5, .LBB48_13
6397; CHECK-V-NEXT:  .LBB48_12: # %entry
6398; CHECK-V-NEXT:    mv s0, a3
6399; CHECK-V-NEXT:  .LBB48_13: # %entry
6400; CHECK-V-NEXT:    beq a1, a2, .LBB48_15
6401; CHECK-V-NEXT:  # %bb.14: # %entry
6402; CHECK-V-NEXT:    slti a1, a1, 0
6403; CHECK-V-NEXT:    xori a1, a1, 1
6404; CHECK-V-NEXT:    beqz a1, .LBB48_16
6405; CHECK-V-NEXT:    j .LBB48_17
6406; CHECK-V-NEXT:  .LBB48_15:
6407; CHECK-V-NEXT:    sltu a1, a3, a0
6408; CHECK-V-NEXT:    bnez a1, .LBB48_17
6409; CHECK-V-NEXT:  .LBB48_16: # %entry
6410; CHECK-V-NEXT:    mv a0, a3
6411; CHECK-V-NEXT:  .LBB48_17: # %entry
6412; CHECK-V-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
6413; CHECK-V-NEXT:    vmv.s.x v8, a0
6414; CHECK-V-NEXT:    vmv.s.x v9, s0
6415; CHECK-V-NEXT:    vslideup.vi v8, v9, 1
6416; CHECK-V-NEXT:    csrr a0, vlenb
6417; CHECK-V-NEXT:    add sp, sp, a0
6418; CHECK-V-NEXT:    .cfi_def_cfa sp, 64
6419; CHECK-V-NEXT:    ld ra, 56(sp) # 8-byte Folded Reload
6420; CHECK-V-NEXT:    ld s0, 48(sp) # 8-byte Folded Reload
6421; CHECK-V-NEXT:    ld s1, 40(sp) # 8-byte Folded Reload
6422; CHECK-V-NEXT:    .cfi_restore ra
6423; CHECK-V-NEXT:    .cfi_restore s0
6424; CHECK-V-NEXT:    .cfi_restore s1
6425; CHECK-V-NEXT:    addi sp, sp, 64
6426; CHECK-V-NEXT:    .cfi_def_cfa_offset 0
6427; CHECK-V-NEXT:    ret
6428entry:
6429  %conv = fptosi <2 x float> %x to <2 x i128>
6430  %spec.store.select = call <2 x i128> @llvm.smin.v2i128(<2 x i128> %conv, <2 x i128> <i128 9223372036854775807, i128 9223372036854775807>)
6431  %spec.store.select7 = call <2 x i128> @llvm.smax.v2i128(<2 x i128> %spec.store.select, <2 x i128> <i128 -9223372036854775808, i128 -9223372036854775808>)
6432  %conv6 = trunc <2 x i128> %spec.store.select7 to <2 x i64>
6433  ret <2 x i64> %conv6
6434}
6435
6436define <2 x i64> @utest_f32i64_mm(<2 x float> %x) {
6437; CHECK-NOV-LABEL: utest_f32i64_mm:
6438; CHECK-NOV:       # %bb.0: # %entry
6439; CHECK-NOV-NEXT:    addi sp, sp, -32
6440; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 32
6441; CHECK-NOV-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
6442; CHECK-NOV-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
6443; CHECK-NOV-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
6444; CHECK-NOV-NEXT:    fsd fs0, 0(sp) # 8-byte Folded Spill
6445; CHECK-NOV-NEXT:    .cfi_offset ra, -8
6446; CHECK-NOV-NEXT:    .cfi_offset s0, -16
6447; CHECK-NOV-NEXT:    .cfi_offset s1, -24
6448; CHECK-NOV-NEXT:    .cfi_offset fs0, -32
6449; CHECK-NOV-NEXT:    fmv.s fs0, fa0
6450; CHECK-NOV-NEXT:    fmv.s fa0, fa1
6451; CHECK-NOV-NEXT:    call __fixunssfti
6452; CHECK-NOV-NEXT:    mv s0, a0
6453; CHECK-NOV-NEXT:    mv s1, a1
6454; CHECK-NOV-NEXT:    fmv.s fa0, fs0
6455; CHECK-NOV-NEXT:    call __fixunssfti
6456; CHECK-NOV-NEXT:    snez a1, a1
6457; CHECK-NOV-NEXT:    snez a2, s1
6458; CHECK-NOV-NEXT:    addi a1, a1, -1
6459; CHECK-NOV-NEXT:    addi a2, a2, -1
6460; CHECK-NOV-NEXT:    and a0, a1, a0
6461; CHECK-NOV-NEXT:    and a1, a2, s0
6462; CHECK-NOV-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
6463; CHECK-NOV-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
6464; CHECK-NOV-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
6465; CHECK-NOV-NEXT:    fld fs0, 0(sp) # 8-byte Folded Reload
6466; CHECK-NOV-NEXT:    .cfi_restore ra
6467; CHECK-NOV-NEXT:    .cfi_restore s0
6468; CHECK-NOV-NEXT:    .cfi_restore s1
6469; CHECK-NOV-NEXT:    .cfi_restore fs0
6470; CHECK-NOV-NEXT:    addi sp, sp, 32
6471; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 0
6472; CHECK-NOV-NEXT:    ret
6473;
6474; CHECK-V-LABEL: utest_f32i64_mm:
6475; CHECK-V:       # %bb.0: # %entry
6476; CHECK-V-NEXT:    addi sp, sp, -64
6477; CHECK-V-NEXT:    .cfi_def_cfa_offset 64
6478; CHECK-V-NEXT:    sd ra, 56(sp) # 8-byte Folded Spill
6479; CHECK-V-NEXT:    sd s0, 48(sp) # 8-byte Folded Spill
6480; CHECK-V-NEXT:    sd s1, 40(sp) # 8-byte Folded Spill
6481; CHECK-V-NEXT:    .cfi_offset ra, -8
6482; CHECK-V-NEXT:    .cfi_offset s0, -16
6483; CHECK-V-NEXT:    .cfi_offset s1, -24
6484; CHECK-V-NEXT:    csrr a0, vlenb
6485; CHECK-V-NEXT:    sub sp, sp, a0
6486; CHECK-V-NEXT:    .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x01, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 64 + 1 * vlenb
6487; CHECK-V-NEXT:    addi a0, sp, 32
6488; CHECK-V-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
6489; CHECK-V-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
6490; CHECK-V-NEXT:    vfmv.f.s fa0, v8
6491; CHECK-V-NEXT:    call __fixunssfti
6492; CHECK-V-NEXT:    mv s0, a0
6493; CHECK-V-NEXT:    mv s1, a1
6494; CHECK-V-NEXT:    addi a0, sp, 32
6495; CHECK-V-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
6496; CHECK-V-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
6497; CHECK-V-NEXT:    vslidedown.vi v8, v8, 1
6498; CHECK-V-NEXT:    vfmv.f.s fa0, v8
6499; CHECK-V-NEXT:    call __fixunssfti
6500; CHECK-V-NEXT:    snez a1, a1
6501; CHECK-V-NEXT:    snez a2, s1
6502; CHECK-V-NEXT:    addi a1, a1, -1
6503; CHECK-V-NEXT:    addi a2, a2, -1
6504; CHECK-V-NEXT:    and a0, a1, a0
6505; CHECK-V-NEXT:    and a2, a2, s0
6506; CHECK-V-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
6507; CHECK-V-NEXT:    vmv.s.x v8, a2
6508; CHECK-V-NEXT:    vmv.s.x v9, a0
6509; CHECK-V-NEXT:    vslideup.vi v8, v9, 1
6510; CHECK-V-NEXT:    csrr a0, vlenb
6511; CHECK-V-NEXT:    add sp, sp, a0
6512; CHECK-V-NEXT:    .cfi_def_cfa sp, 64
6513; CHECK-V-NEXT:    ld ra, 56(sp) # 8-byte Folded Reload
6514; CHECK-V-NEXT:    ld s0, 48(sp) # 8-byte Folded Reload
6515; CHECK-V-NEXT:    ld s1, 40(sp) # 8-byte Folded Reload
6516; CHECK-V-NEXT:    .cfi_restore ra
6517; CHECK-V-NEXT:    .cfi_restore s0
6518; CHECK-V-NEXT:    .cfi_restore s1
6519; CHECK-V-NEXT:    addi sp, sp, 64
6520; CHECK-V-NEXT:    .cfi_def_cfa_offset 0
6521; CHECK-V-NEXT:    ret
6522entry:
6523  %conv = fptoui <2 x float> %x to <2 x i128>
6524  %spec.store.select = call <2 x i128> @llvm.umin.v2i128(<2 x i128> %conv, <2 x i128> <i128 18446744073709551616, i128 18446744073709551616>)
6525  %conv6 = trunc <2 x i128> %spec.store.select to <2 x i64>
6526  ret <2 x i64> %conv6
6527}
6528
6529define <2 x i64> @ustest_f32i64_mm(<2 x float> %x) {
6530; CHECK-NOV-LABEL: ustest_f32i64_mm:
6531; CHECK-NOV:       # %bb.0: # %entry
6532; CHECK-NOV-NEXT:    addi sp, sp, -32
6533; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 32
6534; CHECK-NOV-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
6535; CHECK-NOV-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
6536; CHECK-NOV-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
6537; CHECK-NOV-NEXT:    fsd fs0, 0(sp) # 8-byte Folded Spill
6538; CHECK-NOV-NEXT:    .cfi_offset ra, -8
6539; CHECK-NOV-NEXT:    .cfi_offset s0, -16
6540; CHECK-NOV-NEXT:    .cfi_offset s1, -24
6541; CHECK-NOV-NEXT:    .cfi_offset fs0, -32
6542; CHECK-NOV-NEXT:    fmv.s fs0, fa1
6543; CHECK-NOV-NEXT:    call __fixsfti
6544; CHECK-NOV-NEXT:    mv s0, a0
6545; CHECK-NOV-NEXT:    mv s1, a1
6546; CHECK-NOV-NEXT:    fmv.s fa0, fs0
6547; CHECK-NOV-NEXT:    call __fixsfti
6548; CHECK-NOV-NEXT:    mv a2, a1
6549; CHECK-NOV-NEXT:    blez a1, .LBB50_2
6550; CHECK-NOV-NEXT:  # %bb.1: # %entry
6551; CHECK-NOV-NEXT:    li a2, 1
6552; CHECK-NOV-NEXT:  .LBB50_2: # %entry
6553; CHECK-NOV-NEXT:    mv a3, s1
6554; CHECK-NOV-NEXT:    blez s1, .LBB50_4
6555; CHECK-NOV-NEXT:  # %bb.3: # %entry
6556; CHECK-NOV-NEXT:    li a3, 1
6557; CHECK-NOV-NEXT:  .LBB50_4: # %entry
6558; CHECK-NOV-NEXT:    slti a1, a1, 1
6559; CHECK-NOV-NEXT:    slti a4, s1, 1
6560; CHECK-NOV-NEXT:    slti a3, a3, 0
6561; CHECK-NOV-NEXT:    slti a2, a2, 0
6562; CHECK-NOV-NEXT:    neg a1, a1
6563; CHECK-NOV-NEXT:    neg a4, a4
6564; CHECK-NOV-NEXT:    addi a3, a3, -1
6565; CHECK-NOV-NEXT:    addi a2, a2, -1
6566; CHECK-NOV-NEXT:    and a1, a1, a0
6567; CHECK-NOV-NEXT:    and a0, a4, s0
6568; CHECK-NOV-NEXT:    and a0, a3, a0
6569; CHECK-NOV-NEXT:    and a1, a2, a1
6570; CHECK-NOV-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
6571; CHECK-NOV-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
6572; CHECK-NOV-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
6573; CHECK-NOV-NEXT:    fld fs0, 0(sp) # 8-byte Folded Reload
6574; CHECK-NOV-NEXT:    .cfi_restore ra
6575; CHECK-NOV-NEXT:    .cfi_restore s0
6576; CHECK-NOV-NEXT:    .cfi_restore s1
6577; CHECK-NOV-NEXT:    .cfi_restore fs0
6578; CHECK-NOV-NEXT:    addi sp, sp, 32
6579; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 0
6580; CHECK-NOV-NEXT:    ret
6581;
6582; CHECK-V-LABEL: ustest_f32i64_mm:
6583; CHECK-V:       # %bb.0: # %entry
6584; CHECK-V-NEXT:    addi sp, sp, -64
6585; CHECK-V-NEXT:    .cfi_def_cfa_offset 64
6586; CHECK-V-NEXT:    sd ra, 56(sp) # 8-byte Folded Spill
6587; CHECK-V-NEXT:    sd s0, 48(sp) # 8-byte Folded Spill
6588; CHECK-V-NEXT:    sd s1, 40(sp) # 8-byte Folded Spill
6589; CHECK-V-NEXT:    .cfi_offset ra, -8
6590; CHECK-V-NEXT:    .cfi_offset s0, -16
6591; CHECK-V-NEXT:    .cfi_offset s1, -24
6592; CHECK-V-NEXT:    csrr a0, vlenb
6593; CHECK-V-NEXT:    sub sp, sp, a0
6594; CHECK-V-NEXT:    .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x01, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 64 + 1 * vlenb
6595; CHECK-V-NEXT:    addi a0, sp, 32
6596; CHECK-V-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
6597; CHECK-V-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
6598; CHECK-V-NEXT:    vslidedown.vi v9, v8, 1
6599; CHECK-V-NEXT:    vfmv.f.s fa0, v9
6600; CHECK-V-NEXT:    call __fixsfti
6601; CHECK-V-NEXT:    mv s0, a0
6602; CHECK-V-NEXT:    mv s1, a1
6603; CHECK-V-NEXT:    flw fa0, 32(sp) # 8-byte Folded Reload
6604; CHECK-V-NEXT:    call __fixsfti
6605; CHECK-V-NEXT:    mv a2, a1
6606; CHECK-V-NEXT:    blez a1, .LBB50_2
6607; CHECK-V-NEXT:  # %bb.1: # %entry
6608; CHECK-V-NEXT:    li a2, 1
6609; CHECK-V-NEXT:  .LBB50_2: # %entry
6610; CHECK-V-NEXT:    mv a3, s1
6611; CHECK-V-NEXT:    blez s1, .LBB50_4
6612; CHECK-V-NEXT:  # %bb.3: # %entry
6613; CHECK-V-NEXT:    li a3, 1
6614; CHECK-V-NEXT:  .LBB50_4: # %entry
6615; CHECK-V-NEXT:    slti a1, a1, 1
6616; CHECK-V-NEXT:    slti a4, s1, 1
6617; CHECK-V-NEXT:    slti a3, a3, 0
6618; CHECK-V-NEXT:    slti a2, a2, 0
6619; CHECK-V-NEXT:    neg a1, a1
6620; CHECK-V-NEXT:    neg a4, a4
6621; CHECK-V-NEXT:    addi a3, a3, -1
6622; CHECK-V-NEXT:    addi a2, a2, -1
6623; CHECK-V-NEXT:    and a0, a1, a0
6624; CHECK-V-NEXT:    and a4, a4, s0
6625; CHECK-V-NEXT:    and a3, a3, a4
6626; CHECK-V-NEXT:    and a0, a2, a0
6627; CHECK-V-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
6628; CHECK-V-NEXT:    vmv.s.x v8, a0
6629; CHECK-V-NEXT:    vmv.s.x v9, a3
6630; CHECK-V-NEXT:    vslideup.vi v8, v9, 1
6631; CHECK-V-NEXT:    csrr a0, vlenb
6632; CHECK-V-NEXT:    add sp, sp, a0
6633; CHECK-V-NEXT:    .cfi_def_cfa sp, 64
6634; CHECK-V-NEXT:    ld ra, 56(sp) # 8-byte Folded Reload
6635; CHECK-V-NEXT:    ld s0, 48(sp) # 8-byte Folded Reload
6636; CHECK-V-NEXT:    ld s1, 40(sp) # 8-byte Folded Reload
6637; CHECK-V-NEXT:    .cfi_restore ra
6638; CHECK-V-NEXT:    .cfi_restore s0
6639; CHECK-V-NEXT:    .cfi_restore s1
6640; CHECK-V-NEXT:    addi sp, sp, 64
6641; CHECK-V-NEXT:    .cfi_def_cfa_offset 0
6642; CHECK-V-NEXT:    ret
6643entry:
6644  %conv = fptosi <2 x float> %x to <2 x i128>
6645  %spec.store.select = call <2 x i128> @llvm.smin.v2i128(<2 x i128> %conv, <2 x i128> <i128 18446744073709551616, i128 18446744073709551616>)
6646  %spec.store.select7 = call <2 x i128> @llvm.smax.v2i128(<2 x i128> %spec.store.select, <2 x i128> zeroinitializer)
6647  %conv6 = trunc <2 x i128> %spec.store.select7 to <2 x i64>
6648  ret <2 x i64> %conv6
6649}
6650
6651define <2 x i64> @stest_f16i64_mm(<2 x half> %x) {
6652; CHECK-NOV-LABEL: stest_f16i64_mm:
6653; CHECK-NOV:       # %bb.0: # %entry
6654; CHECK-NOV-NEXT:    addi sp, sp, -32
6655; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 32
6656; CHECK-NOV-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
6657; CHECK-NOV-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
6658; CHECK-NOV-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
6659; CHECK-NOV-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
6660; CHECK-NOV-NEXT:    .cfi_offset ra, -8
6661; CHECK-NOV-NEXT:    .cfi_offset s0, -16
6662; CHECK-NOV-NEXT:    .cfi_offset s1, -24
6663; CHECK-NOV-NEXT:    .cfi_offset s2, -32
6664; CHECK-NOV-NEXT:    mv s2, a1
6665; CHECK-NOV-NEXT:    fmv.w.x fa0, a0
6666; CHECK-NOV-NEXT:    call __extendhfsf2
6667; CHECK-NOV-NEXT:    call __fixsfti
6668; CHECK-NOV-NEXT:    mv s0, a0
6669; CHECK-NOV-NEXT:    mv s1, a1
6670; CHECK-NOV-NEXT:    fmv.w.x fa0, s2
6671; CHECK-NOV-NEXT:    call __extendhfsf2
6672; CHECK-NOV-NEXT:    call __fixsfti
6673; CHECK-NOV-NEXT:    mv a2, a0
6674; CHECK-NOV-NEXT:    li a0, -1
6675; CHECK-NOV-NEXT:    srli a3, a0, 1
6676; CHECK-NOV-NEXT:    beqz a1, .LBB51_2
6677; CHECK-NOV-NEXT:  # %bb.1: # %entry
6678; CHECK-NOV-NEXT:    slti a4, a1, 0
6679; CHECK-NOV-NEXT:    beqz a4, .LBB51_3
6680; CHECK-NOV-NEXT:    j .LBB51_4
6681; CHECK-NOV-NEXT:  .LBB51_2:
6682; CHECK-NOV-NEXT:    sltu a4, a2, a3
6683; CHECK-NOV-NEXT:    bnez a4, .LBB51_4
6684; CHECK-NOV-NEXT:  .LBB51_3: # %entry
6685; CHECK-NOV-NEXT:    mv a2, a3
6686; CHECK-NOV-NEXT:  .LBB51_4: # %entry
6687; CHECK-NOV-NEXT:    beqz s1, .LBB51_6
6688; CHECK-NOV-NEXT:  # %bb.5: # %entry
6689; CHECK-NOV-NEXT:    slti a6, s1, 0
6690; CHECK-NOV-NEXT:    j .LBB51_7
6691; CHECK-NOV-NEXT:  .LBB51_6:
6692; CHECK-NOV-NEXT:    sltu a6, s0, a3
6693; CHECK-NOV-NEXT:  .LBB51_7: # %entry
6694; CHECK-NOV-NEXT:    neg a5, a6
6695; CHECK-NOV-NEXT:    and a5, a5, s1
6696; CHECK-NOV-NEXT:    bnez a6, .LBB51_9
6697; CHECK-NOV-NEXT:  # %bb.8: # %entry
6698; CHECK-NOV-NEXT:    mv s0, a3
6699; CHECK-NOV-NEXT:  .LBB51_9: # %entry
6700; CHECK-NOV-NEXT:    neg a4, a4
6701; CHECK-NOV-NEXT:    slli a3, a0, 63
6702; CHECK-NOV-NEXT:    beq a5, a0, .LBB51_11
6703; CHECK-NOV-NEXT:  # %bb.10: # %entry
6704; CHECK-NOV-NEXT:    slti a5, a5, 0
6705; CHECK-NOV-NEXT:    xori a5, a5, 1
6706; CHECK-NOV-NEXT:    and a1, a4, a1
6707; CHECK-NOV-NEXT:    beqz a5, .LBB51_12
6708; CHECK-NOV-NEXT:    j .LBB51_13
6709; CHECK-NOV-NEXT:  .LBB51_11:
6710; CHECK-NOV-NEXT:    sltu a5, a3, s0
6711; CHECK-NOV-NEXT:    and a1, a4, a1
6712; CHECK-NOV-NEXT:    bnez a5, .LBB51_13
6713; CHECK-NOV-NEXT:  .LBB51_12: # %entry
6714; CHECK-NOV-NEXT:    mv s0, a3
6715; CHECK-NOV-NEXT:  .LBB51_13: # %entry
6716; CHECK-NOV-NEXT:    beq a1, a0, .LBB51_15
6717; CHECK-NOV-NEXT:  # %bb.14: # %entry
6718; CHECK-NOV-NEXT:    slti a0, a1, 0
6719; CHECK-NOV-NEXT:    xori a0, a0, 1
6720; CHECK-NOV-NEXT:    beqz a0, .LBB51_16
6721; CHECK-NOV-NEXT:    j .LBB51_17
6722; CHECK-NOV-NEXT:  .LBB51_15:
6723; CHECK-NOV-NEXT:    sltu a0, a3, a2
6724; CHECK-NOV-NEXT:    bnez a0, .LBB51_17
6725; CHECK-NOV-NEXT:  .LBB51_16: # %entry
6726; CHECK-NOV-NEXT:    mv a2, a3
6727; CHECK-NOV-NEXT:  .LBB51_17: # %entry
6728; CHECK-NOV-NEXT:    mv a0, s0
6729; CHECK-NOV-NEXT:    mv a1, a2
6730; CHECK-NOV-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
6731; CHECK-NOV-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
6732; CHECK-NOV-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
6733; CHECK-NOV-NEXT:    ld s2, 0(sp) # 8-byte Folded Reload
6734; CHECK-NOV-NEXT:    .cfi_restore ra
6735; CHECK-NOV-NEXT:    .cfi_restore s0
6736; CHECK-NOV-NEXT:    .cfi_restore s1
6737; CHECK-NOV-NEXT:    .cfi_restore s2
6738; CHECK-NOV-NEXT:    addi sp, sp, 32
6739; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 0
6740; CHECK-NOV-NEXT:    ret
6741;
6742; CHECK-V-LABEL: stest_f16i64_mm:
6743; CHECK-V:       # %bb.0: # %entry
6744; CHECK-V-NEXT:    addi sp, sp, -32
6745; CHECK-V-NEXT:    .cfi_def_cfa_offset 32
6746; CHECK-V-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
6747; CHECK-V-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
6748; CHECK-V-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
6749; CHECK-V-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
6750; CHECK-V-NEXT:    .cfi_offset ra, -8
6751; CHECK-V-NEXT:    .cfi_offset s0, -16
6752; CHECK-V-NEXT:    .cfi_offset s1, -24
6753; CHECK-V-NEXT:    .cfi_offset s2, -32
6754; CHECK-V-NEXT:    mv s2, a1
6755; CHECK-V-NEXT:    fmv.w.x fa0, a0
6756; CHECK-V-NEXT:    call __extendhfsf2
6757; CHECK-V-NEXT:    call __fixsfti
6758; CHECK-V-NEXT:    mv s0, a0
6759; CHECK-V-NEXT:    mv s1, a1
6760; CHECK-V-NEXT:    fmv.w.x fa0, s2
6761; CHECK-V-NEXT:    call __extendhfsf2
6762; CHECK-V-NEXT:    call __fixsfti
6763; CHECK-V-NEXT:    li a2, -1
6764; CHECK-V-NEXT:    srli a3, a2, 1
6765; CHECK-V-NEXT:    beqz a1, .LBB51_2
6766; CHECK-V-NEXT:  # %bb.1: # %entry
6767; CHECK-V-NEXT:    slti a4, a1, 0
6768; CHECK-V-NEXT:    beqz a4, .LBB51_3
6769; CHECK-V-NEXT:    j .LBB51_4
6770; CHECK-V-NEXT:  .LBB51_2:
6771; CHECK-V-NEXT:    sltu a4, a0, a3
6772; CHECK-V-NEXT:    bnez a4, .LBB51_4
6773; CHECK-V-NEXT:  .LBB51_3: # %entry
6774; CHECK-V-NEXT:    mv a0, a3
6775; CHECK-V-NEXT:  .LBB51_4: # %entry
6776; CHECK-V-NEXT:    beqz s1, .LBB51_6
6777; CHECK-V-NEXT:  # %bb.5: # %entry
6778; CHECK-V-NEXT:    slti a6, s1, 0
6779; CHECK-V-NEXT:    j .LBB51_7
6780; CHECK-V-NEXT:  .LBB51_6:
6781; CHECK-V-NEXT:    sltu a6, s0, a3
6782; CHECK-V-NEXT:  .LBB51_7: # %entry
6783; CHECK-V-NEXT:    neg a5, a6
6784; CHECK-V-NEXT:    and a5, a5, s1
6785; CHECK-V-NEXT:    bnez a6, .LBB51_9
6786; CHECK-V-NEXT:  # %bb.8: # %entry
6787; CHECK-V-NEXT:    mv s0, a3
6788; CHECK-V-NEXT:  .LBB51_9: # %entry
6789; CHECK-V-NEXT:    neg a4, a4
6790; CHECK-V-NEXT:    slli a3, a2, 63
6791; CHECK-V-NEXT:    beq a5, a2, .LBB51_11
6792; CHECK-V-NEXT:  # %bb.10: # %entry
6793; CHECK-V-NEXT:    slti a5, a5, 0
6794; CHECK-V-NEXT:    xori a5, a5, 1
6795; CHECK-V-NEXT:    and a1, a4, a1
6796; CHECK-V-NEXT:    beqz a5, .LBB51_12
6797; CHECK-V-NEXT:    j .LBB51_13
6798; CHECK-V-NEXT:  .LBB51_11:
6799; CHECK-V-NEXT:    sltu a5, a3, s0
6800; CHECK-V-NEXT:    and a1, a4, a1
6801; CHECK-V-NEXT:    bnez a5, .LBB51_13
6802; CHECK-V-NEXT:  .LBB51_12: # %entry
6803; CHECK-V-NEXT:    mv s0, a3
6804; CHECK-V-NEXT:  .LBB51_13: # %entry
6805; CHECK-V-NEXT:    beq a1, a2, .LBB51_15
6806; CHECK-V-NEXT:  # %bb.14: # %entry
6807; CHECK-V-NEXT:    slti a1, a1, 0
6808; CHECK-V-NEXT:    xori a1, a1, 1
6809; CHECK-V-NEXT:    beqz a1, .LBB51_16
6810; CHECK-V-NEXT:    j .LBB51_17
6811; CHECK-V-NEXT:  .LBB51_15:
6812; CHECK-V-NEXT:    sltu a1, a3, a0
6813; CHECK-V-NEXT:    bnez a1, .LBB51_17
6814; CHECK-V-NEXT:  .LBB51_16: # %entry
6815; CHECK-V-NEXT:    mv a0, a3
6816; CHECK-V-NEXT:  .LBB51_17: # %entry
6817; CHECK-V-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
6818; CHECK-V-NEXT:    vmv.s.x v9, a0
6819; CHECK-V-NEXT:    vmv.s.x v8, s0
6820; CHECK-V-NEXT:    vslideup.vi v8, v9, 1
6821; CHECK-V-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
6822; CHECK-V-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
6823; CHECK-V-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
6824; CHECK-V-NEXT:    ld s2, 0(sp) # 8-byte Folded Reload
6825; CHECK-V-NEXT:    .cfi_restore ra
6826; CHECK-V-NEXT:    .cfi_restore s0
6827; CHECK-V-NEXT:    .cfi_restore s1
6828; CHECK-V-NEXT:    .cfi_restore s2
6829; CHECK-V-NEXT:    addi sp, sp, 32
6830; CHECK-V-NEXT:    .cfi_def_cfa_offset 0
6831; CHECK-V-NEXT:    ret
6832entry:
6833  %conv = fptosi <2 x half> %x to <2 x i128>
6834  %spec.store.select = call <2 x i128> @llvm.smin.v2i128(<2 x i128> %conv, <2 x i128> <i128 9223372036854775807, i128 9223372036854775807>)
6835  %spec.store.select7 = call <2 x i128> @llvm.smax.v2i128(<2 x i128> %spec.store.select, <2 x i128> <i128 -9223372036854775808, i128 -9223372036854775808>)
6836  %conv6 = trunc <2 x i128> %spec.store.select7 to <2 x i64>
6837  ret <2 x i64> %conv6
6838}
6839
6840define <2 x i64> @utesth_f16i64_mm(<2 x half> %x) {
6841; CHECK-NOV-LABEL: utesth_f16i64_mm:
6842; CHECK-NOV:       # %bb.0: # %entry
6843; CHECK-NOV-NEXT:    addi sp, sp, -32
6844; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 32
6845; CHECK-NOV-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
6846; CHECK-NOV-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
6847; CHECK-NOV-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
6848; CHECK-NOV-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
6849; CHECK-NOV-NEXT:    .cfi_offset ra, -8
6850; CHECK-NOV-NEXT:    .cfi_offset s0, -16
6851; CHECK-NOV-NEXT:    .cfi_offset s1, -24
6852; CHECK-NOV-NEXT:    .cfi_offset s2, -32
6853; CHECK-NOV-NEXT:    mv s0, a0
6854; CHECK-NOV-NEXT:    fmv.w.x fa0, a1
6855; CHECK-NOV-NEXT:    call __extendhfsf2
6856; CHECK-NOV-NEXT:    call __fixunssfti
6857; CHECK-NOV-NEXT:    mv s1, a0
6858; CHECK-NOV-NEXT:    mv s2, a1
6859; CHECK-NOV-NEXT:    fmv.w.x fa0, s0
6860; CHECK-NOV-NEXT:    call __extendhfsf2
6861; CHECK-NOV-NEXT:    call __fixunssfti
6862; CHECK-NOV-NEXT:    snez a1, a1
6863; CHECK-NOV-NEXT:    snez a2, s2
6864; CHECK-NOV-NEXT:    addi a1, a1, -1
6865; CHECK-NOV-NEXT:    addi a2, a2, -1
6866; CHECK-NOV-NEXT:    and a0, a1, a0
6867; CHECK-NOV-NEXT:    and a1, a2, s1
6868; CHECK-NOV-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
6869; CHECK-NOV-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
6870; CHECK-NOV-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
6871; CHECK-NOV-NEXT:    ld s2, 0(sp) # 8-byte Folded Reload
6872; CHECK-NOV-NEXT:    .cfi_restore ra
6873; CHECK-NOV-NEXT:    .cfi_restore s0
6874; CHECK-NOV-NEXT:    .cfi_restore s1
6875; CHECK-NOV-NEXT:    .cfi_restore s2
6876; CHECK-NOV-NEXT:    addi sp, sp, 32
6877; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 0
6878; CHECK-NOV-NEXT:    ret
6879;
6880; CHECK-V-LABEL: utesth_f16i64_mm:
6881; CHECK-V:       # %bb.0: # %entry
6882; CHECK-V-NEXT:    addi sp, sp, -32
6883; CHECK-V-NEXT:    .cfi_def_cfa_offset 32
6884; CHECK-V-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
6885; CHECK-V-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
6886; CHECK-V-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
6887; CHECK-V-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
6888; CHECK-V-NEXT:    .cfi_offset ra, -8
6889; CHECK-V-NEXT:    .cfi_offset s0, -16
6890; CHECK-V-NEXT:    .cfi_offset s1, -24
6891; CHECK-V-NEXT:    .cfi_offset s2, -32
6892; CHECK-V-NEXT:    mv s0, a0
6893; CHECK-V-NEXT:    fmv.w.x fa0, a1
6894; CHECK-V-NEXT:    call __extendhfsf2
6895; CHECK-V-NEXT:    call __fixunssfti
6896; CHECK-V-NEXT:    mv s1, a0
6897; CHECK-V-NEXT:    mv s2, a1
6898; CHECK-V-NEXT:    fmv.w.x fa0, s0
6899; CHECK-V-NEXT:    call __extendhfsf2
6900; CHECK-V-NEXT:    call __fixunssfti
6901; CHECK-V-NEXT:    snez a1, a1
6902; CHECK-V-NEXT:    snez a2, s2
6903; CHECK-V-NEXT:    addi a1, a1, -1
6904; CHECK-V-NEXT:    addi a2, a2, -1
6905; CHECK-V-NEXT:    and a0, a1, a0
6906; CHECK-V-NEXT:    and a2, a2, s1
6907; CHECK-V-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
6908; CHECK-V-NEXT:    vmv.s.x v9, a2
6909; CHECK-V-NEXT:    vmv.s.x v8, a0
6910; CHECK-V-NEXT:    vslideup.vi v8, v9, 1
6911; CHECK-V-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
6912; CHECK-V-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
6913; CHECK-V-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
6914; CHECK-V-NEXT:    ld s2, 0(sp) # 8-byte Folded Reload
6915; CHECK-V-NEXT:    .cfi_restore ra
6916; CHECK-V-NEXT:    .cfi_restore s0
6917; CHECK-V-NEXT:    .cfi_restore s1
6918; CHECK-V-NEXT:    .cfi_restore s2
6919; CHECK-V-NEXT:    addi sp, sp, 32
6920; CHECK-V-NEXT:    .cfi_def_cfa_offset 0
6921; CHECK-V-NEXT:    ret
6922entry:
6923  %conv = fptoui <2 x half> %x to <2 x i128>
6924  %spec.store.select = call <2 x i128> @llvm.umin.v2i128(<2 x i128> %conv, <2 x i128> <i128 18446744073709551616, i128 18446744073709551616>)
6925  %conv6 = trunc <2 x i128> %spec.store.select to <2 x i64>
6926  ret <2 x i64> %conv6
6927}
6928
6929define <2 x i64> @ustest_f16i64_mm(<2 x half> %x) {
6930; CHECK-NOV-LABEL: ustest_f16i64_mm:
6931; CHECK-NOV:       # %bb.0: # %entry
6932; CHECK-NOV-NEXT:    addi sp, sp, -32
6933; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 32
6934; CHECK-NOV-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
6935; CHECK-NOV-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
6936; CHECK-NOV-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
6937; CHECK-NOV-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
6938; CHECK-NOV-NEXT:    .cfi_offset ra, -8
6939; CHECK-NOV-NEXT:    .cfi_offset s0, -16
6940; CHECK-NOV-NEXT:    .cfi_offset s1, -24
6941; CHECK-NOV-NEXT:    .cfi_offset s2, -32
6942; CHECK-NOV-NEXT:    mv s2, a1
6943; CHECK-NOV-NEXT:    fmv.w.x fa0, a0
6944; CHECK-NOV-NEXT:    call __extendhfsf2
6945; CHECK-NOV-NEXT:    call __fixsfti
6946; CHECK-NOV-NEXT:    mv s0, a0
6947; CHECK-NOV-NEXT:    mv s1, a1
6948; CHECK-NOV-NEXT:    fmv.w.x fa0, s2
6949; CHECK-NOV-NEXT:    call __extendhfsf2
6950; CHECK-NOV-NEXT:    call __fixsfti
6951; CHECK-NOV-NEXT:    mv a2, a1
6952; CHECK-NOV-NEXT:    blez a1, .LBB53_2
6953; CHECK-NOV-NEXT:  # %bb.1: # %entry
6954; CHECK-NOV-NEXT:    li a2, 1
6955; CHECK-NOV-NEXT:  .LBB53_2: # %entry
6956; CHECK-NOV-NEXT:    mv a3, s1
6957; CHECK-NOV-NEXT:    blez s1, .LBB53_4
6958; CHECK-NOV-NEXT:  # %bb.3: # %entry
6959; CHECK-NOV-NEXT:    li a3, 1
6960; CHECK-NOV-NEXT:  .LBB53_4: # %entry
6961; CHECK-NOV-NEXT:    slti a1, a1, 1
6962; CHECK-NOV-NEXT:    slti a4, s1, 1
6963; CHECK-NOV-NEXT:    slti a3, a3, 0
6964; CHECK-NOV-NEXT:    slti a2, a2, 0
6965; CHECK-NOV-NEXT:    neg a1, a1
6966; CHECK-NOV-NEXT:    neg a4, a4
6967; CHECK-NOV-NEXT:    addi a3, a3, -1
6968; CHECK-NOV-NEXT:    addi a2, a2, -1
6969; CHECK-NOV-NEXT:    and a1, a1, a0
6970; CHECK-NOV-NEXT:    and a0, a4, s0
6971; CHECK-NOV-NEXT:    and a0, a3, a0
6972; CHECK-NOV-NEXT:    and a1, a2, a1
6973; CHECK-NOV-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
6974; CHECK-NOV-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
6975; CHECK-NOV-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
6976; CHECK-NOV-NEXT:    ld s2, 0(sp) # 8-byte Folded Reload
6977; CHECK-NOV-NEXT:    .cfi_restore ra
6978; CHECK-NOV-NEXT:    .cfi_restore s0
6979; CHECK-NOV-NEXT:    .cfi_restore s1
6980; CHECK-NOV-NEXT:    .cfi_restore s2
6981; CHECK-NOV-NEXT:    addi sp, sp, 32
6982; CHECK-NOV-NEXT:    .cfi_def_cfa_offset 0
6983; CHECK-NOV-NEXT:    ret
6984;
6985; CHECK-V-LABEL: ustest_f16i64_mm:
6986; CHECK-V:       # %bb.0: # %entry
6987; CHECK-V-NEXT:    addi sp, sp, -32
6988; CHECK-V-NEXT:    .cfi_def_cfa_offset 32
6989; CHECK-V-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
6990; CHECK-V-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
6991; CHECK-V-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
6992; CHECK-V-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
6993; CHECK-V-NEXT:    .cfi_offset ra, -8
6994; CHECK-V-NEXT:    .cfi_offset s0, -16
6995; CHECK-V-NEXT:    .cfi_offset s1, -24
6996; CHECK-V-NEXT:    .cfi_offset s2, -32
6997; CHECK-V-NEXT:    mv s2, a1
6998; CHECK-V-NEXT:    fmv.w.x fa0, a0
6999; CHECK-V-NEXT:    call __extendhfsf2
7000; CHECK-V-NEXT:    call __fixsfti
7001; CHECK-V-NEXT:    mv s0, a0
7002; CHECK-V-NEXT:    mv s1, a1
7003; CHECK-V-NEXT:    fmv.w.x fa0, s2
7004; CHECK-V-NEXT:    call __extendhfsf2
7005; CHECK-V-NEXT:    call __fixsfti
7006; CHECK-V-NEXT:    mv a2, a1
7007; CHECK-V-NEXT:    blez a1, .LBB53_2
7008; CHECK-V-NEXT:  # %bb.1: # %entry
7009; CHECK-V-NEXT:    li a2, 1
7010; CHECK-V-NEXT:  .LBB53_2: # %entry
7011; CHECK-V-NEXT:    mv a3, s1
7012; CHECK-V-NEXT:    blez s1, .LBB53_4
7013; CHECK-V-NEXT:  # %bb.3: # %entry
7014; CHECK-V-NEXT:    li a3, 1
7015; CHECK-V-NEXT:  .LBB53_4: # %entry
7016; CHECK-V-NEXT:    slti a1, a1, 1
7017; CHECK-V-NEXT:    slti a4, s1, 1
7018; CHECK-V-NEXT:    slti a3, a3, 0
7019; CHECK-V-NEXT:    slti a2, a2, 0
7020; CHECK-V-NEXT:    neg a1, a1
7021; CHECK-V-NEXT:    neg a4, a4
7022; CHECK-V-NEXT:    addi a3, a3, -1
7023; CHECK-V-NEXT:    addi a2, a2, -1
7024; CHECK-V-NEXT:    and a0, a1, a0
7025; CHECK-V-NEXT:    and a4, a4, s0
7026; CHECK-V-NEXT:    and a3, a3, a4
7027; CHECK-V-NEXT:    and a0, a2, a0
7028; CHECK-V-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
7029; CHECK-V-NEXT:    vmv.s.x v9, a0
7030; CHECK-V-NEXT:    vmv.s.x v8, a3
7031; CHECK-V-NEXT:    vslideup.vi v8, v9, 1
7032; CHECK-V-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
7033; CHECK-V-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
7034; CHECK-V-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
7035; CHECK-V-NEXT:    ld s2, 0(sp) # 8-byte Folded Reload
7036; CHECK-V-NEXT:    .cfi_restore ra
7037; CHECK-V-NEXT:    .cfi_restore s0
7038; CHECK-V-NEXT:    .cfi_restore s1
7039; CHECK-V-NEXT:    .cfi_restore s2
7040; CHECK-V-NEXT:    addi sp, sp, 32
7041; CHECK-V-NEXT:    .cfi_def_cfa_offset 0
7042; CHECK-V-NEXT:    ret
7043entry:
7044  %conv = fptosi <2 x half> %x to <2 x i128>
7045  %spec.store.select = call <2 x i128> @llvm.smin.v2i128(<2 x i128> %conv, <2 x i128> <i128 18446744073709551616, i128 18446744073709551616>)
7046  %spec.store.select7 = call <2 x i128> @llvm.smax.v2i128(<2 x i128> %spec.store.select, <2 x i128> zeroinitializer)
7047  %conv6 = trunc <2 x i128> %spec.store.select7 to <2 x i64>
7048  ret <2 x i64> %conv6
7049}
7050
7051declare <2 x i32> @llvm.smin.v2i32(<2 x i32>, <2 x i32>)
7052declare <2 x i32> @llvm.smax.v2i32(<2 x i32>, <2 x i32>)
7053declare <2 x i32> @llvm.umin.v2i32(<2 x i32>, <2 x i32>)
7054declare <4 x i32> @llvm.smin.v4i32(<4 x i32>, <4 x i32>)
7055declare <4 x i32> @llvm.smax.v4i32(<4 x i32>, <4 x i32>)
7056declare <4 x i32> @llvm.umin.v4i32(<4 x i32>, <4 x i32>)
7057declare <8 x i32> @llvm.smin.v8i32(<8 x i32>, <8 x i32>)
7058declare <8 x i32> @llvm.smax.v8i32(<8 x i32>, <8 x i32>)
7059declare <8 x i32> @llvm.umin.v8i32(<8 x i32>, <8 x i32>)
7060declare <2 x i64> @llvm.smin.v2i64(<2 x i64>, <2 x i64>)
7061declare <2 x i64> @llvm.smax.v2i64(<2 x i64>, <2 x i64>)
7062declare <2 x i64> @llvm.umin.v2i64(<2 x i64>, <2 x i64>)
7063declare <4 x i64> @llvm.smin.v4i64(<4 x i64>, <4 x i64>)
7064declare <4 x i64> @llvm.smax.v4i64(<4 x i64>, <4 x i64>)
7065declare <4 x i64> @llvm.umin.v4i64(<4 x i64>, <4 x i64>)
7066declare <2 x i128> @llvm.smin.v2i128(<2 x i128>, <2 x i128>)
7067declare <2 x i128> @llvm.smax.v2i128(<2 x i128>, <2 x i128>)
7068declare <2 x i128> @llvm.umin.v2i128(<2 x i128>, <2 x i128>)
7069