; xref: /llvm-project/llvm/test/CodeGen/RISCV/rvv/vfmax.ll (revision 1cb599835ccf7ee8b2d1d5a7f3107e19a26fc6f5)
1; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfh \
3; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
4; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \
5; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
6
7declare <vscale x 1 x half> @llvm.riscv.vfmax.nxv1f16.nxv1f16(
8  <vscale x 1 x half>,
9  <vscale x 1 x half>,
10  <vscale x 1 x half>,
11  iXLen);
12
; Unmasked vfmax.vv on nxv1f16: undef passthru, so llc may write the result in place (CHECK expects e16/mf4 vsetvli and vfmax.vv v8, v8, v9).
13define <vscale x 1 x half> @intrinsic_vfmax_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
14; CHECK-LABEL: intrinsic_vfmax_vv_nxv1f16_nxv1f16_nxv1f16:
15; CHECK:       # %bb.0: # %entry
16; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
17; CHECK-NEXT:    vfmax.vv v8, v8, v9
18; CHECK-NEXT:    ret
19entry:
20  %a = call <vscale x 1 x half> @llvm.riscv.vfmax.nxv1f16.nxv1f16(
21    <vscale x 1 x half> undef,
22    <vscale x 1 x half> %0,
23    <vscale x 1 x half> %1,
24    iXLen %2)
25
26  ret <vscale x 1 x half> %a
27}
28
29declare <vscale x 1 x half> @llvm.riscv.vfmax.mask.nxv1f16.nxv1f16(
30  <vscale x 1 x half>,
31  <vscale x 1 x half>,
32  <vscale x 1 x half>,
33  <vscale x 1 x i1>,
34  iXLen,
35  iXLen);
36
; Masked vfmax.vv on nxv1f16: %0 is the merge operand, %3 the mask (in v0), trailing iXLen 1 is the policy; CHECK expects e16/mf4 with ta,mu and v0.t predication.
37define <vscale x 1 x half> @intrinsic_vfmax_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
38; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv1f16_nxv1f16_nxv1f16:
39; CHECK:       # %bb.0: # %entry
40; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
41; CHECK-NEXT:    vfmax.vv v8, v9, v10, v0.t
42; CHECK-NEXT:    ret
43entry:
44  %a = call <vscale x 1 x half> @llvm.riscv.vfmax.mask.nxv1f16.nxv1f16(
45    <vscale x 1 x half> %0,
46    <vscale x 1 x half> %1,
47    <vscale x 1 x half> %2,
48    <vscale x 1 x i1> %3,
49    iXLen %4, iXLen 1)
50
51  ret <vscale x 1 x half> %a
52}
53
54declare <vscale x 2 x half> @llvm.riscv.vfmax.nxv2f16.nxv2f16(
55  <vscale x 2 x half>,
56  <vscale x 2 x half>,
57  <vscale x 2 x half>,
58  iXLen);
59
; Unmasked vfmax.vv on nxv2f16: undef passthru; CHECK expects e16/mf2 vsetvli and in-place vfmax.vv v8, v8, v9.
60define <vscale x 2 x half> @intrinsic_vfmax_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, iXLen %2) nounwind {
61; CHECK-LABEL: intrinsic_vfmax_vv_nxv2f16_nxv2f16_nxv2f16:
62; CHECK:       # %bb.0: # %entry
63; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
64; CHECK-NEXT:    vfmax.vv v8, v8, v9
65; CHECK-NEXT:    ret
66entry:
67  %a = call <vscale x 2 x half> @llvm.riscv.vfmax.nxv2f16.nxv2f16(
68    <vscale x 2 x half> undef,
69    <vscale x 2 x half> %0,
70    <vscale x 2 x half> %1,
71    iXLen %2)
72
73  ret <vscale x 2 x half> %a
74}
75
76declare <vscale x 2 x half> @llvm.riscv.vfmax.mask.nxv2f16.nxv2f16(
77  <vscale x 2 x half>,
78  <vscale x 2 x half>,
79  <vscale x 2 x half>,
80  <vscale x 2 x i1>,
81  iXLen,
82  iXLen);
83
; Masked vfmax.vv on nxv2f16: merge in %0, mask in v0, policy iXLen 1; CHECK expects e16/mf2 with ta,mu and v0.t.
84define <vscale x 2 x half> @intrinsic_vfmax_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
85; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv2f16_nxv2f16_nxv2f16:
86; CHECK:       # %bb.0: # %entry
87; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
88; CHECK-NEXT:    vfmax.vv v8, v9, v10, v0.t
89; CHECK-NEXT:    ret
90entry:
91  %a = call <vscale x 2 x half> @llvm.riscv.vfmax.mask.nxv2f16.nxv2f16(
92    <vscale x 2 x half> %0,
93    <vscale x 2 x half> %1,
94    <vscale x 2 x half> %2,
95    <vscale x 2 x i1> %3,
96    iXLen %4, iXLen 1)
97
98  ret <vscale x 2 x half> %a
99}
100
101declare <vscale x 4 x half> @llvm.riscv.vfmax.nxv4f16.nxv4f16(
102  <vscale x 4 x half>,
103  <vscale x 4 x half>,
104  <vscale x 4 x half>,
105  iXLen);
106
; Unmasked vfmax.vv on nxv4f16: undef passthru; CHECK expects e16/m1 vsetvli and in-place vfmax.vv v8, v8, v9.
107define <vscale x 4 x half> @intrinsic_vfmax_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, iXLen %2) nounwind {
108; CHECK-LABEL: intrinsic_vfmax_vv_nxv4f16_nxv4f16_nxv4f16:
109; CHECK:       # %bb.0: # %entry
110; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
111; CHECK-NEXT:    vfmax.vv v8, v8, v9
112; CHECK-NEXT:    ret
113entry:
114  %a = call <vscale x 4 x half> @llvm.riscv.vfmax.nxv4f16.nxv4f16(
115    <vscale x 4 x half> undef,
116    <vscale x 4 x half> %0,
117    <vscale x 4 x half> %1,
118    iXLen %2)
119
120  ret <vscale x 4 x half> %a
121}
122
123declare <vscale x 4 x half> @llvm.riscv.vfmax.mask.nxv4f16.nxv4f16(
124  <vscale x 4 x half>,
125  <vscale x 4 x half>,
126  <vscale x 4 x half>,
127  <vscale x 4 x i1>,
128  iXLen,
129  iXLen);
130
; Masked vfmax.vv on nxv4f16: merge in %0, mask in v0, policy iXLen 1; CHECK expects e16/m1 with ta,mu and v0.t.
131define <vscale x 4 x half> @intrinsic_vfmax_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
132; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv4f16_nxv4f16_nxv4f16:
133; CHECK:       # %bb.0: # %entry
134; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
135; CHECK-NEXT:    vfmax.vv v8, v9, v10, v0.t
136; CHECK-NEXT:    ret
137entry:
138  %a = call <vscale x 4 x half> @llvm.riscv.vfmax.mask.nxv4f16.nxv4f16(
139    <vscale x 4 x half> %0,
140    <vscale x 4 x half> %1,
141    <vscale x 4 x half> %2,
142    <vscale x 4 x i1> %3,
143    iXLen %4, iXLen 1)
144
145  ret <vscale x 4 x half> %a
146}
147
148declare <vscale x 8 x half> @llvm.riscv.vfmax.nxv8f16.nxv8f16(
149  <vscale x 8 x half>,
150  <vscale x 8 x half>,
151  <vscale x 8 x half>,
152  iXLen);
153
; Unmasked vfmax.vv on nxv8f16: undef passthru; LMUL=2 so operands occupy register pairs (CHECK: e16/m2, vfmax.vv v8, v8, v10).
154define <vscale x 8 x half> @intrinsic_vfmax_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, iXLen %2) nounwind {
155; CHECK-LABEL: intrinsic_vfmax_vv_nxv8f16_nxv8f16_nxv8f16:
156; CHECK:       # %bb.0: # %entry
157; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
158; CHECK-NEXT:    vfmax.vv v8, v8, v10
159; CHECK-NEXT:    ret
160entry:
161  %a = call <vscale x 8 x half> @llvm.riscv.vfmax.nxv8f16.nxv8f16(
162    <vscale x 8 x half> undef,
163    <vscale x 8 x half> %0,
164    <vscale x 8 x half> %1,
165    iXLen %2)
166
167  ret <vscale x 8 x half> %a
168}
169
170declare <vscale x 8 x half> @llvm.riscv.vfmax.mask.nxv8f16.nxv8f16(
171  <vscale x 8 x half>,
172  <vscale x 8 x half>,
173  <vscale x 8 x half>,
174  <vscale x 8 x i1>,
175  iXLen,
176  iXLen);
177
; Masked vfmax.vv on nxv8f16: merge in %0, mask in v0, policy iXLen 1; CHECK expects e16/m2 with ta,mu and v0.t.
178define <vscale x 8 x half> @intrinsic_vfmax_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
179; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv8f16_nxv8f16_nxv8f16:
180; CHECK:       # %bb.0: # %entry
181; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
182; CHECK-NEXT:    vfmax.vv v8, v10, v12, v0.t
183; CHECK-NEXT:    ret
184entry:
185  %a = call <vscale x 8 x half> @llvm.riscv.vfmax.mask.nxv8f16.nxv8f16(
186    <vscale x 8 x half> %0,
187    <vscale x 8 x half> %1,
188    <vscale x 8 x half> %2,
189    <vscale x 8 x i1> %3,
190    iXLen %4, iXLen 1)
191
192  ret <vscale x 8 x half> %a
193}
194
195declare <vscale x 16 x half> @llvm.riscv.vfmax.nxv16f16.nxv16f16(
196  <vscale x 16 x half>,
197  <vscale x 16 x half>,
198  <vscale x 16 x half>,
199  iXLen);
200
; Unmasked vfmax.vv on nxv16f16: undef passthru; LMUL=4 register groups (CHECK: e16/m4, vfmax.vv v8, v8, v12).
201define <vscale x 16 x half> @intrinsic_vfmax_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, iXLen %2) nounwind {
202; CHECK-LABEL: intrinsic_vfmax_vv_nxv16f16_nxv16f16_nxv16f16:
203; CHECK:       # %bb.0: # %entry
204; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
205; CHECK-NEXT:    vfmax.vv v8, v8, v12
206; CHECK-NEXT:    ret
207entry:
208  %a = call <vscale x 16 x half> @llvm.riscv.vfmax.nxv16f16.nxv16f16(
209    <vscale x 16 x half> undef,
210    <vscale x 16 x half> %0,
211    <vscale x 16 x half> %1,
212    iXLen %2)
213
214  ret <vscale x 16 x half> %a
215}
216
217declare <vscale x 16 x half> @llvm.riscv.vfmax.mask.nxv16f16.nxv16f16(
218  <vscale x 16 x half>,
219  <vscale x 16 x half>,
220  <vscale x 16 x half>,
221  <vscale x 16 x i1>,
222  iXLen,
223  iXLen);
224
; Masked vfmax.vv on nxv16f16: merge in %0, mask in v0, policy iXLen 1; CHECK expects e16/m4 with ta,mu and v0.t.
225define <vscale x 16 x half> @intrinsic_vfmax_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
226; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv16f16_nxv16f16_nxv16f16:
227; CHECK:       # %bb.0: # %entry
228; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
229; CHECK-NEXT:    vfmax.vv v8, v12, v16, v0.t
230; CHECK-NEXT:    ret
231entry:
232  %a = call <vscale x 16 x half> @llvm.riscv.vfmax.mask.nxv16f16.nxv16f16(
233    <vscale x 16 x half> %0,
234    <vscale x 16 x half> %1,
235    <vscale x 16 x half> %2,
236    <vscale x 16 x i1> %3,
237    iXLen %4, iXLen 1)
238
239  ret <vscale x 16 x half> %a
240}
241
242declare <vscale x 32 x half> @llvm.riscv.vfmax.nxv32f16.nxv32f16(
243  <vscale x 32 x half>,
244  <vscale x 32 x half>,
245  <vscale x 32 x half>,
246  iXLen);
247
; Unmasked vfmax.vv on nxv32f16: undef passthru; LMUL=8 so each operand is a full 8-register group (CHECK: e16/m8, vfmax.vv v8, v8, v16).
248define <vscale x 32 x half> @intrinsic_vfmax_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, iXLen %2) nounwind {
249; CHECK-LABEL: intrinsic_vfmax_vv_nxv32f16_nxv32f16_nxv32f16:
250; CHECK:       # %bb.0: # %entry
251; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
252; CHECK-NEXT:    vfmax.vv v8, v8, v16
253; CHECK-NEXT:    ret
254entry:
255  %a = call <vscale x 32 x half> @llvm.riscv.vfmax.nxv32f16.nxv32f16(
256    <vscale x 32 x half> undef,
257    <vscale x 32 x half> %0,
258    <vscale x 32 x half> %1,
259    iXLen %2)
260
261  ret <vscale x 32 x half> %a
262}
263
264declare <vscale x 32 x half> @llvm.riscv.vfmax.mask.nxv32f16.nxv32f16(
265  <vscale x 32 x half>,
266  <vscale x 32 x half>,
267  <vscale x 32 x half>,
268  <vscale x 32 x i1>,
269  iXLen,
270  iXLen);
271
; Masked vfmax.vv on nxv32f16 (LMUL=8): three m8 vector args exceed the v8/v16 argument registers, so the third operand is passed indirectly and reloaded via vl8re16.v from (a0); AVL therefore arrives in a1.
272define <vscale x 32 x half> @intrinsic_vfmax_mask_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
273; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv32f16_nxv32f16_nxv32f16:
274; CHECK:       # %bb.0: # %entry
275; CHECK-NEXT:    vl8re16.v v24, (a0)
276; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
277; CHECK-NEXT:    vfmax.vv v8, v16, v24, v0.t
278; CHECK-NEXT:    ret
279entry:
280  %a = call <vscale x 32 x half> @llvm.riscv.vfmax.mask.nxv32f16.nxv32f16(
281    <vscale x 32 x half> %0,
282    <vscale x 32 x half> %1,
283    <vscale x 32 x half> %2,
284    <vscale x 32 x i1> %3,
285    iXLen %4, iXLen 1)
286
287  ret <vscale x 32 x half> %a
288}
289
290declare <vscale x 1 x float> @llvm.riscv.vfmax.nxv1f32.nxv1f32(
291  <vscale x 1 x float>,
292  <vscale x 1 x float>,
293  <vscale x 1 x float>,
294  iXLen);
295
; Unmasked vfmax.vv on nxv1f32: undef passthru; CHECK expects e32/mf2 vsetvli and in-place vfmax.vv v8, v8, v9.
296define <vscale x 1 x float> @intrinsic_vfmax_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, iXLen %2) nounwind {
297; CHECK-LABEL: intrinsic_vfmax_vv_nxv1f32_nxv1f32_nxv1f32:
298; CHECK:       # %bb.0: # %entry
299; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
300; CHECK-NEXT:    vfmax.vv v8, v8, v9
301; CHECK-NEXT:    ret
302entry:
303  %a = call <vscale x 1 x float> @llvm.riscv.vfmax.nxv1f32.nxv1f32(
304    <vscale x 1 x float> undef,
305    <vscale x 1 x float> %0,
306    <vscale x 1 x float> %1,
307    iXLen %2)
308
309  ret <vscale x 1 x float> %a
310}
311
312declare <vscale x 1 x float> @llvm.riscv.vfmax.mask.nxv1f32.nxv1f32(
313  <vscale x 1 x float>,
314  <vscale x 1 x float>,
315  <vscale x 1 x float>,
316  <vscale x 1 x i1>,
317  iXLen,
318  iXLen);
319
; Masked vfmax.vv on nxv1f32: merge in %0, mask in v0, policy iXLen 1; CHECK expects e32/mf2 with ta,mu and v0.t.
320define <vscale x 1 x float> @intrinsic_vfmax_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
321; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv1f32_nxv1f32_nxv1f32:
322; CHECK:       # %bb.0: # %entry
323; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
324; CHECK-NEXT:    vfmax.vv v8, v9, v10, v0.t
325; CHECK-NEXT:    ret
326entry:
327  %a = call <vscale x 1 x float> @llvm.riscv.vfmax.mask.nxv1f32.nxv1f32(
328    <vscale x 1 x float> %0,
329    <vscale x 1 x float> %1,
330    <vscale x 1 x float> %2,
331    <vscale x 1 x i1> %3,
332    iXLen %4, iXLen 1)
333
334  ret <vscale x 1 x float> %a
335}
336
337declare <vscale x 2 x float> @llvm.riscv.vfmax.nxv2f32.nxv2f32(
338  <vscale x 2 x float>,
339  <vscale x 2 x float>,
340  <vscale x 2 x float>,
341  iXLen);
342
; Unmasked vfmax.vv on nxv2f32: undef passthru; CHECK expects e32/m1 vsetvli and in-place vfmax.vv v8, v8, v9.
343define <vscale x 2 x float> @intrinsic_vfmax_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, iXLen %2) nounwind {
344; CHECK-LABEL: intrinsic_vfmax_vv_nxv2f32_nxv2f32_nxv2f32:
345; CHECK:       # %bb.0: # %entry
346; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
347; CHECK-NEXT:    vfmax.vv v8, v8, v9
348; CHECK-NEXT:    ret
349entry:
350  %a = call <vscale x 2 x float> @llvm.riscv.vfmax.nxv2f32.nxv2f32(
351    <vscale x 2 x float> undef,
352    <vscale x 2 x float> %0,
353    <vscale x 2 x float> %1,
354    iXLen %2)
355
356  ret <vscale x 2 x float> %a
357}
358
359declare <vscale x 2 x float> @llvm.riscv.vfmax.mask.nxv2f32.nxv2f32(
360  <vscale x 2 x float>,
361  <vscale x 2 x float>,
362  <vscale x 2 x float>,
363  <vscale x 2 x i1>,
364  iXLen,
365  iXLen);
366
; Masked vfmax.vv on nxv2f32: merge in %0, mask in v0, policy iXLen 1; CHECK expects e32/m1 with ta,mu and v0.t.
367define <vscale x 2 x float> @intrinsic_vfmax_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
368; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv2f32_nxv2f32_nxv2f32:
369; CHECK:       # %bb.0: # %entry
370; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
371; CHECK-NEXT:    vfmax.vv v8, v9, v10, v0.t
372; CHECK-NEXT:    ret
373entry:
374  %a = call <vscale x 2 x float> @llvm.riscv.vfmax.mask.nxv2f32.nxv2f32(
375    <vscale x 2 x float> %0,
376    <vscale x 2 x float> %1,
377    <vscale x 2 x float> %2,
378    <vscale x 2 x i1> %3,
379    iXLen %4, iXLen 1)
380
381  ret <vscale x 2 x float> %a
382}
383
384declare <vscale x 4 x float> @llvm.riscv.vfmax.nxv4f32.nxv4f32(
385  <vscale x 4 x float>,
386  <vscale x 4 x float>,
387  <vscale x 4 x float>,
388  iXLen);
389
; Unmasked vfmax.vv on nxv4f32: undef passthru; LMUL=2 register pairs (CHECK: e32/m2, vfmax.vv v8, v8, v10).
390define <vscale x 4 x float> @intrinsic_vfmax_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, iXLen %2) nounwind {
391; CHECK-LABEL: intrinsic_vfmax_vv_nxv4f32_nxv4f32_nxv4f32:
392; CHECK:       # %bb.0: # %entry
393; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
394; CHECK-NEXT:    vfmax.vv v8, v8, v10
395; CHECK-NEXT:    ret
396entry:
397  %a = call <vscale x 4 x float> @llvm.riscv.vfmax.nxv4f32.nxv4f32(
398    <vscale x 4 x float> undef,
399    <vscale x 4 x float> %0,
400    <vscale x 4 x float> %1,
401    iXLen %2)
402
403  ret <vscale x 4 x float> %a
404}
405
406declare <vscale x 4 x float> @llvm.riscv.vfmax.mask.nxv4f32.nxv4f32(
407  <vscale x 4 x float>,
408  <vscale x 4 x float>,
409  <vscale x 4 x float>,
410  <vscale x 4 x i1>,
411  iXLen,
412  iXLen);
413
; Masked vfmax.vv on nxv4f32: merge in %0, mask in v0, policy iXLen 1; CHECK expects e32/m2 with ta,mu and v0.t.
414define <vscale x 4 x float> @intrinsic_vfmax_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
415; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv4f32_nxv4f32_nxv4f32:
416; CHECK:       # %bb.0: # %entry
417; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
418; CHECK-NEXT:    vfmax.vv v8, v10, v12, v0.t
419; CHECK-NEXT:    ret
420entry:
421  %a = call <vscale x 4 x float> @llvm.riscv.vfmax.mask.nxv4f32.nxv4f32(
422    <vscale x 4 x float> %0,
423    <vscale x 4 x float> %1,
424    <vscale x 4 x float> %2,
425    <vscale x 4 x i1> %3,
426    iXLen %4, iXLen 1)
427
428  ret <vscale x 4 x float> %a
429}
430
431declare <vscale x 8 x float> @llvm.riscv.vfmax.nxv8f32.nxv8f32(
432  <vscale x 8 x float>,
433  <vscale x 8 x float>,
434  <vscale x 8 x float>,
435  iXLen);
436
; Unmasked vfmax.vv on nxv8f32: undef passthru; LMUL=4 register groups (CHECK: e32/m4, vfmax.vv v8, v8, v12).
437define <vscale x 8 x float> @intrinsic_vfmax_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, iXLen %2) nounwind {
438; CHECK-LABEL: intrinsic_vfmax_vv_nxv8f32_nxv8f32_nxv8f32:
439; CHECK:       # %bb.0: # %entry
440; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
441; CHECK-NEXT:    vfmax.vv v8, v8, v12
442; CHECK-NEXT:    ret
443entry:
444  %a = call <vscale x 8 x float> @llvm.riscv.vfmax.nxv8f32.nxv8f32(
445    <vscale x 8 x float> undef,
446    <vscale x 8 x float> %0,
447    <vscale x 8 x float> %1,
448    iXLen %2)
449
450  ret <vscale x 8 x float> %a
451}
452
453declare <vscale x 8 x float> @llvm.riscv.vfmax.mask.nxv8f32.nxv8f32(
454  <vscale x 8 x float>,
455  <vscale x 8 x float>,
456  <vscale x 8 x float>,
457  <vscale x 8 x i1>,
458  iXLen,
459  iXLen);
460
; Masked vfmax.vv on nxv8f32: merge in %0, mask in v0, policy iXLen 1; CHECK expects e32/m4 with ta,mu and v0.t.
461define <vscale x 8 x float> @intrinsic_vfmax_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
462; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv8f32_nxv8f32_nxv8f32:
463; CHECK:       # %bb.0: # %entry
464; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
465; CHECK-NEXT:    vfmax.vv v8, v12, v16, v0.t
466; CHECK-NEXT:    ret
467entry:
468  %a = call <vscale x 8 x float> @llvm.riscv.vfmax.mask.nxv8f32.nxv8f32(
469    <vscale x 8 x float> %0,
470    <vscale x 8 x float> %1,
471    <vscale x 8 x float> %2,
472    <vscale x 8 x i1> %3,
473    iXLen %4, iXLen 1)
474
475  ret <vscale x 8 x float> %a
476}
477
478declare <vscale x 16 x float> @llvm.riscv.vfmax.nxv16f32.nxv16f32(
479  <vscale x 16 x float>,
480  <vscale x 16 x float>,
481  <vscale x 16 x float>,
482  iXLen);
483
; Unmasked vfmax.vv on nxv16f32: undef passthru; full LMUL=8 groups (CHECK: e32/m8, vfmax.vv v8, v8, v16).
484define <vscale x 16 x float> @intrinsic_vfmax_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, iXLen %2) nounwind {
485; CHECK-LABEL: intrinsic_vfmax_vv_nxv16f32_nxv16f32_nxv16f32:
486; CHECK:       # %bb.0: # %entry
487; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
488; CHECK-NEXT:    vfmax.vv v8, v8, v16
489; CHECK-NEXT:    ret
490entry:
491  %a = call <vscale x 16 x float> @llvm.riscv.vfmax.nxv16f32.nxv16f32(
492    <vscale x 16 x float> undef,
493    <vscale x 16 x float> %0,
494    <vscale x 16 x float> %1,
495    iXLen %2)
496
497  ret <vscale x 16 x float> %a
498}
499
500declare <vscale x 16 x float> @llvm.riscv.vfmax.mask.nxv16f32.nxv16f32(
501  <vscale x 16 x float>,
502  <vscale x 16 x float>,
503  <vscale x 16 x float>,
504  <vscale x 16 x i1>,
505  iXLen,
506  iXLen);
507
; Masked vfmax.vv on nxv16f32 (LMUL=8): third m8 operand does not fit in argument registers, so it is reloaded via vl8re32.v from (a0) and the AVL comes in a1.
508define <vscale x 16 x float> @intrinsic_vfmax_mask_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
509; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv16f32_nxv16f32_nxv16f32:
510; CHECK:       # %bb.0: # %entry
511; CHECK-NEXT:    vl8re32.v v24, (a0)
512; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
513; CHECK-NEXT:    vfmax.vv v8, v16, v24, v0.t
514; CHECK-NEXT:    ret
515entry:
516  %a = call <vscale x 16 x float> @llvm.riscv.vfmax.mask.nxv16f32.nxv16f32(
517    <vscale x 16 x float> %0,
518    <vscale x 16 x float> %1,
519    <vscale x 16 x float> %2,
520    <vscale x 16 x i1> %3,
521    iXLen %4, iXLen 1)
522
523  ret <vscale x 16 x float> %a
524}
525
526declare <vscale x 1 x double> @llvm.riscv.vfmax.nxv1f64.nxv1f64(
527  <vscale x 1 x double>,
528  <vscale x 1 x double>,
529  <vscale x 1 x double>,
530  iXLen);
531
; Unmasked vfmax.vv on nxv1f64: undef passthru; CHECK expects e64/m1 vsetvli and in-place vfmax.vv v8, v8, v9.
532define <vscale x 1 x double> @intrinsic_vfmax_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, iXLen %2) nounwind {
533; CHECK-LABEL: intrinsic_vfmax_vv_nxv1f64_nxv1f64_nxv1f64:
534; CHECK:       # %bb.0: # %entry
535; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
536; CHECK-NEXT:    vfmax.vv v8, v8, v9
537; CHECK-NEXT:    ret
538entry:
539  %a = call <vscale x 1 x double> @llvm.riscv.vfmax.nxv1f64.nxv1f64(
540    <vscale x 1 x double> undef,
541    <vscale x 1 x double> %0,
542    <vscale x 1 x double> %1,
543    iXLen %2)
544
545  ret <vscale x 1 x double> %a
546}
547
548declare <vscale x 1 x double> @llvm.riscv.vfmax.mask.nxv1f64.nxv1f64(
549  <vscale x 1 x double>,
550  <vscale x 1 x double>,
551  <vscale x 1 x double>,
552  <vscale x 1 x i1>,
553  iXLen,
554  iXLen);
555
; Masked vfmax.vv on nxv1f64: merge in %0, mask in v0, policy iXLen 1; CHECK expects e64/m1 with ta,mu and v0.t.
556define <vscale x 1 x double> @intrinsic_vfmax_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
557; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv1f64_nxv1f64_nxv1f64:
558; CHECK:       # %bb.0: # %entry
559; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
560; CHECK-NEXT:    vfmax.vv v8, v9, v10, v0.t
561; CHECK-NEXT:    ret
562entry:
563  %a = call <vscale x 1 x double> @llvm.riscv.vfmax.mask.nxv1f64.nxv1f64(
564    <vscale x 1 x double> %0,
565    <vscale x 1 x double> %1,
566    <vscale x 1 x double> %2,
567    <vscale x 1 x i1> %3,
568    iXLen %4, iXLen 1)
569
570  ret <vscale x 1 x double> %a
571}
572
573declare <vscale x 2 x double> @llvm.riscv.vfmax.nxv2f64.nxv2f64(
574  <vscale x 2 x double>,
575  <vscale x 2 x double>,
576  <vscale x 2 x double>,
577  iXLen);
578
; Unmasked vfmax.vv on nxv2f64: undef passthru; LMUL=2 register pairs (CHECK: e64/m2, vfmax.vv v8, v8, v10).
579define <vscale x 2 x double> @intrinsic_vfmax_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, iXLen %2) nounwind {
580; CHECK-LABEL: intrinsic_vfmax_vv_nxv2f64_nxv2f64_nxv2f64:
581; CHECK:       # %bb.0: # %entry
582; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
583; CHECK-NEXT:    vfmax.vv v8, v8, v10
584; CHECK-NEXT:    ret
585entry:
586  %a = call <vscale x 2 x double> @llvm.riscv.vfmax.nxv2f64.nxv2f64(
587    <vscale x 2 x double> undef,
588    <vscale x 2 x double> %0,
589    <vscale x 2 x double> %1,
590    iXLen %2)
591
592  ret <vscale x 2 x double> %a
593}
594
595declare <vscale x 2 x double> @llvm.riscv.vfmax.mask.nxv2f64.nxv2f64(
596  <vscale x 2 x double>,
597  <vscale x 2 x double>,
598  <vscale x 2 x double>,
599  <vscale x 2 x i1>,
600  iXLen,
601  iXLen);
602
; Masked vfmax.vv on nxv2f64: merge in %0, mask in v0, policy iXLen 1; CHECK expects e64/m2 with ta,mu and v0.t.
603define <vscale x 2 x double> @intrinsic_vfmax_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
604; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv2f64_nxv2f64_nxv2f64:
605; CHECK:       # %bb.0: # %entry
606; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
607; CHECK-NEXT:    vfmax.vv v8, v10, v12, v0.t
608; CHECK-NEXT:    ret
609entry:
610  %a = call <vscale x 2 x double> @llvm.riscv.vfmax.mask.nxv2f64.nxv2f64(
611    <vscale x 2 x double> %0,
612    <vscale x 2 x double> %1,
613    <vscale x 2 x double> %2,
614    <vscale x 2 x i1> %3,
615    iXLen %4, iXLen 1)
616
617  ret <vscale x 2 x double> %a
618}
619
620declare <vscale x 4 x double> @llvm.riscv.vfmax.nxv4f64.nxv4f64(
621  <vscale x 4 x double>,
622  <vscale x 4 x double>,
623  <vscale x 4 x double>,
624  iXLen);
625
; Unmasked vfmax.vv on nxv4f64: undef passthru; LMUL=4 register groups (CHECK: e64/m4, vfmax.vv v8, v8, v12).
626define <vscale x 4 x double> @intrinsic_vfmax_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, iXLen %2) nounwind {
627; CHECK-LABEL: intrinsic_vfmax_vv_nxv4f64_nxv4f64_nxv4f64:
628; CHECK:       # %bb.0: # %entry
629; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
630; CHECK-NEXT:    vfmax.vv v8, v8, v12
631; CHECK-NEXT:    ret
632entry:
633  %a = call <vscale x 4 x double> @llvm.riscv.vfmax.nxv4f64.nxv4f64(
634    <vscale x 4 x double> undef,
635    <vscale x 4 x double> %0,
636    <vscale x 4 x double> %1,
637    iXLen %2)
638
639  ret <vscale x 4 x double> %a
640}
641
642declare <vscale x 4 x double> @llvm.riscv.vfmax.mask.nxv4f64.nxv4f64(
643  <vscale x 4 x double>,
644  <vscale x 4 x double>,
645  <vscale x 4 x double>,
646  <vscale x 4 x i1>,
647  iXLen,
648  iXLen);
649
; Masked vfmax.vv on nxv4f64: merge in %0, mask in v0, policy iXLen 1; CHECK expects e64/m4 with ta,mu and v0.t.
650define <vscale x 4 x double> @intrinsic_vfmax_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
651; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv4f64_nxv4f64_nxv4f64:
652; CHECK:       # %bb.0: # %entry
653; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
654; CHECK-NEXT:    vfmax.vv v8, v12, v16, v0.t
655; CHECK-NEXT:    ret
656entry:
657  %a = call <vscale x 4 x double> @llvm.riscv.vfmax.mask.nxv4f64.nxv4f64(
658    <vscale x 4 x double> %0,
659    <vscale x 4 x double> %1,
660    <vscale x 4 x double> %2,
661    <vscale x 4 x i1> %3,
662    iXLen %4, iXLen 1)
663
664  ret <vscale x 4 x double> %a
665}
666
667declare <vscale x 8 x double> @llvm.riscv.vfmax.nxv8f64.nxv8f64(
668  <vscale x 8 x double>,
669  <vscale x 8 x double>,
670  <vscale x 8 x double>,
671  iXLen);
672
; Unmasked vfmax.vv on nxv8f64: undef passthru; full LMUL=8 groups (CHECK: e64/m8, vfmax.vv v8, v8, v16).
673define <vscale x 8 x double> @intrinsic_vfmax_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, iXLen %2) nounwind {
674; CHECK-LABEL: intrinsic_vfmax_vv_nxv8f64_nxv8f64_nxv8f64:
675; CHECK:       # %bb.0: # %entry
676; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
677; CHECK-NEXT:    vfmax.vv v8, v8, v16
678; CHECK-NEXT:    ret
679entry:
680  %a = call <vscale x 8 x double> @llvm.riscv.vfmax.nxv8f64.nxv8f64(
681    <vscale x 8 x double> undef,
682    <vscale x 8 x double> %0,
683    <vscale x 8 x double> %1,
684    iXLen %2)
685
686  ret <vscale x 8 x double> %a
687}
688
689declare <vscale x 8 x double> @llvm.riscv.vfmax.mask.nxv8f64.nxv8f64(
690  <vscale x 8 x double>,
691  <vscale x 8 x double>,
692  <vscale x 8 x double>,
693  <vscale x 8 x i1>,
694  iXLen,
695  iXLen);
696
; Masked vfmax.vv on nxv8f64 (LMUL=8): third m8 operand does not fit in argument registers, so it is reloaded via vl8re64.v from (a0) and the AVL comes in a1.
697define <vscale x 8 x double> @intrinsic_vfmax_mask_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x double> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
698; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv8f64_nxv8f64_nxv8f64:
699; CHECK:       # %bb.0: # %entry
700; CHECK-NEXT:    vl8re64.v v24, (a0)
701; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
702; CHECK-NEXT:    vfmax.vv v8, v16, v24, v0.t
703; CHECK-NEXT:    ret
704entry:
705  %a = call <vscale x 8 x double> @llvm.riscv.vfmax.mask.nxv8f64.nxv8f64(
706    <vscale x 8 x double> %0,
707    <vscale x 8 x double> %1,
708    <vscale x 8 x double> %2,
709    <vscale x 8 x i1> %3,
710    iXLen %4, iXLen 1)
711
712  ret <vscale x 8 x double> %a
713}
714
715declare <vscale x 1 x half> @llvm.riscv.vfmax.nxv1f16.f16(
716  <vscale x 1 x half>,
717  <vscale x 1 x half>,
718  half,
719  iXLen);
720
; Unmasked vfmax.vf on nxv1f16: scalar half operand arrives in fa0 (ilp32d/lp64d ABI); CHECK expects e16/mf4 and vfmax.vf v8, v8, fa0.
721define <vscale x 1 x half> @intrinsic_vfmax_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, iXLen %2) nounwind {
722; CHECK-LABEL: intrinsic_vfmax_vf_nxv1f16_nxv1f16_f16:
723; CHECK:       # %bb.0: # %entry
724; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
725; CHECK-NEXT:    vfmax.vf v8, v8, fa0
726; CHECK-NEXT:    ret
727entry:
728  %a = call <vscale x 1 x half> @llvm.riscv.vfmax.nxv1f16.f16(
729    <vscale x 1 x half> undef,
730    <vscale x 1 x half> %0,
731    half %1,
732    iXLen %2)
733
734  ret <vscale x 1 x half> %a
735}
736
737declare <vscale x 1 x half> @llvm.riscv.vfmax.mask.nxv1f16.f16(
738  <vscale x 1 x half>,
739  <vscale x 1 x half>,
740  half,
741  <vscale x 1 x i1>,
742  iXLen,
743  iXLen);
744
; Masked vfmax.vf on nxv1f16: merge in %0, scalar in fa0, mask in v0, policy iXLen 1; CHECK expects e16/mf4 with ta,mu and v0.t.
745define <vscale x 1 x half> @intrinsic_vfmax_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
746; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv1f16_nxv1f16_f16:
747; CHECK:       # %bb.0: # %entry
748; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
749; CHECK-NEXT:    vfmax.vf v8, v9, fa0, v0.t
750; CHECK-NEXT:    ret
751entry:
752  %a = call <vscale x 1 x half> @llvm.riscv.vfmax.mask.nxv1f16.f16(
753    <vscale x 1 x half> %0,
754    <vscale x 1 x half> %1,
755    half %2,
756    <vscale x 1 x i1> %3,
757    iXLen %4, iXLen 1)
758
759  ret <vscale x 1 x half> %a
760}
761
762declare <vscale x 2 x half> @llvm.riscv.vfmax.nxv2f16.f16(
763  <vscale x 2 x half>,
764  <vscale x 2 x half>,
765  half,
766  iXLen);
767
; Unmasked vfmax.vf on nxv2f16: scalar half in fa0; CHECK expects e16/mf2 and vfmax.vf v8, v8, fa0.
768define <vscale x 2 x half> @intrinsic_vfmax_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, half %1, iXLen %2) nounwind {
769; CHECK-LABEL: intrinsic_vfmax_vf_nxv2f16_nxv2f16_f16:
770; CHECK:       # %bb.0: # %entry
771; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
772; CHECK-NEXT:    vfmax.vf v8, v8, fa0
773; CHECK-NEXT:    ret
774entry:
775  %a = call <vscale x 2 x half> @llvm.riscv.vfmax.nxv2f16.f16(
776    <vscale x 2 x half> undef,
777    <vscale x 2 x half> %0,
778    half %1,
779    iXLen %2)
780
781  ret <vscale x 2 x half> %a
782}
783
784declare <vscale x 2 x half> @llvm.riscv.vfmax.mask.nxv2f16.f16(
785  <vscale x 2 x half>,
786  <vscale x 2 x half>,
787  half,
788  <vscale x 2 x i1>,
789  iXLen,
790  iXLen);
791
; Masked vfmax.vf on nxv2f16: merge in %0, scalar in fa0, mask in v0, policy iXLen 1; CHECK expects e16/mf2 with ta,mu and v0.t.
792define <vscale x 2 x half> @intrinsic_vfmax_mask_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
793; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv2f16_nxv2f16_f16:
794; CHECK:       # %bb.0: # %entry
795; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
796; CHECK-NEXT:    vfmax.vf v8, v9, fa0, v0.t
797; CHECK-NEXT:    ret
798entry:
799  %a = call <vscale x 2 x half> @llvm.riscv.vfmax.mask.nxv2f16.f16(
800    <vscale x 2 x half> %0,
801    <vscale x 2 x half> %1,
802    half %2,
803    <vscale x 2 x i1> %3,
804    iXLen %4, iXLen 1)
805
806  ret <vscale x 2 x half> %a
807}
808
809declare <vscale x 4 x half> @llvm.riscv.vfmax.nxv4f16.f16(
810  <vscale x 4 x half>,
811  <vscale x 4 x half>,
812  half,
813  iXLen);
814
; Unmasked vfmax.vf on nxv4f16: scalar half in fa0; CHECK expects e16/m1 and vfmax.vf v8, v8, fa0.
815define <vscale x 4 x half> @intrinsic_vfmax_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, half %1, iXLen %2) nounwind {
816; CHECK-LABEL: intrinsic_vfmax_vf_nxv4f16_nxv4f16_f16:
817; CHECK:       # %bb.0: # %entry
818; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
819; CHECK-NEXT:    vfmax.vf v8, v8, fa0
820; CHECK-NEXT:    ret
821entry:
822  %a = call <vscale x 4 x half> @llvm.riscv.vfmax.nxv4f16.f16(
823    <vscale x 4 x half> undef,
824    <vscale x 4 x half> %0,
825    half %1,
826    iXLen %2)
827
828  ret <vscale x 4 x half> %a
829}
830
831declare <vscale x 4 x half> @llvm.riscv.vfmax.mask.nxv4f16.f16(
832  <vscale x 4 x half>,
833  <vscale x 4 x half>,
834  half,
835  <vscale x 4 x i1>,
836  iXLen,
837  iXLen);
838
; Masked vfmax.vf on nxv4f16: merge in %0, scalar in fa0, mask in v0, policy iXLen 1; CHECK expects e16/m1 with ta,mu and v0.t.
839define <vscale x 4 x half> @intrinsic_vfmax_mask_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
840; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv4f16_nxv4f16_f16:
841; CHECK:       # %bb.0: # %entry
842; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
843; CHECK-NEXT:    vfmax.vf v8, v9, fa0, v0.t
844; CHECK-NEXT:    ret
845entry:
846  %a = call <vscale x 4 x half> @llvm.riscv.vfmax.mask.nxv4f16.f16(
847    <vscale x 4 x half> %0,
848    <vscale x 4 x half> %1,
849    half %2,
850    <vscale x 4 x i1> %3,
851    iXLen %4, iXLen 1)
852
853  ret <vscale x 4 x half> %a
854}
855
856declare <vscale x 8 x half> @llvm.riscv.vfmax.nxv8f16.f16(
857  <vscale x 8 x half>,
858  <vscale x 8 x half>,
859  half,
860  iXLen);
861
; Unmasked vfmax.vf on nxv8f16: scalar half in fa0; LMUL=2 (CHECK: e16/m2, vfmax.vf v8, v8, fa0).
862define <vscale x 8 x half> @intrinsic_vfmax_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, half %1, iXLen %2) nounwind {
863; CHECK-LABEL: intrinsic_vfmax_vf_nxv8f16_nxv8f16_f16:
864; CHECK:       # %bb.0: # %entry
865; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
866; CHECK-NEXT:    vfmax.vf v8, v8, fa0
867; CHECK-NEXT:    ret
868entry:
869  %a = call <vscale x 8 x half> @llvm.riscv.vfmax.nxv8f16.f16(
870    <vscale x 8 x half> undef,
871    <vscale x 8 x half> %0,
872    half %1,
873    iXLen %2)
874
875  ret <vscale x 8 x half> %a
876}
877
878declare <vscale x 8 x half> @llvm.riscv.vfmax.mask.nxv8f16.f16(
879  <vscale x 8 x half>,
880  <vscale x 8 x half>,
881  half,
882  <vscale x 8 x i1>,
883  iXLen,
884  iXLen);
885
; Masked vfmax.vf on nxv8f16: merge in %0, scalar in fa0, mask in v0, policy iXLen 1; CHECK expects e16/m2 with ta,mu and v0.t.
886define <vscale x 8 x half> @intrinsic_vfmax_mask_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
887; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv8f16_nxv8f16_f16:
888; CHECK:       # %bb.0: # %entry
889; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
890; CHECK-NEXT:    vfmax.vf v8, v10, fa0, v0.t
891; CHECK-NEXT:    ret
892entry:
893  %a = call <vscale x 8 x half> @llvm.riscv.vfmax.mask.nxv8f16.f16(
894    <vscale x 8 x half> %0,
895    <vscale x 8 x half> %1,
896    half %2,
897    <vscale x 8 x i1> %3,
898    iXLen %4, iXLen 1)
899
900  ret <vscale x 8 x half> %a
901}
902
; vfmax.vf intrinsic: passthru, vector op, scalar op, vl.
declare <vscale x 16 x half> @llvm.riscv.vfmax.nxv16f16.f16(
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  half,
  iXLen);

; Unmasked vfmax.vf for nxv16f16 (e16/m4). The passthru is unused, so pass
; poison rather than the deprecated undef; codegen is unchanged
; (tail-agnostic "ta, ma" vsetvli, as checked below).
define <vscale x 16 x half> @intrinsic_vfmax_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmax_vf_nxv16f16_nxv16f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vfmax.vf v8, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vfmax.nxv16f16.f16(
    <vscale x 16 x half> poison,
    <vscale x 16 x half> %0,
    half %1,
    iXLen %2)

  ret <vscale x 16 x half> %a
}
924
; Masked vfmax.vf intrinsic: merge, vector op, scalar op, mask, vl, policy.
declare <vscale x 16 x half> @llvm.riscv.vfmax.mask.nxv16f16.f16(
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  half,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

; Masked vfmax.vf for nxv16f16 (e16/m4): max(%1, fa0) where mask %3 is set,
; merging into %0 elsewhere. Policy value 1 requests tail-agnostic,
; mask-undisturbed, matching the "ta, mu" vsetvli checked below.
define <vscale x 16 x half> @intrinsic_vfmax_mask_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv16f16_nxv16f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vfmax.vf v8, v12, fa0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vfmax.mask.nxv16f16.f16(
    <vscale x 16 x half> %0,
    <vscale x 16 x half> %1,
    half %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x half> %a
}
949
; vfmax.vf intrinsic: passthru, vector op, scalar op, vl.
declare <vscale x 32 x half> @llvm.riscv.vfmax.nxv32f16.f16(
  <vscale x 32 x half>,
  <vscale x 32 x half>,
  half,
  iXLen);

; Unmasked vfmax.vf for nxv32f16 (e16/m8). The passthru is unused, so pass
; poison rather than the deprecated undef; codegen is unchanged
; (tail-agnostic "ta, ma" vsetvli, as checked below).
define <vscale x 32 x half> @intrinsic_vfmax_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmax_vf_nxv32f16_nxv32f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vfmax.vf v8, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vfmax.nxv32f16.f16(
    <vscale x 32 x half> poison,
    <vscale x 32 x half> %0,
    half %1,
    iXLen %2)

  ret <vscale x 32 x half> %a
}
971
; Masked vfmax.vf intrinsic: merge, vector op, scalar op, mask, vl, policy.
declare <vscale x 32 x half> @llvm.riscv.vfmax.mask.nxv32f16.f16(
  <vscale x 32 x half>,
  <vscale x 32 x half>,
  half,
  <vscale x 32 x i1>,
  iXLen,
  iXLen);

; Masked vfmax.vf for nxv32f16 (e16/m8): max(%1, fa0) where mask %3 is set,
; merging into %0 elsewhere. Policy value 1 requests tail-agnostic,
; mask-undisturbed, matching the "ta, mu" vsetvli checked below.
define <vscale x 32 x half> @intrinsic_vfmax_mask_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv32f16_nxv32f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
; CHECK-NEXT:    vfmax.vf v8, v16, fa0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vfmax.mask.nxv32f16.f16(
    <vscale x 32 x half> %0,
    <vscale x 32 x half> %1,
    half %2,
    <vscale x 32 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 32 x half> %a
}
996
; vfmax.vf intrinsic: passthru, vector op, scalar op, vl.
declare <vscale x 1 x float> @llvm.riscv.vfmax.nxv1f32.f32(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  float,
  iXLen);

; Unmasked vfmax.vf for nxv1f32 (e32/mf2). The passthru is unused, so pass
; poison rather than the deprecated undef; codegen is unchanged
; (tail-agnostic "ta, ma" vsetvli, as checked below).
define <vscale x 1 x float> @intrinsic_vfmax_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmax_vf_nxv1f32_nxv1f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vfmax.vf v8, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfmax.nxv1f32.f32(
    <vscale x 1 x float> poison,
    <vscale x 1 x float> %0,
    float %1,
    iXLen %2)

  ret <vscale x 1 x float> %a
}
1018
; Masked vfmax.vf intrinsic: merge, vector op, scalar op, mask, vl, policy.
declare <vscale x 1 x float> @llvm.riscv.vfmax.mask.nxv1f32.f32(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  float,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

; Masked vfmax.vf for nxv1f32 (e32/mf2): max(%1, fa0) where mask %3 is set,
; merging into %0 elsewhere. Policy value 1 requests tail-agnostic,
; mask-undisturbed, matching the "ta, mu" vsetvli checked below.
define <vscale x 1 x float> @intrinsic_vfmax_mask_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv1f32_nxv1f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vfmax.vf v8, v9, fa0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfmax.mask.nxv1f32.f32(
    <vscale x 1 x float> %0,
    <vscale x 1 x float> %1,
    float %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x float> %a
}
1043
; vfmax.vf intrinsic: passthru, vector op, scalar op, vl.
declare <vscale x 2 x float> @llvm.riscv.vfmax.nxv2f32.f32(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  float,
  iXLen);

; Unmasked vfmax.vf for nxv2f32 (e32/m1). The passthru is unused, so pass
; poison rather than the deprecated undef; codegen is unchanged
; (tail-agnostic "ta, ma" vsetvli, as checked below).
define <vscale x 2 x float> @intrinsic_vfmax_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmax_vf_nxv2f32_nxv2f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vfmax.vf v8, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfmax.nxv2f32.f32(
    <vscale x 2 x float> poison,
    <vscale x 2 x float> %0,
    float %1,
    iXLen %2)

  ret <vscale x 2 x float> %a
}
1065
; Masked vfmax.vf intrinsic: merge, vector op, scalar op, mask, vl, policy.
declare <vscale x 2 x float> @llvm.riscv.vfmax.mask.nxv2f32.f32(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  float,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

; Masked vfmax.vf for nxv2f32 (e32/m1): max(%1, fa0) where mask %3 is set,
; merging into %0 elsewhere. Policy value 1 requests tail-agnostic,
; mask-undisturbed, matching the "ta, mu" vsetvli checked below.
define <vscale x 2 x float> @intrinsic_vfmax_mask_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv2f32_nxv2f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vfmax.vf v8, v9, fa0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfmax.mask.nxv2f32.f32(
    <vscale x 2 x float> %0,
    <vscale x 2 x float> %1,
    float %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x float> %a
}
1090
; vfmax.vf intrinsic: passthru, vector op, scalar op, vl.
declare <vscale x 4 x float> @llvm.riscv.vfmax.nxv4f32.f32(
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  float,
  iXLen);

; Unmasked vfmax.vf for nxv4f32 (e32/m2). The passthru is unused, so pass
; poison rather than the deprecated undef; codegen is unchanged
; (tail-agnostic "ta, ma" vsetvli, as checked below).
define <vscale x 4 x float> @intrinsic_vfmax_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmax_vf_nxv4f32_nxv4f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vfmax.vf v8, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfmax.nxv4f32.f32(
    <vscale x 4 x float> poison,
    <vscale x 4 x float> %0,
    float %1,
    iXLen %2)

  ret <vscale x 4 x float> %a
}
1112
; Masked vfmax.vf intrinsic: merge, vector op, scalar op, mask, vl, policy.
declare <vscale x 4 x float> @llvm.riscv.vfmax.mask.nxv4f32.f32(
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  float,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

; Masked vfmax.vf for nxv4f32 (e32/m2): max(%1, fa0) where mask %3 is set,
; merging into %0 elsewhere. Policy value 1 requests tail-agnostic,
; mask-undisturbed, matching the "ta, mu" vsetvli checked below.
define <vscale x 4 x float> @intrinsic_vfmax_mask_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv4f32_nxv4f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vfmax.vf v8, v10, fa0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfmax.mask.nxv4f32.f32(
    <vscale x 4 x float> %0,
    <vscale x 4 x float> %1,
    float %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x float> %a
}
1137
; vfmax.vf intrinsic: passthru, vector op, scalar op, vl.
declare <vscale x 8 x float> @llvm.riscv.vfmax.nxv8f32.f32(
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  float,
  iXLen);

; Unmasked vfmax.vf for nxv8f32 (e32/m4). The passthru is unused, so pass
; poison rather than the deprecated undef; codegen is unchanged
; (tail-agnostic "ta, ma" vsetvli, as checked below).
define <vscale x 8 x float> @intrinsic_vfmax_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmax_vf_nxv8f32_nxv8f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vfmax.vf v8, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfmax.nxv8f32.f32(
    <vscale x 8 x float> poison,
    <vscale x 8 x float> %0,
    float %1,
    iXLen %2)

  ret <vscale x 8 x float> %a
}
1159
; Masked vfmax.vf intrinsic: merge, vector op, scalar op, mask, vl, policy.
declare <vscale x 8 x float> @llvm.riscv.vfmax.mask.nxv8f32.f32(
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  float,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

; Masked vfmax.vf for nxv8f32 (e32/m4): max(%1, fa0) where mask %3 is set,
; merging into %0 elsewhere. Policy value 1 requests tail-agnostic,
; mask-undisturbed, matching the "ta, mu" vsetvli checked below.
define <vscale x 8 x float> @intrinsic_vfmax_mask_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv8f32_nxv8f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vfmax.vf v8, v12, fa0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfmax.mask.nxv8f32.f32(
    <vscale x 8 x float> %0,
    <vscale x 8 x float> %1,
    float %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x float> %a
}
1184
; vfmax.vf intrinsic: passthru, vector op, scalar op, vl.
declare <vscale x 16 x float> @llvm.riscv.vfmax.nxv16f32.f32(
  <vscale x 16 x float>,
  <vscale x 16 x float>,
  float,
  iXLen);

; Unmasked vfmax.vf for nxv16f32 (e32/m8). The passthru is unused, so pass
; poison rather than the deprecated undef; codegen is unchanged
; (tail-agnostic "ta, ma" vsetvli, as checked below).
define <vscale x 16 x float> @intrinsic_vfmax_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmax_vf_nxv16f32_nxv16f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vfmax.vf v8, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vfmax.nxv16f32.f32(
    <vscale x 16 x float> poison,
    <vscale x 16 x float> %0,
    float %1,
    iXLen %2)

  ret <vscale x 16 x float> %a
}
1206
; Masked vfmax.vf intrinsic: merge, vector op, scalar op, mask, vl, policy.
declare <vscale x 16 x float> @llvm.riscv.vfmax.mask.nxv16f32.f32(
  <vscale x 16 x float>,
  <vscale x 16 x float>,
  float,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

; Masked vfmax.vf for nxv16f32 (e32/m8): max(%1, fa0) where mask %3 is set,
; merging into %0 elsewhere. Policy value 1 requests tail-agnostic,
; mask-undisturbed, matching the "ta, mu" vsetvli checked below.
define <vscale x 16 x float> @intrinsic_vfmax_mask_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv16f32_nxv16f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
; CHECK-NEXT:    vfmax.vf v8, v16, fa0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vfmax.mask.nxv16f32.f32(
    <vscale x 16 x float> %0,
    <vscale x 16 x float> %1,
    float %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x float> %a
}
1231
; vfmax.vf intrinsic: passthru, vector op, scalar op, vl.
declare <vscale x 1 x double> @llvm.riscv.vfmax.nxv1f64.f64(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  double,
  iXLen);

; Unmasked vfmax.vf for nxv1f64 (e64/m1). The passthru is unused, so pass
; poison rather than the deprecated undef; codegen is unchanged
; (tail-agnostic "ta, ma" vsetvli, as checked below).
define <vscale x 1 x double> @intrinsic_vfmax_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, double %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmax_vf_nxv1f64_nxv1f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vfmax.vf v8, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfmax.nxv1f64.f64(
    <vscale x 1 x double> poison,
    <vscale x 1 x double> %0,
    double %1,
    iXLen %2)

  ret <vscale x 1 x double> %a
}
1253
; Masked vfmax.vf intrinsic: merge, vector op, scalar op, mask, vl, policy.
declare <vscale x 1 x double> @llvm.riscv.vfmax.mask.nxv1f64.f64(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  double,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

; Masked vfmax.vf for nxv1f64 (e64/m1): max(%1, fa0) where mask %3 is set,
; merging into %0 elsewhere. Policy value 1 requests tail-agnostic,
; mask-undisturbed, matching the "ta, mu" vsetvli checked below.
define <vscale x 1 x double> @intrinsic_vfmax_mask_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv1f64_nxv1f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT:    vfmax.vf v8, v9, fa0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfmax.mask.nxv1f64.f64(
    <vscale x 1 x double> %0,
    <vscale x 1 x double> %1,
    double %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x double> %a
}
1278
; vfmax.vf intrinsic: passthru, vector op, scalar op, vl.
declare <vscale x 2 x double> @llvm.riscv.vfmax.nxv2f64.f64(
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  double,
  iXLen);

; Unmasked vfmax.vf for nxv2f64 (e64/m2). The passthru is unused, so pass
; poison rather than the deprecated undef; codegen is unchanged
; (tail-agnostic "ta, ma" vsetvli, as checked below).
define <vscale x 2 x double> @intrinsic_vfmax_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, double %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmax_vf_nxv2f64_nxv2f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vfmax.vf v8, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfmax.nxv2f64.f64(
    <vscale x 2 x double> poison,
    <vscale x 2 x double> %0,
    double %1,
    iXLen %2)

  ret <vscale x 2 x double> %a
}
1300
; Masked vfmax.vf intrinsic: merge, vector op, scalar op, mask, vl, policy.
declare <vscale x 2 x double> @llvm.riscv.vfmax.mask.nxv2f64.f64(
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  double,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

; Masked vfmax.vf for nxv2f64 (e64/m2): max(%1, fa0) where mask %3 is set,
; merging into %0 elsewhere. Policy value 1 requests tail-agnostic,
; mask-undisturbed, matching the "ta, mu" vsetvli checked below.
define <vscale x 2 x double> @intrinsic_vfmax_mask_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv2f64_nxv2f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT:    vfmax.vf v8, v10, fa0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfmax.mask.nxv2f64.f64(
    <vscale x 2 x double> %0,
    <vscale x 2 x double> %1,
    double %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x double> %a
}
1325
; vfmax.vf intrinsic: passthru, vector op, scalar op, vl.
declare <vscale x 4 x double> @llvm.riscv.vfmax.nxv4f64.f64(
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  double,
  iXLen);

; Unmasked vfmax.vf for nxv4f64 (e64/m4). The passthru is unused, so pass
; poison rather than the deprecated undef; codegen is unchanged
; (tail-agnostic "ta, ma" vsetvli, as checked below).
define <vscale x 4 x double> @intrinsic_vfmax_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, double %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmax_vf_nxv4f64_nxv4f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vfmax.vf v8, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfmax.nxv4f64.f64(
    <vscale x 4 x double> poison,
    <vscale x 4 x double> %0,
    double %1,
    iXLen %2)

  ret <vscale x 4 x double> %a
}
1347
; Masked vfmax.vf intrinsic: merge, vector op, scalar op, mask, vl, policy.
declare <vscale x 4 x double> @llvm.riscv.vfmax.mask.nxv4f64.f64(
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  double,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

; Masked vfmax.vf for nxv4f64 (e64/m4): max(%1, fa0) where mask %3 is set,
; merging into %0 elsewhere. Policy value 1 requests tail-agnostic,
; mask-undisturbed, matching the "ta, mu" vsetvli checked below.
define <vscale x 4 x double> @intrinsic_vfmax_mask_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv4f64_nxv4f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT:    vfmax.vf v8, v12, fa0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfmax.mask.nxv4f64.f64(
    <vscale x 4 x double> %0,
    <vscale x 4 x double> %1,
    double %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x double> %a
}
1372
; vfmax.vf intrinsic: passthru, vector op, scalar op, vl.
declare <vscale x 8 x double> @llvm.riscv.vfmax.nxv8f64.f64(
  <vscale x 8 x double>,
  <vscale x 8 x double>,
  double,
  iXLen);

; Unmasked vfmax.vf for nxv8f64 (e64/m8). The passthru is unused, so pass
; poison rather than the deprecated undef; codegen is unchanged
; (tail-agnostic "ta, ma" vsetvli, as checked below).
define <vscale x 8 x double> @intrinsic_vfmax_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, double %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmax_vf_nxv8f64_nxv8f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vfmax.vf v8, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vfmax.nxv8f64.f64(
    <vscale x 8 x double> poison,
    <vscale x 8 x double> %0,
    double %1,
    iXLen %2)

  ret <vscale x 8 x double> %a
}
1394
; Masked vfmax.vf intrinsic: merge, vector op, scalar op, mask, vl, policy.
declare <vscale x 8 x double> @llvm.riscv.vfmax.mask.nxv8f64.f64(
  <vscale x 8 x double>,
  <vscale x 8 x double>,
  double,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

; Masked vfmax.vf for nxv8f64 (e64/m8): max(%1, fa0) where mask %3 is set,
; merging into %0 elsewhere. Policy value 1 requests tail-agnostic,
; mask-undisturbed, matching the "ta, mu" vsetvli checked below.
define <vscale x 8 x double> @intrinsic_vfmax_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv8f64_nxv8f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
; CHECK-NEXT:    vfmax.vf v8, v16, fa0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vfmax.mask.nxv8f64.f64(
    <vscale x 8 x double> %0,
    <vscale x 8 x double> %1,
    double %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x double> %a
}
1419