; xref: /llvm-project/llvm/test/CodeGen/RISCV/rvv/vfredmax.ll (revision 1cb599835ccf7ee8b2d1d5a7f3107e19a26fc6f5)
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfh \
; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \
; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s

; Unmasked vfredmax.vs: nxv1f16 source reduced into an nxv4f16 destination;
; llc selects e16/mf4 with tail-undisturbed (tu) per the CHECK lines.
declare <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv1f16(
  <vscale x 4 x half>,
  <vscale x 1 x half>,
  <vscale x 4 x half>,
  iXLen);

define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv1f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    vfredmax.vs v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %red = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv1f16(
    <vscale x 4 x half> %0,
    <vscale x 1 x half> %1,
    <vscale x 4 x half> %2,
    iXLen %3)

  ret <vscale x 4 x half> %red
}

; Masked variant: the nxv1i1 mask operand appears as v0.t on the instruction.
declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv1f16.nxv1i1(
  <vscale x 4 x half>,
  <vscale x 1 x half>,
  <vscale x 4 x half>,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv1f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    vfredmax.vs v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %red = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv1f16.nxv1i1(
    <vscale x 4 x half> %0,
    <vscale x 1 x half> %1,
    <vscale x 4 x half> %2,
    <vscale x 1 x i1> %3,
    iXLen %4)

  ret <vscale x 4 x half> %red
}
52
; Unmasked vfredmax.vs: nxv2f16 source, nxv4f16 destination (e16/mf2).
declare <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv2f16(
  <vscale x 4 x half>,
  <vscale x 2 x half>,
  <vscale x 4 x half>,
  iXLen);

define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv2f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
; CHECK-NEXT:    vfredmax.vs v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %red = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv2f16(
    <vscale x 4 x half> %0,
    <vscale x 2 x half> %1,
    <vscale x 4 x half> %2,
    iXLen %3)

  ret <vscale x 4 x half> %red
}

; Masked variant with an nxv2i1 mask (v0.t).
declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv2f16.nxv2i1(
  <vscale x 4 x half>,
  <vscale x 2 x half>,
  <vscale x 4 x half>,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv2f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
; CHECK-NEXT:    vfredmax.vs v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %red = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv2f16.nxv2i1(
    <vscale x 4 x half> %0,
    <vscale x 2 x half> %1,
    <vscale x 4 x half> %2,
    <vscale x 2 x i1> %3,
    iXLen %4)

  ret <vscale x 4 x half> %red
}
98
; Unmasked vfredmax.vs: nxv4f16 source, nxv4f16 destination (e16/m1).
declare <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv4f16(
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  iXLen);

define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv4f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
; CHECK-NEXT:    vfredmax.vs v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %red = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv4f16(
    <vscale x 4 x half> %0,
    <vscale x 4 x half> %1,
    <vscale x 4 x half> %2,
    iXLen %3)

  ret <vscale x 4 x half> %red
}

; Masked variant with an nxv4i1 mask (v0.t).
declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv4f16.nxv4i1(
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv4f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
; CHECK-NEXT:    vfredmax.vs v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %red = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv4f16.nxv4i1(
    <vscale x 4 x half> %0,
    <vscale x 4 x half> %1,
    <vscale x 4 x half> %2,
    <vscale x 4 x i1> %3,
    iXLen %4)

  ret <vscale x 4 x half> %red
}
144
; Unmasked vfredmax.vs: nxv8f16 source, nxv4f16 destination (e16/m2);
; the LMUL=2 source lands in v10, so operand order differs from the m1 cases.
declare <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv8f16(
  <vscale x 4 x half>,
  <vscale x 8 x half>,
  <vscale x 4 x half>,
  iXLen);

define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv8f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
; CHECK-NEXT:    vfredmax.vs v8, v10, v9
; CHECK-NEXT:    ret
entry:
  %red = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv8f16(
    <vscale x 4 x half> %0,
    <vscale x 8 x half> %1,
    <vscale x 4 x half> %2,
    iXLen %3)

  ret <vscale x 4 x half> %red
}

; Masked variant with an nxv8i1 mask (v0.t).
declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv8f16.nxv8i1(
  <vscale x 4 x half>,
  <vscale x 8 x half>,
  <vscale x 4 x half>,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv8f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
; CHECK-NEXT:    vfredmax.vs v8, v10, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %red = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv8f16.nxv8i1(
    <vscale x 4 x half> %0,
    <vscale x 8 x half> %1,
    <vscale x 4 x half> %2,
    <vscale x 8 x i1> %3,
    iXLen %4)

  ret <vscale x 4 x half> %red
}
190
; Unmasked vfredmax.vs: nxv16f16 source, nxv4f16 destination (e16/m4).
declare <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv16f16(
  <vscale x 4 x half>,
  <vscale x 16 x half>,
  <vscale x 4 x half>,
  iXLen);

define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv16f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
; CHECK-NEXT:    vfredmax.vs v8, v12, v9
; CHECK-NEXT:    ret
entry:
  %red = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv16f16(
    <vscale x 4 x half> %0,
    <vscale x 16 x half> %1,
    <vscale x 4 x half> %2,
    iXLen %3)

  ret <vscale x 4 x half> %red
}

; Masked variant with an nxv16i1 mask (v0.t).
declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv16f16.nxv16i1(
  <vscale x 4 x half>,
  <vscale x 16 x half>,
  <vscale x 4 x half>,
  <vscale x 16 x i1>,
  iXLen);

define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv16f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
; CHECK-NEXT:    vfredmax.vs v8, v12, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %red = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv16f16.nxv16i1(
    <vscale x 4 x half> %0,
    <vscale x 16 x half> %1,
    <vscale x 4 x half> %2,
    <vscale x 16 x i1> %3,
    iXLen %4)

  ret <vscale x 4 x half> %red
}
236
; Unmasked vfredmax.vs: nxv32f16 source, nxv4f16 destination (e16/m8).
declare <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv32f16(
  <vscale x 4 x half>,
  <vscale x 32 x half>,
  <vscale x 4 x half>,
  iXLen);

define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv32f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, ma
; CHECK-NEXT:    vfredmax.vs v8, v16, v9
; CHECK-NEXT:    ret
entry:
  %red = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv32f16(
    <vscale x 4 x half> %0,
    <vscale x 32 x half> %1,
    <vscale x 4 x half> %2,
    iXLen %3)

  ret <vscale x 4 x half> %red
}

; Masked variant with an nxv32i1 mask (v0.t).
declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv32f16.nxv32i1(
  <vscale x 4 x half>,
  <vscale x 32 x half>,
  <vscale x 4 x half>,
  <vscale x 32 x i1>,
  iXLen);

define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv32f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, ma
; CHECK-NEXT:    vfredmax.vs v8, v16, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %red = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv32f16.nxv32i1(
    <vscale x 4 x half> %0,
    <vscale x 32 x half> %1,
    <vscale x 4 x half> %2,
    <vscale x 32 x i1> %3,
    iXLen %4)

  ret <vscale x 4 x half> %red
}
282
; Unmasked vfredmax.vs: nxv1f32 source, nxv2f32 destination (e32/mf2).
declare <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv1f32(
  <vscale x 2 x float>,
  <vscale x 1 x float>,
  <vscale x 2 x float>,
  iXLen);

define <vscale x 2 x float> @intrinsic_vfredmax_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv1f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
; CHECK-NEXT:    vfredmax.vs v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %red = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv1f32(
    <vscale x 2 x float> %0,
    <vscale x 1 x float> %1,
    <vscale x 2 x float> %2,
    iXLen %3)

  ret <vscale x 2 x float> %red
}

; Masked variant with an nxv1i1 mask (v0.t).
declare <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32.nxv1i1(
  <vscale x 2 x float>,
  <vscale x 1 x float>,
  <vscale x 2 x float>,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 2 x float> @intrinsic_vfredmax_mask_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv1f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
; CHECK-NEXT:    vfredmax.vs v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %red = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32.nxv1i1(
    <vscale x 2 x float> %0,
    <vscale x 1 x float> %1,
    <vscale x 2 x float> %2,
    <vscale x 1 x i1> %3,
    iXLen %4)

  ret <vscale x 2 x float> %red
}
328
; Unmasked vfredmax.vs: nxv2f32 source, nxv2f32 destination (e32/m1).
declare <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv2f32(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  iXLen);

define <vscale x 2 x float> @intrinsic_vfredmax_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv2f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
; CHECK-NEXT:    vfredmax.vs v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %red = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv2f32(
    <vscale x 2 x float> %0,
    <vscale x 2 x float> %1,
    <vscale x 2 x float> %2,
    iXLen %3)

  ret <vscale x 2 x float> %red
}

; Masked variant with an nxv2i1 mask (v0.t).
declare <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv2f32.nxv2i1(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x float> @intrinsic_vfredmax_mask_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv2f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
; CHECK-NEXT:    vfredmax.vs v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %red = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv2f32.nxv2i1(
    <vscale x 2 x float> %0,
    <vscale x 2 x float> %1,
    <vscale x 2 x float> %2,
    <vscale x 2 x i1> %3,
    iXLen %4)

  ret <vscale x 2 x float> %red
}
374
; Unmasked vfredmax.vs: nxv4f32 source, nxv2f32 destination (e32/m2).
declare <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv4f32(
  <vscale x 2 x float>,
  <vscale x 4 x float>,
  <vscale x 2 x float>,
  iXLen);

define <vscale x 2 x float> @intrinsic_vfredmax_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv4f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
; CHECK-NEXT:    vfredmax.vs v8, v10, v9
; CHECK-NEXT:    ret
entry:
  %red = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv4f32(
    <vscale x 2 x float> %0,
    <vscale x 4 x float> %1,
    <vscale x 2 x float> %2,
    iXLen %3)

  ret <vscale x 2 x float> %red
}

; Masked variant with an nxv4i1 mask (v0.t).
declare <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv4f32.nxv4i1(
  <vscale x 2 x float>,
  <vscale x 4 x float>,
  <vscale x 2 x float>,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 2 x float> @intrinsic_vfredmax_mask_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv4f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
; CHECK-NEXT:    vfredmax.vs v8, v10, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %red = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv4f32.nxv4i1(
    <vscale x 2 x float> %0,
    <vscale x 4 x float> %1,
    <vscale x 2 x float> %2,
    <vscale x 4 x i1> %3,
    iXLen %4)

  ret <vscale x 2 x float> %red
}
420
; Unmasked vfredmax.vs: nxv8f32 source, nxv2f32 destination (e32/m4).
declare <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv8f32(
  <vscale x 2 x float>,
  <vscale x 8 x float>,
  <vscale x 2 x float>,
  iXLen);

define <vscale x 2 x float> @intrinsic_vfredmax_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv8f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
; CHECK-NEXT:    vfredmax.vs v8, v12, v9
; CHECK-NEXT:    ret
entry:
  %red = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv8f32(
    <vscale x 2 x float> %0,
    <vscale x 8 x float> %1,
    <vscale x 2 x float> %2,
    iXLen %3)

  ret <vscale x 2 x float> %red
}

; Masked variant with an nxv8i1 mask (v0.t).
declare <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv8f32.nxv8i1(
  <vscale x 2 x float>,
  <vscale x 8 x float>,
  <vscale x 2 x float>,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 2 x float> @intrinsic_vfredmax_mask_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv8f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
; CHECK-NEXT:    vfredmax.vs v8, v12, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %red = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv8f32.nxv8i1(
    <vscale x 2 x float> %0,
    <vscale x 8 x float> %1,
    <vscale x 2 x float> %2,
    <vscale x 8 x i1> %3,
    iXLen %4)

  ret <vscale x 2 x float> %red
}
466
; Unmasked vfredmax.vs: nxv16f32 source, nxv2f32 destination (e32/m8).
declare <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv16f32(
  <vscale x 2 x float>,
  <vscale x 16 x float>,
  <vscale x 2 x float>,
  iXLen);

define <vscale x 2 x float> @intrinsic_vfredmax_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv16f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
; CHECK-NEXT:    vfredmax.vs v8, v16, v9
; CHECK-NEXT:    ret
entry:
  %red = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv16f32(
    <vscale x 2 x float> %0,
    <vscale x 16 x float> %1,
    <vscale x 2 x float> %2,
    iXLen %3)

  ret <vscale x 2 x float> %red
}

; Masked variant with an nxv16i1 mask (v0.t).
declare <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv16f32.nxv16i1(
  <vscale x 2 x float>,
  <vscale x 16 x float>,
  <vscale x 2 x float>,
  <vscale x 16 x i1>,
  iXLen);

define <vscale x 2 x float> @intrinsic_vfredmax_mask_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv16f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
; CHECK-NEXT:    vfredmax.vs v8, v16, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %red = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv16f32.nxv16i1(
    <vscale x 2 x float> %0,
    <vscale x 16 x float> %1,
    <vscale x 2 x float> %2,
    <vscale x 16 x i1> %3,
    iXLen %4)

  ret <vscale x 2 x float> %red
}
512
; Unmasked vfredmax.vs: nxv1f64 source, nxv1f64 destination (e64/m1).
declare <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv1f64(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  iXLen);

define <vscale x 1 x double> @intrinsic_vfredmax_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_vs_nxv1f64_nxv1f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
; CHECK-NEXT:    vfredmax.vs v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %red = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv1f64(
    <vscale x 1 x double> %0,
    <vscale x 1 x double> %1,
    <vscale x 1 x double> %2,
    iXLen %3)

  ret <vscale x 1 x double> %red
}

; Masked variant with an nxv1i1 mask (v0.t).
declare <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv1f64.nxv1i1(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x double> @intrinsic_vfredmax_mask_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv1f64_nxv1f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
; CHECK-NEXT:    vfredmax.vs v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %red = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv1f64.nxv1i1(
    <vscale x 1 x double> %0,
    <vscale x 1 x double> %1,
    <vscale x 1 x double> %2,
    <vscale x 1 x i1> %3,
    iXLen %4)

  ret <vscale x 1 x double> %red
}
558
; Unmasked vfredmax.vs: nxv2f64 source, nxv1f64 destination (e64/m2).
declare <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv2f64(
  <vscale x 1 x double>,
  <vscale x 2 x double>,
  <vscale x 1 x double>,
  iXLen);

define <vscale x 1 x double> @intrinsic_vfredmax_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_vs_nxv1f64_nxv2f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
; CHECK-NEXT:    vfredmax.vs v8, v10, v9
; CHECK-NEXT:    ret
entry:
  %red = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv2f64(
    <vscale x 1 x double> %0,
    <vscale x 2 x double> %1,
    <vscale x 1 x double> %2,
    iXLen %3)

  ret <vscale x 1 x double> %red
}

; Masked variant with an nxv2i1 mask (v0.t).
declare <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv2f64.nxv2i1(
  <vscale x 1 x double>,
  <vscale x 2 x double>,
  <vscale x 1 x double>,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 1 x double> @intrinsic_vfredmax_mask_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv1f64_nxv2f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
; CHECK-NEXT:    vfredmax.vs v8, v10, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %red = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv2f64.nxv2i1(
    <vscale x 1 x double> %0,
    <vscale x 2 x double> %1,
    <vscale x 1 x double> %2,
    <vscale x 2 x i1> %3,
    iXLen %4)

  ret <vscale x 1 x double> %red
}
604
; Unmasked vfredmax.vs: nxv4f64 source, nxv1f64 destination (e64/m4).
declare <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv4f64(
  <vscale x 1 x double>,
  <vscale x 4 x double>,
  <vscale x 1 x double>,
  iXLen);

define <vscale x 1 x double> @intrinsic_vfredmax_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_vs_nxv1f64_nxv4f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
; CHECK-NEXT:    vfredmax.vs v8, v12, v9
; CHECK-NEXT:    ret
entry:
  %red = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv4f64(
    <vscale x 1 x double> %0,
    <vscale x 4 x double> %1,
    <vscale x 1 x double> %2,
    iXLen %3)

  ret <vscale x 1 x double> %red
}

; Masked variant with an nxv4i1 mask (v0.t).
declare <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv4f64.nxv4i1(
  <vscale x 1 x double>,
  <vscale x 4 x double>,
  <vscale x 1 x double>,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 1 x double> @intrinsic_vfredmax_mask_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv1f64_nxv4f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
; CHECK-NEXT:    vfredmax.vs v8, v12, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %red = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv4f64.nxv4i1(
    <vscale x 1 x double> %0,
    <vscale x 4 x double> %1,
    <vscale x 1 x double> %2,
    <vscale x 4 x i1> %3,
    iXLen %4)

  ret <vscale x 1 x double> %red
}
650
; Unmasked vfredmax.vs: nxv8f64 source, nxv1f64 destination (e64/m8).
declare <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv8f64(
  <vscale x 1 x double>,
  <vscale x 8 x double>,
  <vscale x 1 x double>,
  iXLen);

define <vscale x 1 x double> @intrinsic_vfredmax_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_vs_nxv1f64_nxv8f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, ma
; CHECK-NEXT:    vfredmax.vs v8, v16, v9
; CHECK-NEXT:    ret
entry:
  %red = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv8f64(
    <vscale x 1 x double> %0,
    <vscale x 8 x double> %1,
    <vscale x 1 x double> %2,
    iXLen %3)

  ret <vscale x 1 x double> %red
}

; Masked variant with an nxv8i1 mask (v0.t).
declare <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv8f64.nxv8i1(
  <vscale x 1 x double>,
  <vscale x 8 x double>,
  <vscale x 1 x double>,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 1 x double> @intrinsic_vfredmax_mask_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv1f64_nxv8f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, ma
; CHECK-NEXT:    vfredmax.vs v8, v16, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %red = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv8f64.nxv8i1(
    <vscale x 1 x double> %0,
    <vscale x 8 x double> %1,
    <vscale x 1 x double> %2,
    <vscale x 8 x i1> %3,
    iXLen %4)

  ret <vscale x 1 x double> %red
}