; xref: /llvm-project/llvm/test/CodeGen/RISCV/rvv/vmaxu-vp.ll (revision 36e4176f1d83d04cdebb4e1870561099b2478d80)
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefixes=CHECK,RV64

declare <vscale x 8 x i7> @llvm.vp.umax.nxv8i7(<vscale x 8 x i7>, <vscale x 8 x i7>, <vscale x 8 x i1>, i32)

; Illegal i7 element type: both operands are zero-extended into i8 lanes by
; masking with 127 (li a2, 127 + vand.vx) before the unsigned max.
define <vscale x 8 x i7> @vmaxu_vx_nxv8i7(<vscale x 8 x i7> %a, i7 signext %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vx_nxv8i7:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a2, 127
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vmv.v.x v9, a0
; CHECK-NEXT:    vand.vx v8, v8, a2, v0.t
; CHECK-NEXT:    vand.vx v9, v9, a2, v0.t
; CHECK-NEXT:    vmaxu.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i7> poison, i7 %b, i32 0
  %vb = shufflevector <vscale x 8 x i7> %elt.head, <vscale x 8 x i7> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i7> @llvm.vp.umax.nxv8i7(<vscale x 8 x i7> %a, <vscale x 8 x i7> %vb, <vscale x 8 x i1> %mask, i32 %evl)
  ret <vscale x 8 x i7> %v
}

declare <vscale x 1 x i8> @llvm.vp.umax.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)

define <vscale x 1 x i8> @vmaxu_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vv_nxv1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vmaxu.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i8> @llvm.vp.umax.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i8> %v
}

define <vscale x 1 x i8> @vmaxu_vv_nxv1i8_unmasked(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vv_nxv1i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vmaxu.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i8> @llvm.vp.umax.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i8> %v
}

define <vscale x 1 x i8> @vmaxu_vx_nxv1i8(<vscale x 1 x i8> %va, i8 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vx_nxv1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vmaxu.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i8> @llvm.vp.umax.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i8> %v
}

define <vscale x 1 x i8> @vmaxu_vx_nxv1i8_commute(<vscale x 1 x i8> %va, i8 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vx_nxv1i8_commute:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vmaxu.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i8> @llvm.vp.umax.nxv1i8(<vscale x 1 x i8> %vb, <vscale x 1 x i8> %va, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i8> %v
}

define <vscale x 1 x i8> @vmaxu_vx_nxv1i8_unmasked(<vscale x 1 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vx_nxv1i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vmaxu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i8> @llvm.vp.umax.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i8> %v
}

declare <vscale x 2 x i8> @llvm.vp.umax.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i1>, i32)

define <vscale x 2 x i8> @vmaxu_vv_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vv_nxv2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vmaxu.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i8> @llvm.vp.umax.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i8> %v
}

define <vscale x 2 x i8> @vmaxu_vv_nxv2i8_unmasked(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vv_nxv2i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vmaxu.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i8> @llvm.vp.umax.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i8> %v
}

define <vscale x 2 x i8> @vmaxu_vx_nxv2i8(<vscale x 2 x i8> %va, i8 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vx_nxv2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vmaxu.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i8> @llvm.vp.umax.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i8> %v
}

define <vscale x 2 x i8> @vmaxu_vx_nxv2i8_unmasked(<vscale x 2 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vx_nxv2i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vmaxu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i8> @llvm.vp.umax.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i8> %v
}

declare <vscale x 3 x i8> @llvm.vp.umax.nxv3i8(<vscale x 3 x i8>, <vscale x 3 x i8>, <vscale x 3 x i1>, i32)

define <vscale x 3 x i8> @vmaxu_vv_nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, <vscale x 3 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vv_nxv3i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vmaxu.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 3 x i8> @llvm.vp.umax.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, <vscale x 3 x i1> %m, i32 %evl)
  ret <vscale x 3 x i8> %v
}

define <vscale x 3 x i8> @vmaxu_vv_nxv3i8_unmasked(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vv_nxv3i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vmaxu.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 3 x i8> @llvm.vp.umax.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, <vscale x 3 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 3 x i8> %v
}

define <vscale x 3 x i8> @vmaxu_vx_nxv3i8(<vscale x 3 x i8> %va, i8 %b, <vscale x 3 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vx_nxv3i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vmaxu.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 3 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 3 x i8> %elt.head, <vscale x 3 x i8> poison, <vscale x 3 x i32> zeroinitializer
  %v = call <vscale x 3 x i8> @llvm.vp.umax.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %vb, <vscale x 3 x i1> %m, i32 %evl)
  ret <vscale x 3 x i8> %v
}

define <vscale x 3 x i8> @vmaxu_vx_nxv3i8_unmasked(<vscale x 3 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vx_nxv3i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vmaxu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 3 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 3 x i8> %elt.head, <vscale x 3 x i8> poison, <vscale x 3 x i32> zeroinitializer
  %v = call <vscale x 3 x i8> @llvm.vp.umax.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %vb, <vscale x 3 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 3 x i8> %v
}

declare <vscale x 4 x i8> @llvm.vp.umax.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i1>, i32)

define <vscale x 4 x i8> @vmaxu_vv_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vv_nxv4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vmaxu.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i8> @llvm.vp.umax.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i8> %v
}

define <vscale x 4 x i8> @vmaxu_vv_nxv4i8_unmasked(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vv_nxv4i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vmaxu.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i8> @llvm.vp.umax.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i8> %v
}

define <vscale x 4 x i8> @vmaxu_vx_nxv4i8(<vscale x 4 x i8> %va, i8 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vx_nxv4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vmaxu.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i8> @llvm.vp.umax.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i8> %v
}

define <vscale x 4 x i8> @vmaxu_vx_nxv4i8_unmasked(<vscale x 4 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vx_nxv4i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vmaxu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i8> @llvm.vp.umax.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i8> %v
}

declare <vscale x 8 x i8> @llvm.vp.umax.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i1>, i32)

define <vscale x 8 x i8> @vmaxu_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vv_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vmaxu.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i8> @llvm.vp.umax.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i8> %v
}

define <vscale x 8 x i8> @vmaxu_vv_nxv8i8_unmasked(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vv_nxv8i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vmaxu.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i8> @llvm.vp.umax.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i8> %v
}

define <vscale x 8 x i8> @vmaxu_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vx_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vmaxu.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i8> @llvm.vp.umax.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i8> %v
}

define <vscale x 8 x i8> @vmaxu_vx_nxv8i8_unmasked(<vscale x 8 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vx_nxv8i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vmaxu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i8> @llvm.vp.umax.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i8> %v
}

declare <vscale x 16 x i8> @llvm.vp.umax.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i1>, i32)

define <vscale x 16 x i8> @vmaxu_vv_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vv_nxv16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vmaxu.vv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i8> @llvm.vp.umax.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i8> %v
}

define <vscale x 16 x i8> @vmaxu_vv_nxv16i8_unmasked(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vv_nxv16i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vmaxu.vv v8, v8, v10
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i8> @llvm.vp.umax.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x i8> %v
}

define <vscale x 16 x i8> @vmaxu_vx_nxv16i8(<vscale x 16 x i8> %va, i8 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vx_nxv16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vmaxu.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
  %v = call <vscale x 16 x i8> @llvm.vp.umax.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i8> %v
}

define <vscale x 16 x i8> @vmaxu_vx_nxv16i8_unmasked(<vscale x 16 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vx_nxv16i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vmaxu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
  %v = call <vscale x 16 x i8> @llvm.vp.umax.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x i8> %v
}

declare <vscale x 32 x i8> @llvm.vp.umax.nxv32i8(<vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i1>, i32)

define <vscale x 32 x i8> @vmaxu_vv_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vv_nxv32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vmaxu.vv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 32 x i8> @llvm.vp.umax.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 %evl)
  ret <vscale x 32 x i8> %v
}

define <vscale x 32 x i8> @vmaxu_vv_nxv32i8_unmasked(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vv_nxv32i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vmaxu.vv v8, v8, v12
; CHECK-NEXT:    ret
  %v = call <vscale x 32 x i8> @llvm.vp.umax.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 32 x i8> %v
}

define <vscale x 32 x i8> @vmaxu_vx_nxv32i8(<vscale x 32 x i8> %va, i8 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vx_nxv32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    vmaxu.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
  %v = call <vscale x 32 x i8> @llvm.vp.umax.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> %m, i32 %evl)
  ret <vscale x 32 x i8> %v
}

define <vscale x 32 x i8> @vmaxu_vx_nxv32i8_unmasked(<vscale x 32 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vx_nxv32i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    vmaxu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
  %v = call <vscale x 32 x i8> @llvm.vp.umax.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 32 x i8> %v
}

declare <vscale x 64 x i8> @llvm.vp.umax.nxv64i8(<vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i1>, i32)

define <vscale x 64 x i8> @vmaxu_vv_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vv_nxv64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    vmaxu.vv v8, v8, v16, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 64 x i8> @llvm.vp.umax.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 %evl)
  ret <vscale x 64 x i8> %v
}

define <vscale x 64 x i8> @vmaxu_vv_nxv64i8_unmasked(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vv_nxv64i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    vmaxu.vv v8, v8, v16
; CHECK-NEXT:    ret
  %v = call <vscale x 64 x i8> @llvm.vp.umax.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 64 x i8> %v
}

define <vscale x 64 x i8> @vmaxu_vx_nxv64i8(<vscale x 64 x i8> %va, i8 %b, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vx_nxv64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT:    vmaxu.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
  %v = call <vscale x 64 x i8> @llvm.vp.umax.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> %m, i32 %evl)
  ret <vscale x 64 x i8> %v
}

define <vscale x 64 x i8> @vmaxu_vx_nxv64i8_unmasked(<vscale x 64 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vx_nxv64i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT:    vmaxu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
  %v = call <vscale x 64 x i8> @llvm.vp.umax.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 64 x i8> %v
}

; Test that split-legalization works when the mask itself needs splitting.

declare <vscale x 128 x i8> @llvm.vp.umax.nxv128i8(<vscale x 128 x i8>, <vscale x 128 x i8>, <vscale x 128 x i1>, i32)

define <vscale x 128 x i8> @vmaxu_vx_nxv128i8(<vscale x 128 x i8> %va, i8 %b, <vscale x 128 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vx_nxv128i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a3, zero, e8, m8, ta, ma
; CHECK-NEXT:    vmv1r.v v24, v0
; CHECK-NEXT:    vlm.v v0, (a1)
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    slli a1, a1, 3
; CHECK-NEXT:    sub a3, a2, a1
; CHECK-NEXT:    sltu a4, a2, a3
; CHECK-NEXT:    addi a4, a4, -1
; CHECK-NEXT:    and a3, a4, a3
; CHECK-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
; CHECK-NEXT:    vmaxu.vx v16, v16, a0, v0.t
; CHECK-NEXT:    bltu a2, a1, .LBB34_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    mv a2, a1
; CHECK-NEXT:  .LBB34_2:
; CHECK-NEXT:    vmv1r.v v0, v24
; CHECK-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT:    vmaxu.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 128 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 128 x i8> %elt.head, <vscale x 128 x i8> poison, <vscale x 128 x i32> zeroinitializer
  %v = call <vscale x 128 x i8> @llvm.vp.umax.nxv128i8(<vscale x 128 x i8> %va, <vscale x 128 x i8> %vb, <vscale x 128 x i1> %m, i32 %evl)
  ret <vscale x 128 x i8> %v
}

define <vscale x 128 x i8> @vmaxu_vx_nxv128i8_unmasked(<vscale x 128 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vx_nxv128i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a2, vlenb
; CHECK-NEXT:    slli a2, a2, 3
; CHECK-NEXT:    sub a3, a1, a2
; CHECK-NEXT:    sltu a4, a1, a3
; CHECK-NEXT:    addi a4, a4, -1
; CHECK-NEXT:    and a3, a4, a3
; CHECK-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
; CHECK-NEXT:    vmaxu.vx v16, v16, a0
; CHECK-NEXT:    bltu a1, a2, .LBB35_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    mv a1, a2
; CHECK-NEXT:  .LBB35_2:
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT:    vmaxu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 128 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 128 x i8> %elt.head, <vscale x 128 x i8> poison, <vscale x 128 x i32> zeroinitializer
  %v = call <vscale x 128 x i8> @llvm.vp.umax.nxv128i8(<vscale x 128 x i8> %va, <vscale x 128 x i8> %vb, <vscale x 128 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 128 x i8> %v
}

declare <vscale x 1 x i16> @llvm.vp.umax.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)

define <vscale x 1 x i16> @vmaxu_vv_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vv_nxv1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vmaxu.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i16> @llvm.vp.umax.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i16> %v
}

define <vscale x 1 x i16> @vmaxu_vv_nxv1i16_unmasked(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vv_nxv1i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vmaxu.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i16> @llvm.vp.umax.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i16> %v
}

define <vscale x 1 x i16> @vmaxu_vx_nxv1i16(<vscale x 1 x i16> %va, i16 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vx_nxv1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vmaxu.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i16> @llvm.vp.umax.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i16> %v
}

define <vscale x 1 x i16> @vmaxu_vx_nxv1i16_unmasked(<vscale x 1 x i16> %va, i16 %b, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vx_nxv1i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vmaxu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i16> @llvm.vp.umax.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i16> %v
}

declare <vscale x 2 x i16> @llvm.vp.umax.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i1>, i32)

define <vscale x 2 x i16> @vmaxu_vv_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vv_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vmaxu.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i16> @llvm.vp.umax.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i16> %v
}

define <vscale x 2 x i16> @vmaxu_vv_nxv2i16_unmasked(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vv_nxv2i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vmaxu.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i16> @llvm.vp.umax.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i16> %v
}

define <vscale x 2 x i16> @vmaxu_vx_nxv2i16(<vscale x 2 x i16> %va, i16 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vx_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vmaxu.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i16> @llvm.vp.umax.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i16> %v
}

define <vscale x 2 x i16> @vmaxu_vx_nxv2i16_unmasked(<vscale x 2 x i16> %va, i16 %b, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vx_nxv2i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vmaxu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i16> @llvm.vp.umax.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i16> %v
}

declare <vscale x 4 x i16> @llvm.vp.umax.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i1>, i32)

define <vscale x 4 x i16> @vmaxu_vv_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vv_nxv4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vmaxu.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i16> @llvm.vp.umax.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i16> %v
}

define <vscale x 4 x i16> @vmaxu_vv_nxv4i16_unmasked(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vv_nxv4i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vmaxu.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i16> @llvm.vp.umax.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i16> %v
}

define <vscale x 4 x i16> @vmaxu_vx_nxv4i16(<vscale x 4 x i16> %va, i16 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vx_nxv4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vmaxu.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i16> @llvm.vp.umax.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i16> %v
}

define <vscale x 4 x i16> @vmaxu_vx_nxv4i16_unmasked(<vscale x 4 x i16> %va, i16 %b, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vx_nxv4i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vmaxu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i16> @llvm.vp.umax.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i16> %v
}

declare <vscale x 8 x i16> @llvm.vp.umax.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i1>, i32)

define <vscale x 8 x i16> @vmaxu_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vv_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vmaxu.vv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i16> @llvm.vp.umax.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i16> %v
}

define <vscale x 8 x i16> @vmaxu_vv_nxv8i16_unmasked(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vv_nxv8i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vmaxu.vv v8, v8, v10
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i16> @llvm.vp.umax.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i16> %v
}

define <vscale x 8 x i16> @vmaxu_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vx_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vmaxu.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i16> @llvm.vp.umax.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i16> %v
}

define <vscale x 8 x i16> @vmaxu_vx_nxv8i16_unmasked(<vscale x 8 x i16> %va, i16 %b, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vx_nxv8i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vmaxu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i16> @llvm.vp.umax.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i16> %v
}

declare <vscale x 16 x i16> @llvm.vp.umax.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i1>, i32)
646
647define <vscale x 16 x i16> @vmaxu_vv_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
648; CHECK-LABEL: vmaxu_vv_nxv16i16:
649; CHECK:       # %bb.0:
650; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
651; CHECK-NEXT:    vmaxu.vv v8, v8, v12, v0.t
652; CHECK-NEXT:    ret
653  %v = call <vscale x 16 x i16> @llvm.vp.umax.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 %evl)
654  ret <vscale x 16 x i16> %v
655}
656
657define <vscale x 16 x i16> @vmaxu_vv_nxv16i16_unmasked(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, i32 zeroext %evl) {
658; CHECK-LABEL: vmaxu_vv_nxv16i16_unmasked:
659; CHECK:       # %bb.0:
660; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
661; CHECK-NEXT:    vmaxu.vv v8, v8, v12
662; CHECK-NEXT:    ret
663  %v = call <vscale x 16 x i16> @llvm.vp.umax.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
664  ret <vscale x 16 x i16> %v
665}
666
667define <vscale x 16 x i16> @vmaxu_vx_nxv16i16(<vscale x 16 x i16> %va, i16 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
668; CHECK-LABEL: vmaxu_vx_nxv16i16:
669; CHECK:       # %bb.0:
670; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
671; CHECK-NEXT:    vmaxu.vx v8, v8, a0, v0.t
672; CHECK-NEXT:    ret
673  %elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
674  %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
675  %v = call <vscale x 16 x i16> @llvm.vp.umax.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> %m, i32 %evl)
676  ret <vscale x 16 x i16> %v
677}
678
679define <vscale x 16 x i16> @vmaxu_vx_nxv16i16_unmasked(<vscale x 16 x i16> %va, i16 %b, i32 zeroext %evl) {
680; CHECK-LABEL: vmaxu_vx_nxv16i16_unmasked:
681; CHECK:       # %bb.0:
682; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
683; CHECK-NEXT:    vmaxu.vx v8, v8, a0
684; CHECK-NEXT:    ret
685  %elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
686  %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
687  %v = call <vscale x 16 x i16> @llvm.vp.umax.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
688  ret <vscale x 16 x i16> %v
689}
690
691declare <vscale x 32 x i16> @llvm.vp.umax.nxv32i16(<vscale x 32 x i16>, <vscale x 32 x i16>, <vscale x 32 x i1>, i32)
692
693define <vscale x 32 x i16> @vmaxu_vv_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
694; CHECK-LABEL: vmaxu_vv_nxv32i16:
695; CHECK:       # %bb.0:
696; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
697; CHECK-NEXT:    vmaxu.vv v8, v8, v16, v0.t
698; CHECK-NEXT:    ret
699  %v = call <vscale x 32 x i16> @llvm.vp.umax.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 %evl)
700  ret <vscale x 32 x i16> %v
701}
702
703define <vscale x 32 x i16> @vmaxu_vv_nxv32i16_unmasked(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, i32 zeroext %evl) {
704; CHECK-LABEL: vmaxu_vv_nxv32i16_unmasked:
705; CHECK:       # %bb.0:
706; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
707; CHECK-NEXT:    vmaxu.vv v8, v8, v16
708; CHECK-NEXT:    ret
709  %v = call <vscale x 32 x i16> @llvm.vp.umax.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
710  ret <vscale x 32 x i16> %v
711}
712
713define <vscale x 32 x i16> @vmaxu_vx_nxv32i16(<vscale x 32 x i16> %va, i16 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
714; CHECK-LABEL: vmaxu_vx_nxv32i16:
715; CHECK:       # %bb.0:
716; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
717; CHECK-NEXT:    vmaxu.vx v8, v8, a0, v0.t
718; CHECK-NEXT:    ret
719  %elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
720  %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
721  %v = call <vscale x 32 x i16> @llvm.vp.umax.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> %m, i32 %evl)
722  ret <vscale x 32 x i16> %v
723}
724
725define <vscale x 32 x i16> @vmaxu_vx_nxv32i16_unmasked(<vscale x 32 x i16> %va, i16 %b, i32 zeroext %evl) {
726; CHECK-LABEL: vmaxu_vx_nxv32i16_unmasked:
727; CHECK:       # %bb.0:
728; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
729; CHECK-NEXT:    vmaxu.vx v8, v8, a0
730; CHECK-NEXT:    ret
731  %elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
732  %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
733  %v = call <vscale x 32 x i16> @llvm.vp.umax.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
734  ret <vscale x 32 x i16> %v
735}
736
737declare <vscale x 1 x i32> @llvm.vp.umax.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)
738
739define <vscale x 1 x i32> @vmaxu_vv_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
740; CHECK-LABEL: vmaxu_vv_nxv1i32:
741; CHECK:       # %bb.0:
742; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
743; CHECK-NEXT:    vmaxu.vv v8, v8, v9, v0.t
744; CHECK-NEXT:    ret
745  %v = call <vscale x 1 x i32> @llvm.vp.umax.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 %evl)
746  ret <vscale x 1 x i32> %v
747}
748
749define <vscale x 1 x i32> @vmaxu_vv_nxv1i32_unmasked(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, i32 zeroext %evl) {
750; CHECK-LABEL: vmaxu_vv_nxv1i32_unmasked:
751; CHECK:       # %bb.0:
752; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
753; CHECK-NEXT:    vmaxu.vv v8, v8, v9
754; CHECK-NEXT:    ret
755  %v = call <vscale x 1 x i32> @llvm.vp.umax.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
756  ret <vscale x 1 x i32> %v
757}
758
759define <vscale x 1 x i32> @vmaxu_vx_nxv1i32(<vscale x 1 x i32> %va, i32 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
760; CHECK-LABEL: vmaxu_vx_nxv1i32:
761; CHECK:       # %bb.0:
762; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
763; CHECK-NEXT:    vmaxu.vx v8, v8, a0, v0.t
764; CHECK-NEXT:    ret
765  %elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
766  %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
767  %v = call <vscale x 1 x i32> @llvm.vp.umax.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> %m, i32 %evl)
768  ret <vscale x 1 x i32> %v
769}
770
771define <vscale x 1 x i32> @vmaxu_vx_nxv1i32_unmasked(<vscale x 1 x i32> %va, i32 %b, i32 zeroext %evl) {
772; CHECK-LABEL: vmaxu_vx_nxv1i32_unmasked:
773; CHECK:       # %bb.0:
774; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
775; CHECK-NEXT:    vmaxu.vx v8, v8, a0
776; CHECK-NEXT:    ret
777  %elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
778  %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
779  %v = call <vscale x 1 x i32> @llvm.vp.umax.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
780  ret <vscale x 1 x i32> %v
781}
782
783declare <vscale x 2 x i32> @llvm.vp.umax.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i1>, i32)
784
785define <vscale x 2 x i32> @vmaxu_vv_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
786; CHECK-LABEL: vmaxu_vv_nxv2i32:
787; CHECK:       # %bb.0:
788; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
789; CHECK-NEXT:    vmaxu.vv v8, v8, v9, v0.t
790; CHECK-NEXT:    ret
791  %v = call <vscale x 2 x i32> @llvm.vp.umax.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 %evl)
792  ret <vscale x 2 x i32> %v
793}
794
795define <vscale x 2 x i32> @vmaxu_vv_nxv2i32_unmasked(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, i32 zeroext %evl) {
796; CHECK-LABEL: vmaxu_vv_nxv2i32_unmasked:
797; CHECK:       # %bb.0:
798; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
799; CHECK-NEXT:    vmaxu.vv v8, v8, v9
800; CHECK-NEXT:    ret
801  %v = call <vscale x 2 x i32> @llvm.vp.umax.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
802  ret <vscale x 2 x i32> %v
803}
804
805define <vscale x 2 x i32> @vmaxu_vx_nxv2i32(<vscale x 2 x i32> %va, i32 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
806; CHECK-LABEL: vmaxu_vx_nxv2i32:
807; CHECK:       # %bb.0:
808; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
809; CHECK-NEXT:    vmaxu.vx v8, v8, a0, v0.t
810; CHECK-NEXT:    ret
811  %elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
812  %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
813  %v = call <vscale x 2 x i32> @llvm.vp.umax.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %m, i32 %evl)
814  ret <vscale x 2 x i32> %v
815}
816
817define <vscale x 2 x i32> @vmaxu_vx_nxv2i32_unmasked(<vscale x 2 x i32> %va, i32 %b, i32 zeroext %evl) {
818; CHECK-LABEL: vmaxu_vx_nxv2i32_unmasked:
819; CHECK:       # %bb.0:
820; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
821; CHECK-NEXT:    vmaxu.vx v8, v8, a0
822; CHECK-NEXT:    ret
823  %elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
824  %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
825  %v = call <vscale x 2 x i32> @llvm.vp.umax.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
826  ret <vscale x 2 x i32> %v
827}
828
829declare <vscale x 4 x i32> @llvm.vp.umax.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i1>, i32)
830
831define <vscale x 4 x i32> @vmaxu_vv_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
832; CHECK-LABEL: vmaxu_vv_nxv4i32:
833; CHECK:       # %bb.0:
834; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
835; CHECK-NEXT:    vmaxu.vv v8, v8, v10, v0.t
836; CHECK-NEXT:    ret
837  %v = call <vscale x 4 x i32> @llvm.vp.umax.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 %evl)
838  ret <vscale x 4 x i32> %v
839}
840
841define <vscale x 4 x i32> @vmaxu_vv_nxv4i32_unmasked(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, i32 zeroext %evl) {
842; CHECK-LABEL: vmaxu_vv_nxv4i32_unmasked:
843; CHECK:       # %bb.0:
844; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
845; CHECK-NEXT:    vmaxu.vv v8, v8, v10
846; CHECK-NEXT:    ret
847  %v = call <vscale x 4 x i32> @llvm.vp.umax.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
848  ret <vscale x 4 x i32> %v
849}
850
851define <vscale x 4 x i32> @vmaxu_vx_nxv4i32(<vscale x 4 x i32> %va, i32 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
852; CHECK-LABEL: vmaxu_vx_nxv4i32:
853; CHECK:       # %bb.0:
854; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
855; CHECK-NEXT:    vmaxu.vx v8, v8, a0, v0.t
856; CHECK-NEXT:    ret
857  %elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
858  %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
859  %v = call <vscale x 4 x i32> @llvm.vp.umax.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> %m, i32 %evl)
860  ret <vscale x 4 x i32> %v
861}
862
863define <vscale x 4 x i32> @vmaxu_vx_nxv4i32_unmasked(<vscale x 4 x i32> %va, i32 %b, i32 zeroext %evl) {
864; CHECK-LABEL: vmaxu_vx_nxv4i32_unmasked:
865; CHECK:       # %bb.0:
866; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
867; CHECK-NEXT:    vmaxu.vx v8, v8, a0
868; CHECK-NEXT:    ret
869  %elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
870  %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
871  %v = call <vscale x 4 x i32> @llvm.vp.umax.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
872  ret <vscale x 4 x i32> %v
873}
874
875declare <vscale x 8 x i32> @llvm.vp.umax.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>, <vscale x 8 x i1>, i32)
876
877define <vscale x 8 x i32> @vmaxu_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
878; CHECK-LABEL: vmaxu_vv_nxv8i32:
879; CHECK:       # %bb.0:
880; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
881; CHECK-NEXT:    vmaxu.vv v8, v8, v12, v0.t
882; CHECK-NEXT:    ret
883  %v = call <vscale x 8 x i32> @llvm.vp.umax.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 %evl)
884  ret <vscale x 8 x i32> %v
885}
886
887define <vscale x 8 x i32> @vmaxu_vv_nxv8i32_unmasked(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, i32 zeroext %evl) {
888; CHECK-LABEL: vmaxu_vv_nxv8i32_unmasked:
889; CHECK:       # %bb.0:
890; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
891; CHECK-NEXT:    vmaxu.vv v8, v8, v12
892; CHECK-NEXT:    ret
893  %v = call <vscale x 8 x i32> @llvm.vp.umax.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
894  ret <vscale x 8 x i32> %v
895}
896
897define <vscale x 8 x i32> @vmaxu_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
898; CHECK-LABEL: vmaxu_vx_nxv8i32:
899; CHECK:       # %bb.0:
900; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
901; CHECK-NEXT:    vmaxu.vx v8, v8, a0, v0.t
902; CHECK-NEXT:    ret
903  %elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
904  %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
905  %v = call <vscale x 8 x i32> @llvm.vp.umax.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %m, i32 %evl)
906  ret <vscale x 8 x i32> %v
907}
908
909define <vscale x 8 x i32> @vmaxu_vx_nxv8i32_unmasked(<vscale x 8 x i32> %va, i32 %b, i32 zeroext %evl) {
910; CHECK-LABEL: vmaxu_vx_nxv8i32_unmasked:
911; CHECK:       # %bb.0:
912; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
913; CHECK-NEXT:    vmaxu.vx v8, v8, a0
914; CHECK-NEXT:    ret
915  %elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
916  %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
917  %v = call <vscale x 8 x i32> @llvm.vp.umax.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
918  ret <vscale x 8 x i32> %v
919}
920
921declare <vscale x 16 x i32> @llvm.vp.umax.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>, <vscale x 16 x i1>, i32)
922
923define <vscale x 16 x i32> @vmaxu_vv_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
924; CHECK-LABEL: vmaxu_vv_nxv16i32:
925; CHECK:       # %bb.0:
926; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
927; CHECK-NEXT:    vmaxu.vv v8, v8, v16, v0.t
928; CHECK-NEXT:    ret
929  %v = call <vscale x 16 x i32> @llvm.vp.umax.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 %evl)
930  ret <vscale x 16 x i32> %v
931}
932
933define <vscale x 16 x i32> @vmaxu_vv_nxv16i32_unmasked(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, i32 zeroext %evl) {
934; CHECK-LABEL: vmaxu_vv_nxv16i32_unmasked:
935; CHECK:       # %bb.0:
936; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
937; CHECK-NEXT:    vmaxu.vv v8, v8, v16
938; CHECK-NEXT:    ret
939  %v = call <vscale x 16 x i32> @llvm.vp.umax.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
940  ret <vscale x 16 x i32> %v
941}
942
943define <vscale x 16 x i32> @vmaxu_vx_nxv16i32(<vscale x 16 x i32> %va, i32 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
944; CHECK-LABEL: vmaxu_vx_nxv16i32:
945; CHECK:       # %bb.0:
946; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
947; CHECK-NEXT:    vmaxu.vx v8, v8, a0, v0.t
948; CHECK-NEXT:    ret
949  %elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
950  %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
951  %v = call <vscale x 16 x i32> @llvm.vp.umax.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> %m, i32 %evl)
952  ret <vscale x 16 x i32> %v
953}
954
955define <vscale x 16 x i32> @vmaxu_vx_nxv16i32_unmasked(<vscale x 16 x i32> %va, i32 %b, i32 zeroext %evl) {
956; CHECK-LABEL: vmaxu_vx_nxv16i32_unmasked:
957; CHECK:       # %bb.0:
958; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
959; CHECK-NEXT:    vmaxu.vx v8, v8, a0
960; CHECK-NEXT:    ret
961  %elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
962  %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
963  %v = call <vscale x 16 x i32> @llvm.vp.umax.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
964  ret <vscale x 16 x i32> %v
965}
966
; Test that split-legalization works when the mask needs manual splitting.
968
969declare <vscale x 32 x i32> @llvm.vp.umax.nxv32i32(<vscale x 32 x i32>, <vscale x 32 x i32>, <vscale x 32 x i1>, i32)
970
; nxv32i32 exceeds the largest legal type (nxv16i32 at LMUL=8), so the
; operation is split into two halves. The nxv32i1 mask has no legal half
; either, so it is split manually: the original v0 is saved (vmv1r.v v24) and
; the upper mask half is extracted with vslidedown.vx. The upper half's EVL is
; computed as a branchless max(evl - VLMAX, 0) via the sub/sltu/addi/and
; sequence (a2 = 2*vlenb = VLMAX for nxv16i32), and the lower half's EVL is
; clamped to VLMAX with the bltu branch.
define <vscale x 32 x i32> @vmaxu_vx_nxv32i32(<vscale x 32 x i32> %va, i32 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vx_nxv32i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a2, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vmv1r.v v24, v0
; CHECK-NEXT:    csrr a2, vlenb
; CHECK-NEXT:    srli a3, a2, 2
; CHECK-NEXT:    slli a2, a2, 1
; CHECK-NEXT:    vslidedown.vx v0, v0, a3
; CHECK-NEXT:    sub a3, a1, a2
; CHECK-NEXT:    sltu a4, a1, a3
; CHECK-NEXT:    addi a4, a4, -1
; CHECK-NEXT:    and a3, a4, a3
; CHECK-NEXT:    vsetvli zero, a3, e32, m8, ta, ma
; CHECK-NEXT:    vmaxu.vx v16, v16, a0, v0.t
; CHECK-NEXT:    bltu a1, a2, .LBB80_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    mv a1, a2
; CHECK-NEXT:  .LBB80_2:
; CHECK-NEXT:    vmv1r.v v0, v24
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vmaxu.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 32 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <vscale x 32 x i32> %elt.head, <vscale x 32 x i32> poison, <vscale x 32 x i32> zeroinitializer
  %v = call <vscale x 32 x i32> @llvm.vp.umax.nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i32> %vb, <vscale x 32 x i1> %m, i32 %evl)
  ret <vscale x 32 x i32> %v
}
999
1000define <vscale x 32 x i32> @vmaxu_vx_nxv32i32_unmasked(<vscale x 32 x i32> %va, i32 %b, i32 zeroext %evl) {
1001; CHECK-LABEL: vmaxu_vx_nxv32i32_unmasked:
1002; CHECK:       # %bb.0:
1003; CHECK-NEXT:    csrr a2, vlenb
1004; CHECK-NEXT:    slli a2, a2, 1
1005; CHECK-NEXT:    sub a3, a1, a2
1006; CHECK-NEXT:    sltu a4, a1, a3
1007; CHECK-NEXT:    addi a4, a4, -1
1008; CHECK-NEXT:    and a3, a4, a3
1009; CHECK-NEXT:    vsetvli zero, a3, e32, m8, ta, ma
1010; CHECK-NEXT:    vmaxu.vx v16, v16, a0
1011; CHECK-NEXT:    bltu a1, a2, .LBB81_2
1012; CHECK-NEXT:  # %bb.1:
1013; CHECK-NEXT:    mv a1, a2
1014; CHECK-NEXT:  .LBB81_2:
1015; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
1016; CHECK-NEXT:    vmaxu.vx v8, v8, a0
1017; CHECK-NEXT:    ret
1018  %elt.head = insertelement <vscale x 32 x i32> poison, i32 %b, i32 0
1019  %vb = shufflevector <vscale x 32 x i32> %elt.head, <vscale x 32 x i32> poison, <vscale x 32 x i32> zeroinitializer
1020  %v = call <vscale x 32 x i32> @llvm.vp.umax.nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i32> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
1021  ret <vscale x 32 x i32> %v
1022}
1023
1024; Test splitting when the %evl is a constant (albeit an unknown one).
1025
1026declare i32 @llvm.vscale.i32()
1027
1028; FIXME: The upper half of the operation is doing nothing.
1029; FIXME: The branches comparing vscale vs. vscale should be constant-foldable.
1030
; Here %evl0 = vscale * 8, which always fits in the lower nxv16i32 half
; (capacity vscale * 16), so the upper half's EVL is always 0 — yet, as the
; FIXMEs above note, the generic split sequence (mask vslidedown + branchless
; EVL clamp + clamped-EVL branch) is still emitted rather than being
; constant-folded away.
define <vscale x 32 x i32> @vmaxu_vx_nxv32i32_evl_nx8(<vscale x 32 x i32> %va, i32 %b, <vscale x 32 x i1> %m) {
; CHECK-LABEL: vmaxu_vx_nxv32i32_evl_nx8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vmv1r.v v24, v0
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    srli a3, a1, 2
; CHECK-NEXT:    slli a2, a1, 1
; CHECK-NEXT:    vslidedown.vx v0, v0, a3
; CHECK-NEXT:    sub a3, a1, a2
; CHECK-NEXT:    sltu a4, a1, a3
; CHECK-NEXT:    addi a4, a4, -1
; CHECK-NEXT:    and a3, a4, a3
; CHECK-NEXT:    vsetvli zero, a3, e32, m8, ta, ma
; CHECK-NEXT:    vmaxu.vx v16, v16, a0, v0.t
; CHECK-NEXT:    bltu a1, a2, .LBB82_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    mv a1, a2
; CHECK-NEXT:  .LBB82_2:
; CHECK-NEXT:    vmv1r.v v0, v24
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vmaxu.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 32 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <vscale x 32 x i32> %elt.head, <vscale x 32 x i32> poison, <vscale x 32 x i32> zeroinitializer
  %evl = call i32 @llvm.vscale.i32()
  %evl0 = mul i32 %evl, 8
  %v = call <vscale x 32 x i32> @llvm.vp.umax.nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i32> %vb, <vscale x 32 x i1> %m, i32 %evl0)
  ret <vscale x 32 x i32> %v
}
1061
1062; FIXME: The upper half of the operation is doing nothing but we don't catch
1063; that on RV64; we issue a usubsat(and (vscale x 16), 0xffffffff, vscale x 16)
1064; (the "original" %evl is the "and", due to known-bits issues with legalizing
1065; the i32 %evl to i64) and this isn't detected as 0.
1066; This could be resolved in the future with more detailed KnownBits analysis
1067; for ISD::VSCALE.
1068
1069define <vscale x 32 x i32> @vmaxu_vx_nxv32i32_evl_nx16(<vscale x 32 x i32> %va, i32 %b, <vscale x 32 x i1> %m) {
1070; RV32-LABEL: vmaxu_vx_nxv32i32_evl_nx16:
1071; RV32:       # %bb.0:
1072; RV32-NEXT:    vsetvli a1, zero, e32, m8, ta, ma
1073; RV32-NEXT:    vmaxu.vx v8, v8, a0, v0.t
1074; RV32-NEXT:    ret
1075;
1076; RV64-LABEL: vmaxu_vx_nxv32i32_evl_nx16:
1077; RV64:       # %bb.0:
1078; RV64-NEXT:    csrr a1, vlenb
1079; RV64-NEXT:    srli a1, a1, 2
1080; RV64-NEXT:    vsetvli a2, zero, e8, mf2, ta, ma
1081; RV64-NEXT:    vslidedown.vx v24, v0, a1
1082; RV64-NEXT:    vsetvli a1, zero, e32, m8, ta, ma
1083; RV64-NEXT:    vmaxu.vx v8, v8, a0, v0.t
1084; RV64-NEXT:    vmv1r.v v0, v24
1085; RV64-NEXT:    vsetivli zero, 0, e32, m8, ta, ma
1086; RV64-NEXT:    vmaxu.vx v16, v16, a0, v0.t
1087; RV64-NEXT:    ret
1088  %elt.head = insertelement <vscale x 32 x i32> poison, i32 %b, i32 0
1089  %vb = shufflevector <vscale x 32 x i32> %elt.head, <vscale x 32 x i32> poison, <vscale x 32 x i32> zeroinitializer
1090  %evl = call i32 @llvm.vscale.i32()
1091  %evl0 = mul i32 %evl, 16
1092  %v = call <vscale x 32 x i32> @llvm.vp.umax.nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i32> %vb, <vscale x 32 x i1> %m, i32 %evl0)
1093  ret <vscale x 32 x i32> %v
1094}
1095
1096declare <vscale x 1 x i64> @llvm.vp.umax.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, i32)
1097
1098define <vscale x 1 x i64> @vmaxu_vv_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
1099; CHECK-LABEL: vmaxu_vv_nxv1i64:
1100; CHECK:       # %bb.0:
1101; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
1102; CHECK-NEXT:    vmaxu.vv v8, v8, v9, v0.t
1103; CHECK-NEXT:    ret
1104  %v = call <vscale x 1 x i64> @llvm.vp.umax.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 %evl)
1105  ret <vscale x 1 x i64> %v
1106}
1107
1108define <vscale x 1 x i64> @vmaxu_vv_nxv1i64_unmasked(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, i32 zeroext %evl) {
1109; CHECK-LABEL: vmaxu_vv_nxv1i64_unmasked:
1110; CHECK:       # %bb.0:
1111; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
1112; CHECK-NEXT:    vmaxu.vv v8, v8, v9
1113; CHECK-NEXT:    ret
1114  %v = call <vscale x 1 x i64> @llvm.vp.umax.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
1115  ret <vscale x 1 x i64> %v
1116}
1117
1118define <vscale x 1 x i64> @vmaxu_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
1119; RV32-LABEL: vmaxu_vx_nxv1i64:
1120; RV32:       # %bb.0:
1121; RV32-NEXT:    addi sp, sp, -16
1122; RV32-NEXT:    .cfi_def_cfa_offset 16
1123; RV32-NEXT:    sw a0, 8(sp)
1124; RV32-NEXT:    sw a1, 12(sp)
1125; RV32-NEXT:    addi a0, sp, 8
1126; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
1127; RV32-NEXT:    vlse64.v v9, (a0), zero
1128; RV32-NEXT:    vmaxu.vv v8, v8, v9, v0.t
1129; RV32-NEXT:    addi sp, sp, 16
1130; RV32-NEXT:    .cfi_def_cfa_offset 0
1131; RV32-NEXT:    ret
1132;
1133; RV64-LABEL: vmaxu_vx_nxv1i64:
1134; RV64:       # %bb.0:
1135; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
1136; RV64-NEXT:    vmaxu.vx v8, v8, a0, v0.t
1137; RV64-NEXT:    ret
1138  %elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
1139  %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
1140  %v = call <vscale x 1 x i64> @llvm.vp.umax.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %m, i32 %evl)
1141  ret <vscale x 1 x i64> %v
1142}
1143
1144define <vscale x 1 x i64> @vmaxu_vx_nxv1i64_unmasked(<vscale x 1 x i64> %va, i64 %b, i32 zeroext %evl) {
1145; RV32-LABEL: vmaxu_vx_nxv1i64_unmasked:
1146; RV32:       # %bb.0:
1147; RV32-NEXT:    addi sp, sp, -16
1148; RV32-NEXT:    .cfi_def_cfa_offset 16
1149; RV32-NEXT:    sw a0, 8(sp)
1150; RV32-NEXT:    sw a1, 12(sp)
1151; RV32-NEXT:    addi a0, sp, 8
1152; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
1153; RV32-NEXT:    vlse64.v v9, (a0), zero
1154; RV32-NEXT:    vmaxu.vv v8, v8, v9
1155; RV32-NEXT:    addi sp, sp, 16
1156; RV32-NEXT:    .cfi_def_cfa_offset 0
1157; RV32-NEXT:    ret
1158;
1159; RV64-LABEL: vmaxu_vx_nxv1i64_unmasked:
1160; RV64:       # %bb.0:
1161; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
1162; RV64-NEXT:    vmaxu.vx v8, v8, a0
1163; RV64-NEXT:    ret
1164  %elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
1165  %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
1166  %v = call <vscale x 1 x i64> @llvm.vp.umax.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
1167  ret <vscale x 1 x i64> %v
1168}
1169
1170declare <vscale x 2 x i64> @llvm.vp.umax.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i1>, i32)
1171
1172define <vscale x 2 x i64> @vmaxu_vv_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
1173; CHECK-LABEL: vmaxu_vv_nxv2i64:
1174; CHECK:       # %bb.0:
1175; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
1176; CHECK-NEXT:    vmaxu.vv v8, v8, v10, v0.t
1177; CHECK-NEXT:    ret
1178  %v = call <vscale x 2 x i64> @llvm.vp.umax.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 %evl)
1179  ret <vscale x 2 x i64> %v
1180}
1181
1182define <vscale x 2 x i64> @vmaxu_vv_nxv2i64_unmasked(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, i32 zeroext %evl) {
1183; CHECK-LABEL: vmaxu_vv_nxv2i64_unmasked:
1184; CHECK:       # %bb.0:
1185; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
1186; CHECK-NEXT:    vmaxu.vv v8, v8, v10
1187; CHECK-NEXT:    ret
1188  %v = call <vscale x 2 x i64> @llvm.vp.umax.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
1189  ret <vscale x 2 x i64> %v
1190}
1191
1192define <vscale x 2 x i64> @vmaxu_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
1193; RV32-LABEL: vmaxu_vx_nxv2i64:
1194; RV32:       # %bb.0:
1195; RV32-NEXT:    addi sp, sp, -16
1196; RV32-NEXT:    .cfi_def_cfa_offset 16
1197; RV32-NEXT:    sw a0, 8(sp)
1198; RV32-NEXT:    sw a1, 12(sp)
1199; RV32-NEXT:    addi a0, sp, 8
1200; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
1201; RV32-NEXT:    vlse64.v v10, (a0), zero
1202; RV32-NEXT:    vmaxu.vv v8, v8, v10, v0.t
1203; RV32-NEXT:    addi sp, sp, 16
1204; RV32-NEXT:    .cfi_def_cfa_offset 0
1205; RV32-NEXT:    ret
1206;
1207; RV64-LABEL: vmaxu_vx_nxv2i64:
1208; RV64:       # %bb.0:
1209; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
1210; RV64-NEXT:    vmaxu.vx v8, v8, a0, v0.t
1211; RV64-NEXT:    ret
1212  %elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
1213  %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
1214  %v = call <vscale x 2 x i64> @llvm.vp.umax.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %m, i32 %evl)
1215  ret <vscale x 2 x i64> %v
1216}
1217
; Unmasked variant (all-true mask via splat (i1 true)): same scalar-splat
; lowering as the masked test, but the vmaxu carries no v0.t operand.
define <vscale x 2 x i64> @vmaxu_vx_nxv2i64_unmasked(<vscale x 2 x i64> %va, i64 %b, i32 zeroext %evl) {
; RV32-LABEL: vmaxu_vx_nxv2i64_unmasked:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT:    vlse64.v v10, (a0), zero
; RV32-NEXT:    vmaxu.vv v8, v8, v10
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    .cfi_def_cfa_offset 0
; RV32-NEXT:    ret
;
; RV64-LABEL: vmaxu_vx_nxv2i64_unmasked:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT:    vmaxu.vx v8, v8, a0
; RV64-NEXT:    ret
  %elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i64> @llvm.vp.umax.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i64> %v
}
1243
1244declare <vscale x 4 x i64> @llvm.vp.umax.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>, <vscale x 4 x i1>, i32)
1245
; Masked vector-vector vp.umax at SEW=64, LMUL=4: a single vsetvli taking
; the EVL from a0, then vmaxu.vv under the v0 mask.
define <vscale x 4 x i64> @vmaxu_vv_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vv_nxv4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vmaxu.vv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i64> @llvm.vp.umax.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i64> %v
}
1255
; Unmasked vector-vector variant (all-true mask): vmaxu.vv with no v0.t.
define <vscale x 4 x i64> @vmaxu_vv_nxv4i64_unmasked(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vv_nxv4i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vmaxu.vv v8, v8, v12
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i64> @llvm.vp.umax.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i64> %v
}
1265
; Masked i64 scalar splat at LMUL=4: RV32 splats the (a0,a1) pair through
; the stack with a zero-stride vlse64 and uses vmaxu.vv; RV64 uses
; vmaxu.vx on a0 directly.
define <vscale x 4 x i64> @vmaxu_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vmaxu_vx_nxv4i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT:    vlse64.v v12, (a0), zero
; RV32-NEXT:    vmaxu.vv v8, v8, v12, v0.t
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    .cfi_def_cfa_offset 0
; RV32-NEXT:    ret
;
; RV64-LABEL: vmaxu_vx_nxv4i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT:    vmaxu.vx v8, v8, a0, v0.t
; RV64-NEXT:    ret
  %elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i64> @llvm.vp.umax.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i64> %v
}
1291
; Unmasked i64 scalar splat at LMUL=4: same lowering as the masked test,
; without the v0.t operand on the vmaxu.
define <vscale x 4 x i64> @vmaxu_vx_nxv4i64_unmasked(<vscale x 4 x i64> %va, i64 %b, i32 zeroext %evl) {
; RV32-LABEL: vmaxu_vx_nxv4i64_unmasked:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT:    vlse64.v v12, (a0), zero
; RV32-NEXT:    vmaxu.vv v8, v8, v12
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    .cfi_def_cfa_offset 0
; RV32-NEXT:    ret
;
; RV64-LABEL: vmaxu_vx_nxv4i64_unmasked:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT:    vmaxu.vx v8, v8, a0
; RV64-NEXT:    ret
  %elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i64> @llvm.vp.umax.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i64> %v
}
1317
1318declare <vscale x 8 x i64> @llvm.vp.umax.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>, <vscale x 8 x i1>, i32)
1319
; Masked vector-vector vp.umax at SEW=64, LMUL=8 (full register-group
; width): one vsetvli from the EVL in a0, then masked vmaxu.vv.
define <vscale x 8 x i64> @vmaxu_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vv_nxv8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vmaxu.vv v8, v8, v16, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i64> @llvm.vp.umax.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i64> %v
}
1329
; Unmasked LMUL=8 vector-vector variant (all-true mask): no v0.t operand.
define <vscale x 8 x i64> @vmaxu_vv_nxv8i64_unmasked(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vv_nxv8i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vmaxu.vv v8, v8, v16
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i64> @llvm.vp.umax.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i64> %v
}
1339
; Masked i64 scalar splat at LMUL=8: RV32 splats the (a0,a1) pair through
; a stack slot with a zero-stride vlse64 and uses masked vmaxu.vv; RV64
; folds the scalar into masked vmaxu.vx.
define <vscale x 8 x i64> @vmaxu_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vmaxu_vx_nxv8i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT:    vlse64.v v16, (a0), zero
; RV32-NEXT:    vmaxu.vv v8, v8, v16, v0.t
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    .cfi_def_cfa_offset 0
; RV32-NEXT:    ret
;
; RV64-LABEL: vmaxu_vx_nxv8i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT:    vmaxu.vx v8, v8, a0, v0.t
; RV64-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i64> @llvm.vp.umax.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i64> %v
}
1365
; Unmasked i64 scalar splat at LMUL=8: same lowering as the masked test,
; without the v0.t operand on the vmaxu.
define <vscale x 8 x i64> @vmaxu_vx_nxv8i64_unmasked(<vscale x 8 x i64> %va, i64 %b, i32 zeroext %evl) {
; RV32-LABEL: vmaxu_vx_nxv8i64_unmasked:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT:    vlse64.v v16, (a0), zero
; RV32-NEXT:    vmaxu.vv v8, v8, v16
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    .cfi_def_cfa_offset 0
; RV32-NEXT:    ret
;
; RV64-LABEL: vmaxu_vx_nxv8i64_unmasked:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT:    vmaxu.vx v8, v8, a0
; RV64-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i64> @llvm.vp.umax.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i64> %v
}
1391