; xref: /llvm-project/llvm/test/CodeGen/RISCV/rvv/vmin-vp.ll (revision 36e4176f1d83d04cdebb4e1870561099b2478d80)
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefixes=CHECK,RV64

declare <vscale x 8 x i7> @llvm.vp.smin.nxv8i7(<vscale x 8 x i7>, <vscale x 8 x i7>, <vscale x 8 x i1>, i32)

; i7 is an illegal element type: per the CHECK lines, both operands are
; sign-extended in-register at e8 (vsll.vi/vsra.vi by 1) before the vmin.vv.
define <vscale x 8 x i7> @vmin_vx_nxv8i7(<vscale x 8 x i7> %a, i7 signext %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vx_nxv8i7:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vsll.vi v8, v8, 1, v0.t
; CHECK-NEXT:    vmv.v.x v9, a0
; CHECK-NEXT:    vsra.vi v8, v8, 1, v0.t
; CHECK-NEXT:    vsll.vi v9, v9, 1, v0.t
; CHECK-NEXT:    vsra.vi v9, v9, 1, v0.t
; CHECK-NEXT:    vmin.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i7> poison, i7 %b, i32 0
  %vb = shufflevector <vscale x 8 x i7> %elt.head, <vscale x 8 x i7> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i7> @llvm.vp.smin.nxv8i7(<vscale x 8 x i7> %a, <vscale x 8 x i7> %vb, <vscale x 8 x i1> %mask, i32 %evl)
  ret <vscale x 8 x i7> %v
}

declare <vscale x 1 x i8> @llvm.vp.smin.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)

; vp.smin tests for <vscale x 1 x i8>: vector-vector and vector-scalar forms,
; masked and unmasked, plus a commuted scalar operand.
define <vscale x 1 x i8> @vmin_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vv_nxv1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vmin.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i8> @llvm.vp.smin.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i8> %v
}

define <vscale x 1 x i8> @vmin_vv_nxv1i8_unmasked(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vv_nxv1i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vmin.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i8> @llvm.vp.smin.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i8> %v
}

define <vscale x 1 x i8> @vmin_vx_nxv1i8(<vscale x 1 x i8> %va, i8 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vx_nxv1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vmin.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i8> @llvm.vp.smin.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i8> %v
}

define <vscale x 1 x i8> @vmin_vx_nxv1i8_commute(<vscale x 1 x i8> %va, i8 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vx_nxv1i8_commute:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vmin.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i8> @llvm.vp.smin.nxv1i8(<vscale x 1 x i8> %vb, <vscale x 1 x i8> %va, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i8> %v
}

define <vscale x 1 x i8> @vmin_vx_nxv1i8_unmasked(<vscale x 1 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vx_nxv1i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vmin.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i8> @llvm.vp.smin.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i8> %v
}

declare <vscale x 2 x i8> @llvm.vp.smin.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i1>, i32)

; vp.smin tests for <vscale x 2 x i8> (e8/mf4), masked and unmasked.
define <vscale x 2 x i8> @vmin_vv_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vv_nxv2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vmin.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i8> @llvm.vp.smin.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i8> %v
}

define <vscale x 2 x i8> @vmin_vv_nxv2i8_unmasked(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vv_nxv2i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vmin.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i8> @llvm.vp.smin.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i8> %v
}

define <vscale x 2 x i8> @vmin_vx_nxv2i8(<vscale x 2 x i8> %va, i8 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vx_nxv2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vmin.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i8> @llvm.vp.smin.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i8> %v
}

define <vscale x 2 x i8> @vmin_vx_nxv2i8_unmasked(<vscale x 2 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vx_nxv2i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vmin.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i8> @llvm.vp.smin.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i8> %v
}

declare <vscale x 3 x i8> @llvm.vp.smin.nxv3i8(<vscale x 3 x i8>, <vscale x 3 x i8>, <vscale x 3 x i1>, i32)

; Non-power-of-two element count: nxv3i8 is widened and, per the CHECK lines,
; lowered at e8/mf2 like nxv4i8.
define <vscale x 3 x i8> @vmin_vv_nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, <vscale x 3 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vv_nxv3i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vmin.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 3 x i8> @llvm.vp.smin.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, <vscale x 3 x i1> %m, i32 %evl)
  ret <vscale x 3 x i8> %v
}

define <vscale x 3 x i8> @vmin_vv_nxv3i8_unmasked(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vv_nxv3i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vmin.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 3 x i8> @llvm.vp.smin.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, <vscale x 3 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 3 x i8> %v
}

define <vscale x 3 x i8> @vmin_vx_nxv3i8(<vscale x 3 x i8> %va, i8 %b, <vscale x 3 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vx_nxv3i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vmin.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 3 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 3 x i8> %elt.head, <vscale x 3 x i8> poison, <vscale x 3 x i32> zeroinitializer
  %v = call <vscale x 3 x i8> @llvm.vp.smin.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %vb, <vscale x 3 x i1> %m, i32 %evl)
  ret <vscale x 3 x i8> %v
}

define <vscale x 3 x i8> @vmin_vx_nxv3i8_unmasked(<vscale x 3 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vx_nxv3i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vmin.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 3 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 3 x i8> %elt.head, <vscale x 3 x i8> poison, <vscale x 3 x i32> zeroinitializer
  %v = call <vscale x 3 x i8> @llvm.vp.smin.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %vb, <vscale x 3 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 3 x i8> %v
}

declare <vscale x 4 x i8> @llvm.vp.smin.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i1>, i32)

; vp.smin tests for <vscale x 4 x i8> (e8/mf2), masked and unmasked.
define <vscale x 4 x i8> @vmin_vv_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vv_nxv4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vmin.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i8> @llvm.vp.smin.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i8> %v
}

define <vscale x 4 x i8> @vmin_vv_nxv4i8_unmasked(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vv_nxv4i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vmin.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i8> @llvm.vp.smin.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i8> %v
}

define <vscale x 4 x i8> @vmin_vx_nxv4i8(<vscale x 4 x i8> %va, i8 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vx_nxv4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vmin.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i8> @llvm.vp.smin.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i8> %v
}

define <vscale x 4 x i8> @vmin_vx_nxv4i8_unmasked(<vscale x 4 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vx_nxv4i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vmin.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i8> @llvm.vp.smin.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i8> %v
}

declare <vscale x 8 x i8> @llvm.vp.smin.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i1>, i32)

; vp.smin tests for <vscale x 8 x i8> (e8/m1), masked and unmasked.
define <vscale x 8 x i8> @vmin_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vv_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vmin.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i8> @llvm.vp.smin.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i8> %v
}

define <vscale x 8 x i8> @vmin_vv_nxv8i8_unmasked(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vv_nxv8i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vmin.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i8> @llvm.vp.smin.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i8> %v
}

define <vscale x 8 x i8> @vmin_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vx_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vmin.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i8> @llvm.vp.smin.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i8> %v
}

define <vscale x 8 x i8> @vmin_vx_nxv8i8_unmasked(<vscale x 8 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vx_nxv8i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vmin.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i8> @llvm.vp.smin.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i8> %v
}

declare <vscale x 16 x i8> @llvm.vp.smin.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i1>, i32)

; vp.smin tests for <vscale x 16 x i8> (e8/m2), masked and unmasked.
define <vscale x 16 x i8> @vmin_vv_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vv_nxv16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vmin.vv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i8> @llvm.vp.smin.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i8> %v
}

define <vscale x 16 x i8> @vmin_vv_nxv16i8_unmasked(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vv_nxv16i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vmin.vv v8, v8, v10
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i8> @llvm.vp.smin.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x i8> %v
}

define <vscale x 16 x i8> @vmin_vx_nxv16i8(<vscale x 16 x i8> %va, i8 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vx_nxv16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vmin.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
  %v = call <vscale x 16 x i8> @llvm.vp.smin.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i8> %v
}

define <vscale x 16 x i8> @vmin_vx_nxv16i8_unmasked(<vscale x 16 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vx_nxv16i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vmin.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
  %v = call <vscale x 16 x i8> @llvm.vp.smin.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x i8> %v
}

declare <vscale x 32 x i8> @llvm.vp.smin.nxv32i8(<vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i1>, i32)

; vp.smin tests for <vscale x 32 x i8> (e8/m4), masked and unmasked.
define <vscale x 32 x i8> @vmin_vv_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vv_nxv32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vmin.vv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 32 x i8> @llvm.vp.smin.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 %evl)
  ret <vscale x 32 x i8> %v
}

define <vscale x 32 x i8> @vmin_vv_nxv32i8_unmasked(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vv_nxv32i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vmin.vv v8, v8, v12
; CHECK-NEXT:    ret
  %v = call <vscale x 32 x i8> @llvm.vp.smin.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 32 x i8> %v
}

define <vscale x 32 x i8> @vmin_vx_nxv32i8(<vscale x 32 x i8> %va, i8 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vx_nxv32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    vmin.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
  %v = call <vscale x 32 x i8> @llvm.vp.smin.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> %m, i32 %evl)
  ret <vscale x 32 x i8> %v
}

define <vscale x 32 x i8> @vmin_vx_nxv32i8_unmasked(<vscale x 32 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vx_nxv32i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    vmin.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
  %v = call <vscale x 32 x i8> @llvm.vp.smin.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 32 x i8> %v
}

declare <vscale x 64 x i8> @llvm.vp.smin.nxv64i8(<vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i1>, i32)

; vp.smin tests for <vscale x 64 x i8> (e8/m8), masked and unmasked.
define <vscale x 64 x i8> @vmin_vv_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vv_nxv64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    vmin.vv v8, v8, v16, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 64 x i8> @llvm.vp.smin.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 %evl)
  ret <vscale x 64 x i8> %v
}

define <vscale x 64 x i8> @vmin_vv_nxv64i8_unmasked(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vv_nxv64i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    vmin.vv v8, v8, v16
; CHECK-NEXT:    ret
  %v = call <vscale x 64 x i8> @llvm.vp.smin.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 64 x i8> %v
}

define <vscale x 64 x i8> @vmin_vx_nxv64i8(<vscale x 64 x i8> %va, i8 %b, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vx_nxv64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT:    vmin.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
  %v = call <vscale x 64 x i8> @llvm.vp.smin.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> %m, i32 %evl)
  ret <vscale x 64 x i8> %v
}

define <vscale x 64 x i8> @vmin_vx_nxv64i8_unmasked(<vscale x 64 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vx_nxv64i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT:    vmin.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
  %v = call <vscale x 64 x i8> @llvm.vp.smin.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 64 x i8> %v
}

; Test that split-legalization works when the mask itself needs splitting.

declare <vscale x 128 x i8> @llvm.vp.smin.nxv128i8(<vscale x 128 x i8>, <vscale x 128 x i8>, <vscale x 128 x i1>, i32)

define <vscale x 128 x i8> @vmin_vx_nxv128i8(<vscale x 128 x i8> %va, i8 %b, <vscale x 128 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vx_nxv128i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a3, zero, e8, m8, ta, ma
; CHECK-NEXT:    vmv1r.v v24, v0
; CHECK-NEXT:    vlm.v v0, (a1)
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    slli a1, a1, 3
; CHECK-NEXT:    sub a3, a2, a1
; CHECK-NEXT:    sltu a4, a2, a3
; CHECK-NEXT:    addi a4, a4, -1
; CHECK-NEXT:    and a3, a4, a3
; CHECK-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
; CHECK-NEXT:    vmin.vx v16, v16, a0, v0.t
; CHECK-NEXT:    bltu a2, a1, .LBB34_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    mv a2, a1
; CHECK-NEXT:  .LBB34_2:
; CHECK-NEXT:    vmv1r.v v0, v24
; CHECK-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT:    vmin.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 128 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 128 x i8> %elt.head, <vscale x 128 x i8> poison, <vscale x 128 x i32> zeroinitializer
  %v = call <vscale x 128 x i8> @llvm.vp.smin.nxv128i8(<vscale x 128 x i8> %va, <vscale x 128 x i8> %vb, <vscale x 128 x i1> %m, i32 %evl)
  ret <vscale x 128 x i8> %v
}

define <vscale x 128 x i8> @vmin_vx_nxv128i8_unmasked(<vscale x 128 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vx_nxv128i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a2, vlenb
; CHECK-NEXT:    slli a2, a2, 3
; CHECK-NEXT:    sub a3, a1, a2
; CHECK-NEXT:    sltu a4, a1, a3
; CHECK-NEXT:    addi a4, a4, -1
; CHECK-NEXT:    and a3, a4, a3
; CHECK-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
; CHECK-NEXT:    vmin.vx v16, v16, a0
; CHECK-NEXT:    bltu a1, a2, .LBB35_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    mv a1, a2
; CHECK-NEXT:  .LBB35_2:
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT:    vmin.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 128 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 128 x i8> %elt.head, <vscale x 128 x i8> poison, <vscale x 128 x i32> zeroinitializer
  %v = call <vscale x 128 x i8> @llvm.vp.smin.nxv128i8(<vscale x 128 x i8> %va, <vscale x 128 x i8> %vb, <vscale x 128 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 128 x i8> %v
}

declare <vscale x 1 x i16> @llvm.vp.smin.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)

; vp.smin tests for <vscale x 1 x i16> (e16/mf4), masked and unmasked.
define <vscale x 1 x i16> @vmin_vv_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vv_nxv1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vmin.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i16> @llvm.vp.smin.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i16> %v
}

define <vscale x 1 x i16> @vmin_vv_nxv1i16_unmasked(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vv_nxv1i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vmin.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i16> @llvm.vp.smin.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i16> %v
}

define <vscale x 1 x i16> @vmin_vx_nxv1i16(<vscale x 1 x i16> %va, i16 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vx_nxv1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vmin.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i16> @llvm.vp.smin.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i16> %v
}

define <vscale x 1 x i16> @vmin_vx_nxv1i16_unmasked(<vscale x 1 x i16> %va, i16 %b, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vx_nxv1i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vmin.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i16> @llvm.vp.smin.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i16> %v
}

declare <vscale x 2 x i16> @llvm.vp.smin.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i1>, i32)

; vp.smin tests for <vscale x 2 x i16> (e16/mf2), masked and unmasked.
define <vscale x 2 x i16> @vmin_vv_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vv_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vmin.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i16> @llvm.vp.smin.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i16> %v
}

define <vscale x 2 x i16> @vmin_vv_nxv2i16_unmasked(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vv_nxv2i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vmin.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i16> @llvm.vp.smin.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i16> %v
}

define <vscale x 2 x i16> @vmin_vx_nxv2i16(<vscale x 2 x i16> %va, i16 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vx_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vmin.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i16> @llvm.vp.smin.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i16> %v
}

define <vscale x 2 x i16> @vmin_vx_nxv2i16_unmasked(<vscale x 2 x i16> %va, i16 %b, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vx_nxv2i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vmin.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i16> @llvm.vp.smin.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i16> %v
}

declare <vscale x 4 x i16> @llvm.vp.smin.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i1>, i32)

; vp.smin tests for <vscale x 4 x i16> (e16/m1), masked and unmasked.
define <vscale x 4 x i16> @vmin_vv_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vv_nxv4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vmin.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i16> @llvm.vp.smin.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i16> %v
}

define <vscale x 4 x i16> @vmin_vv_nxv4i16_unmasked(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vv_nxv4i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vmin.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i16> @llvm.vp.smin.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i16> %v
}

define <vscale x 4 x i16> @vmin_vx_nxv4i16(<vscale x 4 x i16> %va, i16 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vx_nxv4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vmin.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i16> @llvm.vp.smin.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i16> %v
}

define <vscale x 4 x i16> @vmin_vx_nxv4i16_unmasked(<vscale x 4 x i16> %va, i16 %b, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vx_nxv4i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vmin.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i16> @llvm.vp.smin.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i16> %v
}

600declare <vscale x 8 x i16> @llvm.vp.smin.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i1>, i32)
601
602define <vscale x 8 x i16> @vmin_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
603; CHECK-LABEL: vmin_vv_nxv8i16:
604; CHECK:       # %bb.0:
605; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
606; CHECK-NEXT:    vmin.vv v8, v8, v10, v0.t
607; CHECK-NEXT:    ret
608  %v = call <vscale x 8 x i16> @llvm.vp.smin.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 %evl)
609  ret <vscale x 8 x i16> %v
610}
611
; All-true mask variant: unmasked vmin.vv at e16/m2.
define <vscale x 8 x i16> @vmin_vv_nxv8i16_unmasked(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vv_nxv8i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vmin.vv v8, v8, v10
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i16> @llvm.vp.smin.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i16> %v
}
621
; Masked vp.smin vs. a splatted i16 scalar: folds into vmin.vx at e16/m2.
define <vscale x 8 x i16> @vmin_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vx_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vmin.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i16> @llvm.vp.smin.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i16> %v
}
633
; All-true mask variant: unmasked vmin.vx at e16/m2.
define <vscale x 8 x i16> @vmin_vx_nxv8i16_unmasked(<vscale x 8 x i16> %va, i16 %b, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vx_nxv8i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vmin.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i16> @llvm.vp.smin.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i16> %v
}
645
646declare <vscale x 16 x i16> @llvm.vp.smin.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i1>, i32)
647
; Masked vector-vector vp.smin: single vmin.vv at e16/m4.
define <vscale x 16 x i16> @vmin_vv_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vv_nxv16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vmin.vv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i16> @llvm.vp.smin.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i16> %v
}
657
; All-true mask variant: unmasked vmin.vv at e16/m4.
define <vscale x 16 x i16> @vmin_vv_nxv16i16_unmasked(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vv_nxv16i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vmin.vv v8, v8, v12
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i16> @llvm.vp.smin.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x i16> %v
}
667
; Masked vp.smin vs. a splatted i16 scalar: folds into vmin.vx at e16/m4.
define <vscale x 16 x i16> @vmin_vx_nxv16i16(<vscale x 16 x i16> %va, i16 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vx_nxv16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vmin.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
  %v = call <vscale x 16 x i16> @llvm.vp.smin.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i16> %v
}
679
; All-true mask variant: unmasked vmin.vx at e16/m4.
define <vscale x 16 x i16> @vmin_vx_nxv16i16_unmasked(<vscale x 16 x i16> %va, i16 %b, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vx_nxv16i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vmin.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
  %v = call <vscale x 16 x i16> @llvm.vp.smin.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x i16> %v
}
691
692declare <vscale x 32 x i16> @llvm.vp.smin.nxv32i16(<vscale x 32 x i16>, <vscale x 32 x i16>, <vscale x 32 x i1>, i32)
693
; Masked vector-vector vp.smin: single vmin.vv at e16/m8 (largest LMUL, no split needed).
define <vscale x 32 x i16> @vmin_vv_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vv_nxv32i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vmin.vv v8, v8, v16, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 32 x i16> @llvm.vp.smin.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 %evl)
  ret <vscale x 32 x i16> %v
}
703
; All-true mask variant: unmasked vmin.vv at e16/m8.
define <vscale x 32 x i16> @vmin_vv_nxv32i16_unmasked(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vv_nxv32i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vmin.vv v8, v8, v16
; CHECK-NEXT:    ret
  %v = call <vscale x 32 x i16> @llvm.vp.smin.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 32 x i16> %v
}
713
; Masked vp.smin vs. a splatted i16 scalar: folds into vmin.vx at e16/m8.
define <vscale x 32 x i16> @vmin_vx_nxv32i16(<vscale x 32 x i16> %va, i16 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vx_nxv32i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT:    vmin.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
  %v = call <vscale x 32 x i16> @llvm.vp.smin.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> %m, i32 %evl)
  ret <vscale x 32 x i16> %v
}
725
; All-true mask variant: unmasked vmin.vx at e16/m8.
define <vscale x 32 x i16> @vmin_vx_nxv32i16_unmasked(<vscale x 32 x i16> %va, i16 %b, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vx_nxv32i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT:    vmin.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
  %v = call <vscale x 32 x i16> @llvm.vp.smin.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 32 x i16> %v
}
737
738declare <vscale x 1 x i32> @llvm.vp.smin.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)
739
; Masked vector-vector vp.smin: single vmin.vv at e32/mf2.
define <vscale x 1 x i32> @vmin_vv_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vv_nxv1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vmin.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i32> @llvm.vp.smin.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i32> %v
}
749
; All-true mask variant: unmasked vmin.vv at e32/mf2.
define <vscale x 1 x i32> @vmin_vv_nxv1i32_unmasked(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vv_nxv1i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vmin.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i32> @llvm.vp.smin.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i32> %v
}
759
; Masked vp.smin vs. a splatted i32 scalar: folds into vmin.vx at e32/mf2.
define <vscale x 1 x i32> @vmin_vx_nxv1i32(<vscale x 1 x i32> %va, i32 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vx_nxv1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vmin.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i32> @llvm.vp.smin.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i32> %v
}
771
; All-true mask variant: unmasked vmin.vx at e32/mf2.
define <vscale x 1 x i32> @vmin_vx_nxv1i32_unmasked(<vscale x 1 x i32> %va, i32 %b, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vx_nxv1i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vmin.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i32> @llvm.vp.smin.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i32> %v
}
783
784declare <vscale x 2 x i32> @llvm.vp.smin.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i1>, i32)
785
; Masked vector-vector vp.smin: single vmin.vv at e32/m1.
define <vscale x 2 x i32> @vmin_vv_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vv_nxv2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vmin.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i32> @llvm.vp.smin.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i32> %v
}
795
; All-true mask variant: unmasked vmin.vv at e32/m1.
define <vscale x 2 x i32> @vmin_vv_nxv2i32_unmasked(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vv_nxv2i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vmin.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i32> @llvm.vp.smin.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i32> %v
}
805
; Masked vp.smin vs. a splatted i32 scalar: folds into vmin.vx at e32/m1.
define <vscale x 2 x i32> @vmin_vx_nxv2i32(<vscale x 2 x i32> %va, i32 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vx_nxv2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vmin.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i32> @llvm.vp.smin.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i32> %v
}
817
; All-true mask variant: unmasked vmin.vx at e32/m1.
define <vscale x 2 x i32> @vmin_vx_nxv2i32_unmasked(<vscale x 2 x i32> %va, i32 %b, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vx_nxv2i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vmin.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i32> @llvm.vp.smin.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i32> %v
}
829
830declare <vscale x 4 x i32> @llvm.vp.smin.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i1>, i32)
831
; Masked vector-vector vp.smin: single vmin.vv at e32/m2.
define <vscale x 4 x i32> @vmin_vv_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vv_nxv4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vmin.vv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i32> @llvm.vp.smin.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i32> %v
}
841
; All-true mask variant: unmasked vmin.vv at e32/m2.
define <vscale x 4 x i32> @vmin_vv_nxv4i32_unmasked(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vv_nxv4i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vmin.vv v8, v8, v10
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i32> @llvm.vp.smin.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i32> %v
}
851
; Masked vp.smin vs. a splatted i32 scalar: folds into vmin.vx at e32/m2.
define <vscale x 4 x i32> @vmin_vx_nxv4i32(<vscale x 4 x i32> %va, i32 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vx_nxv4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vmin.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i32> @llvm.vp.smin.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i32> %v
}
863
; All-true mask variant: unmasked vmin.vx at e32/m2.
define <vscale x 4 x i32> @vmin_vx_nxv4i32_unmasked(<vscale x 4 x i32> %va, i32 %b, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vx_nxv4i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vmin.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i32> @llvm.vp.smin.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i32> %v
}
875
876declare <vscale x 8 x i32> @llvm.vp.smin.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>, <vscale x 8 x i1>, i32)
877
; Masked vector-vector vp.smin: single vmin.vv at e32/m4.
define <vscale x 8 x i32> @vmin_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vv_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vmin.vv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i32> @llvm.vp.smin.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i32> %v
}
887
; All-true mask variant: unmasked vmin.vv at e32/m4.
define <vscale x 8 x i32> @vmin_vv_nxv8i32_unmasked(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vv_nxv8i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vmin.vv v8, v8, v12
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i32> @llvm.vp.smin.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i32> %v
}
897
; Masked vp.smin vs. a splatted i32 scalar: folds into vmin.vx at e32/m4.
define <vscale x 8 x i32> @vmin_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vx_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vmin.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i32> @llvm.vp.smin.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i32> %v
}
909
; All-true mask variant: unmasked vmin.vx at e32/m4.
define <vscale x 8 x i32> @vmin_vx_nxv8i32_unmasked(<vscale x 8 x i32> %va, i32 %b, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vx_nxv8i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vmin.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i32> @llvm.vp.smin.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i32> %v
}
921
922declare <vscale x 16 x i32> @llvm.vp.smin.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>, <vscale x 16 x i1>, i32)
923
; Masked vector-vector vp.smin: single vmin.vv at e32/m8.
define <vscale x 16 x i32> @vmin_vv_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vv_nxv16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vmin.vv v8, v8, v16, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i32> @llvm.vp.smin.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i32> %v
}
933
; All-true mask variant: unmasked vmin.vv at e32/m8.
define <vscale x 16 x i32> @vmin_vv_nxv16i32_unmasked(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vv_nxv16i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vmin.vv v8, v8, v16
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i32> @llvm.vp.smin.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x i32> %v
}
943
; Masked vp.smin vs. a splatted i32 scalar: folds into vmin.vx at e32/m8.
define <vscale x 16 x i32> @vmin_vx_nxv16i32(<vscale x 16 x i32> %va, i32 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vx_nxv16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vmin.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
  %v = call <vscale x 16 x i32> @llvm.vp.smin.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i32> %v
}
955
; All-true mask variant: unmasked vmin.vx at e32/m8.
define <vscale x 16 x i32> @vmin_vx_nxv16i32_unmasked(<vscale x 16 x i32> %va, i32 %b, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vx_nxv16i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vmin.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
  %v = call <vscale x 16 x i32> @llvm.vp.smin.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x i32> %v
}
967
; Test that split-legalization works when the mask needs manual splitting.
969
970declare <vscale x 32 x i32> @llvm.vp.smin.nxv32i32(<vscale x 32 x i32>, <vscale x 32 x i32>, <vscale x 32 x i1>, i32)
971
; nxv32i32 does not fit in LMUL=8, so the op is split in two: the high-half vl is
; clamped to max(0, evl - 2*vlenb) via sltu/addi carry math, and the i1 mask is
; split with vslidedown (mask saved/restored through v24).
define <vscale x 32 x i32> @vmin_vx_nxv32i32(<vscale x 32 x i32> %va, i32 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vx_nxv32i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a2, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vmv1r.v v24, v0
; CHECK-NEXT:    csrr a2, vlenb
; CHECK-NEXT:    srli a3, a2, 2
; CHECK-NEXT:    slli a2, a2, 1
; CHECK-NEXT:    vslidedown.vx v0, v0, a3
; CHECK-NEXT:    sub a3, a1, a2
; CHECK-NEXT:    sltu a4, a1, a3
; CHECK-NEXT:    addi a4, a4, -1
; CHECK-NEXT:    and a3, a4, a3
; CHECK-NEXT:    vsetvli zero, a3, e32, m8, ta, ma
; CHECK-NEXT:    vmin.vx v16, v16, a0, v0.t
; CHECK-NEXT:    bltu a1, a2, .LBB80_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    mv a1, a2
; CHECK-NEXT:  .LBB80_2:
; CHECK-NEXT:    vmv1r.v v0, v24
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vmin.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 32 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <vscale x 32 x i32> %elt.head, <vscale x 32 x i32> poison, <vscale x 32 x i32> zeroinitializer
  %v = call <vscale x 32 x i32> @llvm.vp.smin.nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i32> %vb, <vscale x 32 x i1> %m, i32 %evl)
  ret <vscale x 32 x i32> %v
}
1000
; Unmasked split variant: same two-halves lowering as above but with no mask to
; save/split, so only the vl clamping arithmetic remains.
define <vscale x 32 x i32> @vmin_vx_nxv32i32_unmasked(<vscale x 32 x i32> %va, i32 %b, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vx_nxv32i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a2, vlenb
; CHECK-NEXT:    slli a2, a2, 1
; CHECK-NEXT:    sub a3, a1, a2
; CHECK-NEXT:    sltu a4, a1, a3
; CHECK-NEXT:    addi a4, a4, -1
; CHECK-NEXT:    and a3, a4, a3
; CHECK-NEXT:    vsetvli zero, a3, e32, m8, ta, ma
; CHECK-NEXT:    vmin.vx v16, v16, a0
; CHECK-NEXT:    bltu a1, a2, .LBB81_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    mv a1, a2
; CHECK-NEXT:  .LBB81_2:
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vmin.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 32 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <vscale x 32 x i32> %elt.head, <vscale x 32 x i32> poison, <vscale x 32 x i32> zeroinitializer
  %v = call <vscale x 32 x i32> @llvm.vp.smin.nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i32> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 32 x i32> %v
}
1024
1025; Test splitting when the %evl is a constant (albeit an unknown one).
1026
1027declare i32 @llvm.vscale.i32()
1028
1029; FIXME: The upper half of the operation is doing nothing.
1030; FIXME: The branches comparing vscale vs. vscale should be constant-foldable.
1031
; evl = vscale*8 covers only the low half, yet the split lowering still emits
; both halves and the runtime vl clamp (see the FIXMEs in this file: the
; high-half op and the vlenb comparisons should be foldable).
define <vscale x 32 x i32> @vmin_vx_nxv32i32_evl_nx8(<vscale x 32 x i32> %va, i32 %b, <vscale x 32 x i1> %m) {
; CHECK-LABEL: vmin_vx_nxv32i32_evl_nx8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vmv1r.v v24, v0
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    srli a3, a1, 2
; CHECK-NEXT:    slli a2, a1, 1
; CHECK-NEXT:    vslidedown.vx v0, v0, a3
; CHECK-NEXT:    sub a3, a1, a2
; CHECK-NEXT:    sltu a4, a1, a3
; CHECK-NEXT:    addi a4, a4, -1
; CHECK-NEXT:    and a3, a4, a3
; CHECK-NEXT:    vsetvli zero, a3, e32, m8, ta, ma
; CHECK-NEXT:    vmin.vx v16, v16, a0, v0.t
; CHECK-NEXT:    bltu a1, a2, .LBB82_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    mv a1, a2
; CHECK-NEXT:  .LBB82_2:
; CHECK-NEXT:    vmv1r.v v0, v24
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vmin.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 32 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <vscale x 32 x i32> %elt.head, <vscale x 32 x i32> poison, <vscale x 32 x i32> zeroinitializer
  %evl = call i32 @llvm.vscale.i32()
  %evl0 = mul i32 %evl, 8
  %v = call <vscale x 32 x i32> @llvm.vp.smin.nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i32> %vb, <vscale x 32 x i1> %m, i32 %evl0)
  ret <vscale x 32 x i32> %v
}
1062
1063; FIXME: The upper half of the operation is doing nothing but we don't catch
1064; that on RV64; we issue a usubsat(and (vscale x 16), 0xffffffff, vscale x 16)
1065; (the "original" %evl is the "and", due to known-bits issues with legalizing
1066; the i32 %evl to i64) and this isn't detected as 0.
1067; This could be resolved in the future with more detailed KnownBits analysis
1068; for ISD::VSCALE.
1069
; evl = vscale*16 (whole vector): riscv32 folds everything into one VLMAX-length
; vmin.vx, while riscv64 still emits a zero-length op on the high half (see the
; known-bits FIXME comment above this function).
define <vscale x 32 x i32> @vmin_vx_nxv32i32_evl_nx16(<vscale x 32 x i32> %va, i32 %b, <vscale x 32 x i1> %m) {
; RV32-LABEL: vmin_vx_nxv32i32_evl_nx16:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a1, zero, e32, m8, ta, ma
; RV32-NEXT:    vmin.vx v8, v8, a0, v0.t
; RV32-NEXT:    ret
;
; RV64-LABEL: vmin_vx_nxv32i32_evl_nx16:
; RV64:       # %bb.0:
; RV64-NEXT:    csrr a1, vlenb
; RV64-NEXT:    srli a1, a1, 2
; RV64-NEXT:    vsetvli a2, zero, e8, mf2, ta, ma
; RV64-NEXT:    vslidedown.vx v24, v0, a1
; RV64-NEXT:    vsetvli a1, zero, e32, m8, ta, ma
; RV64-NEXT:    vmin.vx v8, v8, a0, v0.t
; RV64-NEXT:    vmv1r.v v0, v24
; RV64-NEXT:    vsetivli zero, 0, e32, m8, ta, ma
; RV64-NEXT:    vmin.vx v16, v16, a0, v0.t
; RV64-NEXT:    ret
  %elt.head = insertelement <vscale x 32 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <vscale x 32 x i32> %elt.head, <vscale x 32 x i32> poison, <vscale x 32 x i32> zeroinitializer
  %evl = call i32 @llvm.vscale.i32()
  %evl0 = mul i32 %evl, 16
  %v = call <vscale x 32 x i32> @llvm.vp.smin.nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i32> %vb, <vscale x 32 x i1> %m, i32 %evl0)
  ret <vscale x 32 x i32> %v
}
1096
1097declare <vscale x 1 x i64> @llvm.vp.smin.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, i32)
1098
; Masked vector-vector vp.smin: single vmin.vv at e64/m1 (same on both targets).
define <vscale x 1 x i64> @vmin_vv_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vv_nxv1i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vmin.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i64> @llvm.vp.smin.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i64> %v
}
1108
; All-true mask variant: unmasked vmin.vv at e64/m1.
define <vscale x 1 x i64> @vmin_vv_nxv1i64_unmasked(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vv_nxv1i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vmin.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i64> @llvm.vp.smin.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i64> %v
}
1118
; i64 scalar splat: riscv32 has no 64-bit GPR, so the scalar pair is spilled to a
; stack slot and splatted with a zero-stride vlse64, then vmin.vv; riscv64 folds
; the scalar directly into vmin.vx.
define <vscale x 1 x i64> @vmin_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vmin_vx_nxv1i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT:    vlse64.v v9, (a0), zero
; RV32-NEXT:    vmin.vv v8, v8, v9, v0.t
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    .cfi_def_cfa_offset 0
; RV32-NEXT:    ret
;
; RV64-LABEL: vmin_vx_nxv1i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT:    vmin.vx v8, v8, a0, v0.t
; RV64-NEXT:    ret
  %elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i64> @llvm.vp.smin.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i64> %v
}
1144
; Unmasked i64 splat variant: same stack-slot + zero-stride vlse64 splat on
; riscv32, plain vmin.vx on riscv64, both without v0.t.
define <vscale x 1 x i64> @vmin_vx_nxv1i64_unmasked(<vscale x 1 x i64> %va, i64 %b, i32 zeroext %evl) {
; RV32-LABEL: vmin_vx_nxv1i64_unmasked:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT:    vlse64.v v9, (a0), zero
; RV32-NEXT:    vmin.vv v8, v8, v9
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    .cfi_def_cfa_offset 0
; RV32-NEXT:    ret
;
; RV64-LABEL: vmin_vx_nxv1i64_unmasked:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT:    vmin.vx v8, v8, a0
; RV64-NEXT:    ret
  %elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i64> @llvm.vp.smin.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i64> %v
}
1170
1171declare <vscale x 2 x i64> @llvm.vp.smin.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i1>, i32)
1172
; Masked vector-vector vp.smin: single vmin.vv at e64/m2.
define <vscale x 2 x i64> @vmin_vv_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vv_nxv2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vmin.vv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i64> @llvm.vp.smin.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i64> %v
}
1182
; All-true mask variant: unmasked vmin.vv at e64/m2.
define <vscale x 2 x i64> @vmin_vv_nxv2i64_unmasked(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vv_nxv2i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vmin.vv v8, v8, v10
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i64> @llvm.vp.smin.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i64> %v
}
1192
; i64 scalar splat at m2: riscv32 goes through a stack slot + zero-stride
; vlse64 then vmin.vv; riscv64 folds the scalar into vmin.vx.
define <vscale x 2 x i64> @vmin_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vmin_vx_nxv2i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT:    vlse64.v v10, (a0), zero
; RV32-NEXT:    vmin.vv v8, v8, v10, v0.t
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    .cfi_def_cfa_offset 0
; RV32-NEXT:    ret
;
; RV64-LABEL: vmin_vx_nxv2i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT:    vmin.vx v8, v8, a0, v0.t
; RV64-NEXT:    ret
  %elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i64> @llvm.vp.smin.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i64> %v
}
1218
; Unmasked i64 splat variant at m2: stack-slot splat on riscv32, vmin.vx on
; riscv64, both without v0.t.
define <vscale x 2 x i64> @vmin_vx_nxv2i64_unmasked(<vscale x 2 x i64> %va, i64 %b, i32 zeroext %evl) {
; RV32-LABEL: vmin_vx_nxv2i64_unmasked:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT:    vlse64.v v10, (a0), zero
; RV32-NEXT:    vmin.vv v8, v8, v10
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    .cfi_def_cfa_offset 0
; RV32-NEXT:    ret
;
; RV64-LABEL: vmin_vx_nxv2i64_unmasked:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT:    vmin.vx v8, v8, a0
; RV64-NEXT:    ret
  %elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i64> @llvm.vp.smin.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i64> %v
}
1244
1245declare <vscale x 4 x i64> @llvm.vp.smin.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>, <vscale x 4 x i1>, i32)
1246
; Masked vector-vector vp.smin on nxv4i64 (LMUL=4): expect a single
; vsetvli with the EVL from a0 followed by a masked vmin.vv (v0.t).
define <vscale x 4 x i64> @vmin_vv_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vv_nxv4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vmin.vv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i64> @llvm.vp.smin.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i64> %v
}
1256
; Unmasked (all-ones mask) vector-vector vp.smin on nxv4i64: the splat-true
; mask folds away, so the vmin.vv carries no v0.t operand.
define <vscale x 4 x i64> @vmin_vv_nxv4i64_unmasked(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vv_nxv4i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vmin.vv v8, v8, v12
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i64> @llvm.vp.smin.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i64> %v
}
1266
; Masked vp.smin of an nxv4i64 vector with a splatted i64 scalar.  On RV32 the
; i64 scalar arrives split across a0/a1 and is splatted through a stack slot
; with a zero-stride vlse64.v, then a masked vmin.vv; RV64 can use vmin.vx
; with the scalar in a0 directly.
define <vscale x 4 x i64> @vmin_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vmin_vx_nxv4i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT:    vlse64.v v12, (a0), zero
; RV32-NEXT:    vmin.vv v8, v8, v12, v0.t
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    .cfi_def_cfa_offset 0
; RV32-NEXT:    ret
;
; RV64-LABEL: vmin_vx_nxv4i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT:    vmin.vx v8, v8, a0, v0.t
; RV64-NEXT:    ret
  %elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i64> @llvm.vp.smin.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i64> %v
}
1292
; Unmasked variant of vmin_vx_nxv4i64 (mask is splat true): same RV32
; stack-splat sequence via zero-stride vlse64.v, but the vmin carries no
; v0.t; RV64 uses an unmasked vmin.vx.
define <vscale x 4 x i64> @vmin_vx_nxv4i64_unmasked(<vscale x 4 x i64> %va, i64 %b, i32 zeroext %evl) {
; RV32-LABEL: vmin_vx_nxv4i64_unmasked:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT:    vlse64.v v12, (a0), zero
; RV32-NEXT:    vmin.vv v8, v8, v12
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    .cfi_def_cfa_offset 0
; RV32-NEXT:    ret
;
; RV64-LABEL: vmin_vx_nxv4i64_unmasked:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT:    vmin.vx v8, v8, a0
; RV64-NEXT:    ret
  %elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i64> @llvm.vp.smin.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i64> %v
}
1318
1319declare <vscale x 8 x i64> @llvm.vp.smin.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>, <vscale x 8 x i1>, i32)
1320
; Masked vector-vector vp.smin on nxv8i64 (LMUL=8): expect a single
; vsetvli with the EVL from a0 followed by a masked vmin.vv (v0.t).
define <vscale x 8 x i64> @vmin_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vv_nxv8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vmin.vv v8, v8, v16, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i64> @llvm.vp.smin.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i64> %v
}
1330
; Unmasked (all-ones mask) vector-vector vp.smin on nxv8i64: the splat-true
; mask folds away, so the vmin.vv carries no v0.t operand.
define <vscale x 8 x i64> @vmin_vv_nxv8i64_unmasked(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vv_nxv8i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vmin.vv v8, v8, v16
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i64> @llvm.vp.smin.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i64> %v
}
1340
; Masked vp.smin of an nxv8i64 vector with a splatted i64 scalar.  On RV32 the
; i64 scalar arrives split across a0/a1 and is splatted through a stack slot
; with a zero-stride vlse64.v, then a masked vmin.vv; RV64 can use vmin.vx
; with the scalar in a0 directly.
define <vscale x 8 x i64> @vmin_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vmin_vx_nxv8i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT:    vlse64.v v16, (a0), zero
; RV32-NEXT:    vmin.vv v8, v8, v16, v0.t
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    .cfi_def_cfa_offset 0
; RV32-NEXT:    ret
;
; RV64-LABEL: vmin_vx_nxv8i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT:    vmin.vx v8, v8, a0, v0.t
; RV64-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i64> @llvm.vp.smin.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i64> %v
}
1366
; Unmasked variant of vmin_vx_nxv8i64 (mask is splat true): same RV32
; stack-splat sequence via zero-stride vlse64.v, but the vmin carries no
; v0.t; RV64 uses an unmasked vmin.vx.
define <vscale x 8 x i64> @vmin_vx_nxv8i64_unmasked(<vscale x 8 x i64> %va, i64 %b, i32 zeroext %evl) {
; RV32-LABEL: vmin_vx_nxv8i64_unmasked:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT:    vlse64.v v16, (a0), zero
; RV32-NEXT:    vmin.vv v8, v8, v16
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    .cfi_def_cfa_offset 0
; RV32-NEXT:    ret
;
; RV64-LABEL: vmin_vx_nxv8i64_unmasked:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT:    vmin.vx v8, v8, a0
; RV64-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i64> @llvm.vp.smin.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i64> %v
}
1392