; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+v -target-abi=ilp32d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfhmin,+v -target-abi=ilp32d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfhmin,+v -target-abi=lp64d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN

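; The +zvfh runs select vp.minnum on f16 vectors directly to a masked
; vfmin.vv. The +zvfhmin runs only provide f16<->f32 conversions, so the f16
; cases below are promoted: both operands are widened with vfwcvt.f.f.v, the
; minimum is computed at e32 (at twice the LMUL), and the result is narrowed
; back with vfncvt.f.f.w.
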
declare <2 x half> @llvm.vp.minnum.v2f16(<2 x half>, <2 x half>, <2 x i1>, i32)

define <2 x half> @vfmin_vv_v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfmin_vv_v2f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; ZVFH-NEXT:    vfmin.vv v8, v8, v9, v0.t
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfmin_vv_v2f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v9, v0.t
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8, v0.t
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT:    vfmin.vv v9, v9, v10, v0.t
; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9, v0.t
; ZVFHMIN-NEXT:    ret
  %v = call <2 x half> @llvm.vp.minnum.v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> %m, i32 %evl)
  ret <2 x half> %v
}

define <2 x half> @vfmin_vv_v2f16_unmasked(<2 x half> %va, <2 x half> %vb, i32 zeroext %evl) {
; ZVFH-LABEL: vfmin_vv_v2f16_unmasked:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; ZVFH-NEXT:    vfmin.vv v8, v8, v9
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfmin_vv_v2f16_unmasked:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v9
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT:    vfmin.vv v9, v9, v10
; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
; ZVFHMIN-NEXT:    ret
  %v = call <2 x half> @llvm.vp.minnum.v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> splat (i1 true), i32 %evl)
  ret <2 x half> %v
}

declare <4 x half> @llvm.vp.minnum.v4f16(<4 x half>, <4 x half>, <4 x i1>, i32)

define <4 x half> @vfmin_vv_v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfmin_vv_v4f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; ZVFH-NEXT:    vfmin.vv v8, v8, v9, v0.t
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfmin_vv_v4f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v9, v0.t
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8, v0.t
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
; ZVFHMIN-NEXT:    vfmin.vv v9, v9, v10, v0.t
; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9, v0.t
; ZVFHMIN-NEXT:    ret
  %v = call <4 x half> @llvm.vp.minnum.v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> %m, i32 %evl)
  ret <4 x half> %v
}

define <4 x half> @vfmin_vv_v4f16_unmasked(<4 x half> %va, <4 x half> %vb, i32 zeroext %evl) {
; ZVFH-LABEL: vfmin_vv_v4f16_unmasked:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; ZVFH-NEXT:    vfmin.vv v8, v8, v9
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfmin_vv_v4f16_unmasked:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v9
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
; ZVFHMIN-NEXT:    vfmin.vv v9, v9, v10
; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
; ZVFHMIN-NEXT:    ret
  %v = call <4 x half> @llvm.vp.minnum.v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> splat (i1 true), i32 %evl)
  ret <4 x half> %v
}

declare <8 x half> @llvm.vp.minnum.v8f16(<8 x half>, <8 x half>, <8 x i1>, i32)

define <8 x half> @vfmin_vv_v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfmin_vv_v8f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; ZVFH-NEXT:    vfmin.vv v8, v8, v9, v0.t
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfmin_vv_v8f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v9, v0.t
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v8, v0.t
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
; ZVFHMIN-NEXT:    vfmin.vv v10, v12, v10, v0.t
; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10, v0.t
; ZVFHMIN-NEXT:    ret
  %v = call <8 x half> @llvm.vp.minnum.v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i32 %evl)
  ret <8 x half> %v
}

define <8 x half> @vfmin_vv_v8f16_unmasked(<8 x half> %va, <8 x half> %vb, i32 zeroext %evl) {
; ZVFH-LABEL: vfmin_vv_v8f16_unmasked:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; ZVFH-NEXT:    vfmin.vv v8, v8, v9
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfmin_vv_v8f16_unmasked:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v9
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v8
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
; ZVFHMIN-NEXT:    vfmin.vv v10, v12, v10
; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10
; ZVFHMIN-NEXT:    ret
  %v = call <8 x half> @llvm.vp.minnum.v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> splat (i1 true), i32 %evl)
  ret <8 x half> %v
}

declare <16 x half> @llvm.vp.minnum.v16f16(<16 x half>, <16 x half>, <16 x i1>, i32)

define <16 x half> @vfmin_vv_v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfmin_vv_v16f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; ZVFH-NEXT:    vfmin.vv v8, v8, v10, v0.t
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfmin_vv_v16f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v10, v0.t
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v8, v0.t
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT:    vfmin.vv v12, v16, v12, v0.t
; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12, v0.t
; ZVFHMIN-NEXT:    ret
  %v = call <16 x half> @llvm.vp.minnum.v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1> %m, i32 %evl)
  ret <16 x half> %v
}

define <16 x half> @vfmin_vv_v16f16_unmasked(<16 x half> %va, <16 x half> %vb, i32 zeroext %evl) {
; ZVFH-LABEL: vfmin_vv_v16f16_unmasked:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; ZVFH-NEXT:    vfmin.vv v8, v8, v10
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfmin_vv_v16f16_unmasked:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v10
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v8
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT:    vfmin.vv v12, v16, v12
; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12
; ZVFHMIN-NEXT:    ret
  %v = call <16 x half> @llvm.vp.minnum.v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1> splat (i1 true), i32 %evl)
  ret <16 x half> %v
}

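; f32 and f64 elements are handled identically with Zvfh and Zvfhmin, so the
; remaining tests share a single set of CHECK lines.
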
declare <2 x float> @llvm.vp.minnum.v2f32(<2 x float>, <2 x float>, <2 x i1>, i32)

define <2 x float> @vfmin_vv_v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vfmin.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <2 x float> @llvm.vp.minnum.v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> %m, i32 %evl)
  ret <2 x float> %v
}

define <2 x float> @vfmin_vv_v2f32_unmasked(<2 x float> %va, <2 x float> %vb, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v2f32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vfmin.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <2 x float> @llvm.vp.minnum.v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> splat (i1 true), i32 %evl)
  ret <2 x float> %v
}

declare <4 x float> @llvm.vp.minnum.v4f32(<4 x float>, <4 x float>, <4 x i1>, i32)

define <4 x float> @vfmin_vv_v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vfmin.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <4 x float> @llvm.vp.minnum.v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 %evl)
  ret <4 x float> %v
}

define <4 x float> @vfmin_vv_v4f32_unmasked(<4 x float> %va, <4 x float> %vb, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v4f32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vfmin.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <4 x float> @llvm.vp.minnum.v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> splat (i1 true), i32 %evl)
  ret <4 x float> %v
}

declare <8 x float> @llvm.vp.minnum.v8f32(<8 x float>, <8 x float>, <8 x i1>, i32)

define <8 x float> @vfmin_vv_v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vfmin.vv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
  %v = call <8 x float> @llvm.vp.minnum.v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> %m, i32 %evl)
  ret <8 x float> %v
}

define <8 x float> @vfmin_vv_v8f32_unmasked(<8 x float> %va, <8 x float> %vb, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v8f32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vfmin.vv v8, v8, v10
; CHECK-NEXT:    ret
  %v = call <8 x float> @llvm.vp.minnum.v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> splat (i1 true), i32 %evl)
  ret <8 x float> %v
}

declare <16 x float> @llvm.vp.minnum.v16f32(<16 x float>, <16 x float>, <16 x i1>, i32)

define <16 x float> @vfmin_vv_v16f32(<16 x float> %va, <16 x float> %vb, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v16f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vfmin.vv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
  %v = call <16 x float> @llvm.vp.minnum.v16f32(<16 x float> %va, <16 x float> %vb, <16 x i1> %m, i32 %evl)
  ret <16 x float> %v
}

define <16 x float> @vfmin_vv_v16f32_unmasked(<16 x float> %va, <16 x float> %vb, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v16f32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vfmin.vv v8, v8, v12
; CHECK-NEXT:    ret
  %v = call <16 x float> @llvm.vp.minnum.v16f32(<16 x float> %va, <16 x float> %vb, <16 x i1> splat (i1 true), i32 %evl)
  ret <16 x float> %v
}

declare <2 x double> @llvm.vp.minnum.v2f64(<2 x double>, <2 x double>, <2 x i1>, i32)

define <2 x double> @vfmin_vv_v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vfmin.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <2 x double> @llvm.vp.minnum.v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1> %m, i32 %evl)
  ret <2 x double> %v
}

define <2 x double> @vfmin_vv_v2f64_unmasked(<2 x double> %va, <2 x double> %vb, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v2f64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vfmin.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <2 x double> @llvm.vp.minnum.v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1> splat (i1 true), i32 %evl)
  ret <2 x double> %v
}

declare <4 x double> @llvm.vp.minnum.v4f64(<4 x double>, <4 x double>, <4 x i1>, i32)

define <4 x double> @vfmin_vv_v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vfmin.vv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
  %v = call <4 x double> @llvm.vp.minnum.v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1> %m, i32 %evl)
  ret <4 x double> %v
}

define <4 x double> @vfmin_vv_v4f64_unmasked(<4 x double> %va, <4 x double> %vb, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v4f64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vfmin.vv v8, v8, v10
; CHECK-NEXT:    ret
  %v = call <4 x double> @llvm.vp.minnum.v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1> splat (i1 true), i32 %evl)
  ret <4 x double> %v
}

declare <8 x double> @llvm.vp.minnum.v8f64(<8 x double>, <8 x double>, <8 x i1>, i32)

define <8 x double> @vfmin_vv_v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vfmin.vv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
  %v = call <8 x double> @llvm.vp.minnum.v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> %m, i32 %evl)
  ret <8 x double> %v
}

define <8 x double> @vfmin_vv_v8f64_unmasked(<8 x double> %va, <8 x double> %vb, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v8f64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vfmin.vv v8, v8, v12
; CHECK-NEXT:    ret
  %v = call <8 x double> @llvm.vp.minnum.v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> splat (i1 true), i32 %evl)
  ret <8 x double> %v
}

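; <15 x double> is a non-power-of-two type but still fits in a single LMUL=8
; register group at the minimum VLEN of 128 bits, so no splitting is needed.
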
declare <15 x double> @llvm.vp.minnum.v15f64(<15 x double>, <15 x double>, <15 x i1>, i32)

define <15 x double> @vfmin_vv_v15f64(<15 x double> %va, <15 x double> %vb, <15 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v15f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vfmin.vv v8, v8, v16, v0.t
; CHECK-NEXT:    ret
  %v = call <15 x double> @llvm.vp.minnum.v15f64(<15 x double> %va, <15 x double> %vb, <15 x i1> %m, i32 %evl)
  ret <15 x double> %v
}

define <15 x double> @vfmin_vv_v15f64_unmasked(<15 x double> %va, <15 x double> %vb, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v15f64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vfmin.vv v8, v8, v16
; CHECK-NEXT:    ret
  %v = call <15 x double> @llvm.vp.minnum.v15f64(<15 x double> %va, <15 x double> %vb, <15 x i1> splat (i1 true), i32 %evl)
  ret <15 x double> %v
}

declare <16 x double> @llvm.vp.minnum.v16f64(<16 x double>, <16 x double>, <16 x i1>, i32)

define <16 x double> @vfmin_vv_v16f64(<16 x double> %va, <16 x double> %vb, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v16f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vfmin.vv v8, v8, v16, v0.t
; CHECK-NEXT:    ret
  %v = call <16 x double> @llvm.vp.minnum.v16f64(<16 x double> %va, <16 x double> %vb, <16 x i1> %m, i32 %evl)
  ret <16 x double> %v
}

define <16 x double> @vfmin_vv_v16f64_unmasked(<16 x double> %va, <16 x double> %vb, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v16f64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vfmin.vv v8, v8, v16
; CHECK-NEXT:    ret
  %v = call <16 x double> @llvm.vp.minnum.v16f64(<16 x double> %va, <16 x double> %vb, <16 x i1> splat (i1 true), i32 %evl)
  ret <16 x double> %v
}

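; <32 x double> does not fit in a single LMUL=8 register group, so the
; operation is split into two <16 x double> halves. %va occupies v8-v23 and
; %vb is passed in memory (loaded from a0 and a0+128). The low half runs with
; min(evl, 16); the high half runs with the saturated remainder
; max(evl - 16, 0), computed by the sltu/addi/and sequence. In the masked
; version the upper mask bits are extracted with vslidedown.vi and the second
; half's operands are spilled and reloaded around the first vfmin.vv.
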
declare <32 x double> @llvm.vp.minnum.v32f64(<32 x double>, <32 x double>, <32 x i1>, i32)

define <32 x double> @vfmin_vv_v32f64(<32 x double> %va, <32 x double> %vb, <32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v32f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    slli a1, a1, 4
; CHECK-NEXT:    sub sp, sp, a1
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    slli a1, a1, 3
; CHECK-NEXT:    add a1, sp, a1
; CHECK-NEXT:    addi a1, a1, 16
; CHECK-NEXT:    vs8r.v v16, (a1) # Unknown-size Folded Spill
; CHECK-NEXT:    addi a1, a0, 128
; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT:    vle64.v v16, (a1)
; CHECK-NEXT:    addi a1, sp, 16
; CHECK-NEXT:    vs8r.v v16, (a1) # Unknown-size Folded Spill
; CHECK-NEXT:    vle64.v v16, (a0)
; CHECK-NEXT:    li a1, 16
; CHECK-NEXT:    mv a0, a2
; CHECK-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT:    vslidedown.vi v24, v0, 2
; CHECK-NEXT:    bltu a2, a1, .LBB26_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    li a0, 16
; CHECK-NEXT:  .LBB26_2:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vfmin.vv v8, v8, v16, v0.t
; CHECK-NEXT:    addi a0, a2, -16
; CHECK-NEXT:    sltu a1, a2, a0
; CHECK-NEXT:    addi a1, a1, -1
; CHECK-NEXT:    and a0, a1, a0
; CHECK-NEXT:    vmv1r.v v0, v24
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    slli a1, a1, 3
; CHECK-NEXT:    add a1, sp, a1
; CHECK-NEXT:    addi a1, a1, 16
; CHECK-NEXT:    vl8r.v v16, (a1) # Unknown-size Folded Reload
; CHECK-NEXT:    addi a1, sp, 16
; CHECK-NEXT:    vl8r.v v24, (a1) # Unknown-size Folded Reload
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vfmin.vv v16, v16, v24, v0.t
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 4
; CHECK-NEXT:    add sp, sp, a0
; CHECK-NEXT:    .cfi_def_cfa sp, 16
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    .cfi_def_cfa_offset 0
; CHECK-NEXT:    ret
  %v = call <32 x double> @llvm.vp.minnum.v32f64(<32 x double> %va, <32 x double> %vb, <32 x i1> %m, i32 %evl)
  ret <32 x double> %v
}

define <32 x double> @vfmin_vv_v32f64_unmasked(<32 x double> %va, <32 x double> %vb, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v32f64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a1, a0, 128
; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT:    vle64.v v24, (a1)
; CHECK-NEXT:    vle64.v v0, (a0)
; CHECK-NEXT:    li a1, 16
; CHECK-NEXT:    mv a0, a2
; CHECK-NEXT:    bltu a2, a1, .LBB27_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    li a0, 16
; CHECK-NEXT:  .LBB27_2:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vfmin.vv v8, v8, v0
; CHECK-NEXT:    addi a0, a2, -16
; CHECK-NEXT:    sltu a1, a2, a0
; CHECK-NEXT:    addi a1, a1, -1
; CHECK-NEXT:    and a0, a1, a0
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vfmin.vv v16, v16, v24
; CHECK-NEXT:    ret
  %v = call <32 x double> @llvm.vp.minnum.v32f64(<32 x double> %va, <32 x double> %vb, <32 x i1> splat (i1 true), i32 %evl)
  ret <32 x double> %v
}