; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefixes=CHECK,RV64

declare <vscale x 8 x i7> @llvm.vp.udiv.nxv8i7(<vscale x 8 x i7>, <vscale x 8 x i7>, <vscale x 8 x i1>, i32)

define <vscale x 8 x i7> @vdivu_vx_nxv8i7(<vscale x 8 x i7> %a, i7 signext %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vx_nxv8i7:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a2, 127
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vmv.v.x v9, a0
; CHECK-NEXT:    vand.vx v8, v8, a2, v0.t
; CHECK-NEXT:    vand.vx v9, v9, a2, v0.t
; CHECK-NEXT:    vdivu.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i7> poison, i7 %b, i32 0
  %vb = shufflevector <vscale x 8 x i7> %elt.head, <vscale x 8 x i7> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i7> @llvm.vp.udiv.nxv8i7(<vscale x 8 x i7> %a, <vscale x 8 x i7> %vb, <vscale x 8 x i1> %mask, i32 %evl)
  ret <vscale x 8 x i7> %v
}

declare <vscale x 1 x i8> @llvm.vp.udiv.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)

define <vscale x 1 x i8> @vdivu_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vv_nxv1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vdivu.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i8> @llvm.vp.udiv.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i8> %v
}

define <vscale x 1 x i8> @vdivu_vv_nxv1i8_unmasked(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vv_nxv1i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vdivu.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i8> @llvm.vp.udiv.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i8> %v
}

define <vscale x 1 x i8> @vdivu_vx_nxv1i8(<vscale x 1 x i8> %va, i8 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vx_nxv1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vdivu.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i8> @llvm.vp.udiv.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i8> %v
}

define <vscale x 1 x i8> @vdivu_vx_nxv1i8_unmasked(<vscale x 1 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vx_nxv1i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vdivu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i8> @llvm.vp.udiv.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i8> %v
}

declare <vscale x 2 x i8> @llvm.vp.udiv.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i1>, i32)

define <vscale x 2 x i8> @vdivu_vv_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vv_nxv2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vdivu.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i8> @llvm.vp.udiv.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i8> %v
}

define <vscale x 2 x i8> @vdivu_vv_nxv2i8_unmasked(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vv_nxv2i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vdivu.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i8> @llvm.vp.udiv.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i8> %v
}

define <vscale x 2 x i8> @vdivu_vx_nxv2i8(<vscale x 2 x i8> %va, i8 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vx_nxv2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vdivu.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i8> @llvm.vp.udiv.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i8> %v
}

define <vscale x 2 x i8> @vdivu_vx_nxv2i8_unmasked(<vscale x 2 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vx_nxv2i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vdivu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i8> @llvm.vp.udiv.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i8> %v
}

declare <vscale x 3 x i8> @llvm.vp.udiv.nxv3i8(<vscale x 3 x i8>, <vscale x 3 x i8>, <vscale x 3 x i1>, i32)

define <vscale x 3 x i8> @vdivu_vv_nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, <vscale x 3 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vv_nxv3i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vdivu.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 3 x i8> @llvm.vp.udiv.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, <vscale x 3 x i1> %m, i32 %evl)
  ret <vscale x 3 x i8> %v
}

declare <vscale x 4 x i8> @llvm.vp.udiv.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i1>, i32)

define <vscale x 4 x i8> @vdivu_vv_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vv_nxv4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vdivu.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i8> @llvm.vp.udiv.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i8> %v
}

define <vscale x 4 x i8> @vdivu_vv_nxv4i8_unmasked(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vv_nxv4i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vdivu.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i8> @llvm.vp.udiv.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i8> %v
}

define <vscale x 4 x i8> @vdivu_vx_nxv4i8(<vscale x 4 x i8> %va, i8 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vx_nxv4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vdivu.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i8> @llvm.vp.udiv.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i8> %v
}

define <vscale x 4 x i8> @vdivu_vx_nxv4i8_unmasked(<vscale x 4 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vx_nxv4i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vdivu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i8> @llvm.vp.udiv.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i8> %v
}

declare <vscale x 8 x i8> @llvm.vp.udiv.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i1>, i32)

define <vscale x 8 x i8> @vdivu_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vv_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vdivu.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i8> @llvm.vp.udiv.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i8> %v
}

define <vscale x 8 x i8> @vdivu_vv_nxv8i8_unmasked(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vv_nxv8i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vdivu.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i8> @llvm.vp.udiv.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i8> %v
}

define <vscale x 8 x i8> @vdivu_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vx_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vdivu.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i8> @llvm.vp.udiv.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i8> %v
}

define <vscale x 8 x i8> @vdivu_vx_nxv8i8_unmasked(<vscale x 8 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vx_nxv8i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vdivu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i8> @llvm.vp.udiv.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i8> %v
}

declare <vscale x 16 x i8> @llvm.vp.udiv.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i1>, i32)

define <vscale x 16 x i8> @vdivu_vv_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vv_nxv16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vdivu.vv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i8> @llvm.vp.udiv.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i8> %v
}

define <vscale x 16 x i8> @vdivu_vv_nxv16i8_unmasked(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vv_nxv16i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vdivu.vv v8, v8, v10
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i8> @llvm.vp.udiv.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x i8> %v
}

define <vscale x 16 x i8> @vdivu_vx_nxv16i8(<vscale x 16 x i8> %va, i8 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vx_nxv16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vdivu.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
  %v = call <vscale x 16 x i8> @llvm.vp.udiv.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i8> %v
}

define <vscale x 16 x i8> @vdivu_vx_nxv16i8_unmasked(<vscale x 16 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vx_nxv16i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vdivu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
  %v = call <vscale x 16 x i8> @llvm.vp.udiv.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x i8> %v
}

declare <vscale x 32 x i8> @llvm.vp.udiv.nxv32i8(<vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i1>, i32)

define <vscale x 32 x i8> @vdivu_vv_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vv_nxv32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vdivu.vv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 32 x i8> @llvm.vp.udiv.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 %evl)
  ret <vscale x 32 x i8> %v
}

define <vscale x 32 x i8> @vdivu_vv_nxv32i8_unmasked(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vv_nxv32i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vdivu.vv v8, v8, v12
; CHECK-NEXT:    ret
  %v = call <vscale x 32 x i8> @llvm.vp.udiv.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 32 x i8> %v
}

define <vscale x 32 x i8> @vdivu_vx_nxv32i8(<vscale x 32 x i8> %va, i8 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vx_nxv32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    vdivu.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
  %v = call <vscale x 32 x i8> @llvm.vp.udiv.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> %m, i32 %evl)
  ret <vscale x 32 x i8> %v
}

define <vscale x 32 x i8> @vdivu_vx_nxv32i8_unmasked(<vscale x 32 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vx_nxv32i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    vdivu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
  %v = call <vscale x 32 x i8> @llvm.vp.udiv.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 32 x i8> %v
}

declare <vscale x 64 x i8> @llvm.vp.udiv.nxv64i8(<vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i1>, i32)

define <vscale x 64 x i8> @vdivu_vv_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vv_nxv64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    vdivu.vv v8, v8, v16, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 64 x i8> @llvm.vp.udiv.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 %evl)
  ret <vscale x 64 x i8> %v
}

define <vscale x 64 x i8> @vdivu_vv_nxv64i8_unmasked(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vv_nxv64i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    vdivu.vv v8, v8, v16
; CHECK-NEXT:    ret
  %v = call <vscale x 64 x i8> @llvm.vp.udiv.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 64 x i8> %v
}

define <vscale x 64 x i8> @vdivu_vx_nxv64i8(<vscale x 64 x i8> %va, i8 %b, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vx_nxv64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT:    vdivu.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
  %v = call <vscale x 64 x i8> @llvm.vp.udiv.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> %m, i32 %evl)
  ret <vscale x 64 x i8> %v
}

define <vscale x 64 x i8> @vdivu_vx_nxv64i8_unmasked(<vscale x 64 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vx_nxv64i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT:    vdivu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
  %v = call <vscale x 64 x i8> @llvm.vp.udiv.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 64 x i8> %v
}

declare <vscale x 1 x i16> @llvm.vp.udiv.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)

define <vscale x 1 x i16> @vdivu_vv_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vv_nxv1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vdivu.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i16> @llvm.vp.udiv.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i16> %v
}

define <vscale x 1 x i16> @vdivu_vv_nxv1i16_unmasked(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vv_nxv1i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vdivu.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i16> @llvm.vp.udiv.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i16> %v
}

define <vscale x 1 x i16> @vdivu_vx_nxv1i16(<vscale x 1 x i16> %va, i16 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vx_nxv1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vdivu.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i16> @llvm.vp.udiv.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i16> %v
}

define <vscale x 1 x i16> @vdivu_vx_nxv1i16_unmasked(<vscale x 1 x i16> %va, i16 %b, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vx_nxv1i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vdivu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i16> @llvm.vp.udiv.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i16> %v
}

declare <vscale x 2 x i16> @llvm.vp.udiv.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i1>, i32)

define <vscale x 2 x i16> @vdivu_vv_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vv_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vdivu.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i16> @llvm.vp.udiv.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i16> %v
}

define <vscale x 2 x i16> @vdivu_vv_nxv2i16_unmasked(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vv_nxv2i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vdivu.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i16> @llvm.vp.udiv.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i16> %v
}

define <vscale x 2 x i16> @vdivu_vx_nxv2i16(<vscale x 2 x i16> %va, i16 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vx_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vdivu.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i16> @llvm.vp.udiv.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i16> %v
}

define <vscale x 2 x i16> @vdivu_vx_nxv2i16_unmasked(<vscale x 2 x i16> %va, i16 %b, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vx_nxv2i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vdivu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i16> @llvm.vp.udiv.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i16> %v
}

declare <vscale x 4 x i16> @llvm.vp.udiv.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i1>, i32)

define <vscale x 4 x i16> @vdivu_vv_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vv_nxv4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vdivu.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i16> @llvm.vp.udiv.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i16> %v
}

define <vscale x 4 x i16> @vdivu_vv_nxv4i16_unmasked(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vv_nxv4i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vdivu.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i16> @llvm.vp.udiv.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i16> %v
}

define <vscale x 4 x i16> @vdivu_vx_nxv4i16(<vscale x 4 x i16> %va, i16 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vx_nxv4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vdivu.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i16> @llvm.vp.udiv.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i16> %v
}

define <vscale x 4 x i16> @vdivu_vx_nxv4i16_unmasked(<vscale x 4 x i16> %va, i16 %b, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vx_nxv4i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vdivu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i16> @llvm.vp.udiv.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i16> %v
}

declare <vscale x 8 x i16> @llvm.vp.udiv.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i1>, i32)

define <vscale x 8 x i16> @vdivu_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vv_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vdivu.vv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i16> @llvm.vp.udiv.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i16> %v
}

define <vscale x 8 x i16> @vdivu_vv_nxv8i16_unmasked(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vv_nxv8i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vdivu.vv v8, v8, v10
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i16> @llvm.vp.udiv.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i16> %v
}

define <vscale x 8 x i16> @vdivu_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vx_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vdivu.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i16> @llvm.vp.udiv.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i16> %v
}

define <vscale x 8 x i16> @vdivu_vx_nxv8i16_unmasked(<vscale x 8 x i16> %va, i16 %b, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vx_nxv8i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vdivu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i16> @llvm.vp.udiv.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i16> %v
}

declare <vscale x 16 x i16> @llvm.vp.udiv.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i1>, i32)

define <vscale x 16 x i16> @vdivu_vv_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vv_nxv16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vdivu.vv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i16> @llvm.vp.udiv.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i16> %v
}

define <vscale x 16 x i16> @vdivu_vv_nxv16i16_unmasked(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vv_nxv16i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vdivu.vv v8, v8, v12
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i16> @llvm.vp.udiv.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x i16> %v
}

define <vscale x 16 x i16> @vdivu_vx_nxv16i16(<vscale x 16 x i16> %va, i16 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vx_nxv16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vdivu.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
  %v = call <vscale x 16 x i16> @llvm.vp.udiv.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i16> %v
}

define <vscale x 16 x i16> @vdivu_vx_nxv16i16_unmasked(<vscale x 16 x i16> %va, i16 %b, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vx_nxv16i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vdivu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
  %v = call <vscale x 16 x i16> @llvm.vp.udiv.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x i16> %v
}

declare <vscale x 32 x i16> @llvm.vp.udiv.nxv32i16(<vscale x 32 x i16>, <vscale x 32 x i16>, <vscale x 32 x i1>, i32)

define <vscale x 32 x i16> @vdivu_vv_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vv_nxv32i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vdivu.vv v8, v8, v16, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 32 x i16> @llvm.vp.udiv.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 %evl)
  ret <vscale x 32 x i16> %v
}

601define <vscale x 32 x i16> @vdivu_vv_nxv32i16_unmasked(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, i32 zeroext %evl) {
602; CHECK-LABEL: vdivu_vv_nxv32i16_unmasked:
603; CHECK:       # %bb.0:
604; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
605; CHECK-NEXT:    vdivu.vv v8, v8, v16
606; CHECK-NEXT:    ret
607  %v = call <vscale x 32 x i16> @llvm.vp.udiv.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
608  ret <vscale x 32 x i16> %v
609}
610
611define <vscale x 32 x i16> @vdivu_vx_nxv32i16(<vscale x 32 x i16> %va, i16 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
612; CHECK-LABEL: vdivu_vx_nxv32i16:
613; CHECK:       # %bb.0:
614; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
615; CHECK-NEXT:    vdivu.vx v8, v8, a0, v0.t
616; CHECK-NEXT:    ret
617  %elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
618  %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
619  %v = call <vscale x 32 x i16> @llvm.vp.udiv.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> %m, i32 %evl)
620  ret <vscale x 32 x i16> %v
621}
622
623define <vscale x 32 x i16> @vdivu_vx_nxv32i16_unmasked(<vscale x 32 x i16> %va, i16 %b, i32 zeroext %evl) {
624; CHECK-LABEL: vdivu_vx_nxv32i16_unmasked:
625; CHECK:       # %bb.0:
626; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
627; CHECK-NEXT:    vdivu.vx v8, v8, a0
628; CHECK-NEXT:    ret
629  %elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
630  %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
631  %v = call <vscale x 32 x i16> @llvm.vp.udiv.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
632  ret <vscale x 32 x i16> %v
633}
634
; VP udiv intrinsic for <vscale x 1 x i32> (e32, LMUL=1/2).
635declare <vscale x 1 x i32> @llvm.vp.udiv.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)
636
; Masked vector-vector VP udiv at e32/mf2.
637define <vscale x 1 x i32> @vdivu_vv_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
638; CHECK-LABEL: vdivu_vv_nxv1i32:
639; CHECK:       # %bb.0:
640; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
641; CHECK-NEXT:    vdivu.vv v8, v8, v9, v0.t
642; CHECK-NEXT:    ret
643  %v = call <vscale x 1 x i32> @llvm.vp.udiv.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 %evl)
644  ret <vscale x 1 x i32> %v
645}
646
; Unmasked (all-true mask) vector-vector VP udiv at e32/mf2.
647define <vscale x 1 x i32> @vdivu_vv_nxv1i32_unmasked(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, i32 zeroext %evl) {
648; CHECK-LABEL: vdivu_vv_nxv1i32_unmasked:
649; CHECK:       # %bb.0:
650; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
651; CHECK-NEXT:    vdivu.vv v8, v8, v9
652; CHECK-NEXT:    ret
653  %v = call <vscale x 1 x i32> @llvm.vp.udiv.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
654  ret <vscale x 1 x i32> %v
655}
656
; Masked vector-scalar VP udiv: splat of i32 %b folds into vdivu.vx.
657define <vscale x 1 x i32> @vdivu_vx_nxv1i32(<vscale x 1 x i32> %va, i32 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
658; CHECK-LABEL: vdivu_vx_nxv1i32:
659; CHECK:       # %bb.0:
660; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
661; CHECK-NEXT:    vdivu.vx v8, v8, a0, v0.t
662; CHECK-NEXT:    ret
663  %elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
664  %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
665  %v = call <vscale x 1 x i32> @llvm.vp.udiv.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> %m, i32 %evl)
666  ret <vscale x 1 x i32> %v
667}
668
; Unmasked vector-scalar VP udiv.
669define <vscale x 1 x i32> @vdivu_vx_nxv1i32_unmasked(<vscale x 1 x i32> %va, i32 %b, i32 zeroext %evl) {
670; CHECK-LABEL: vdivu_vx_nxv1i32_unmasked:
671; CHECK:       # %bb.0:
672; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
673; CHECK-NEXT:    vdivu.vx v8, v8, a0
674; CHECK-NEXT:    ret
675  %elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
676  %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
677  %v = call <vscale x 1 x i32> @llvm.vp.udiv.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
678  ret <vscale x 1 x i32> %v
679}
680
; VP udiv intrinsic for <vscale x 2 x i32> (e32, LMUL=1).
681declare <vscale x 2 x i32> @llvm.vp.udiv.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i1>, i32)
682
; Masked vector-vector VP udiv at e32/m1.
683define <vscale x 2 x i32> @vdivu_vv_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
684; CHECK-LABEL: vdivu_vv_nxv2i32:
685; CHECK:       # %bb.0:
686; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
687; CHECK-NEXT:    vdivu.vv v8, v8, v9, v0.t
688; CHECK-NEXT:    ret
689  %v = call <vscale x 2 x i32> @llvm.vp.udiv.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 %evl)
690  ret <vscale x 2 x i32> %v
691}
692
; Unmasked (all-true mask) vector-vector VP udiv at e32/m1.
693define <vscale x 2 x i32> @vdivu_vv_nxv2i32_unmasked(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, i32 zeroext %evl) {
694; CHECK-LABEL: vdivu_vv_nxv2i32_unmasked:
695; CHECK:       # %bb.0:
696; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
697; CHECK-NEXT:    vdivu.vv v8, v8, v9
698; CHECK-NEXT:    ret
699  %v = call <vscale x 2 x i32> @llvm.vp.udiv.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
700  ret <vscale x 2 x i32> %v
701}
702
; Masked vector-scalar VP udiv: splat of i32 %b folds into vdivu.vx.
703define <vscale x 2 x i32> @vdivu_vx_nxv2i32(<vscale x 2 x i32> %va, i32 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
704; CHECK-LABEL: vdivu_vx_nxv2i32:
705; CHECK:       # %bb.0:
706; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
707; CHECK-NEXT:    vdivu.vx v8, v8, a0, v0.t
708; CHECK-NEXT:    ret
709  %elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
710  %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
711  %v = call <vscale x 2 x i32> @llvm.vp.udiv.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %m, i32 %evl)
712  ret <vscale x 2 x i32> %v
713}
714
; Unmasked vector-scalar VP udiv.
715define <vscale x 2 x i32> @vdivu_vx_nxv2i32_unmasked(<vscale x 2 x i32> %va, i32 %b, i32 zeroext %evl) {
716; CHECK-LABEL: vdivu_vx_nxv2i32_unmasked:
717; CHECK:       # %bb.0:
718; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
719; CHECK-NEXT:    vdivu.vx v8, v8, a0
720; CHECK-NEXT:    ret
721  %elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
722  %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
723  %v = call <vscale x 2 x i32> @llvm.vp.udiv.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
724  ret <vscale x 2 x i32> %v
725}
726
; VP udiv intrinsic for <vscale x 4 x i32> (e32, LMUL=2).
727declare <vscale x 4 x i32> @llvm.vp.udiv.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i1>, i32)
728
; Masked vector-vector VP udiv at e32/m2.
729define <vscale x 4 x i32> @vdivu_vv_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
730; CHECK-LABEL: vdivu_vv_nxv4i32:
731; CHECK:       # %bb.0:
732; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
733; CHECK-NEXT:    vdivu.vv v8, v8, v10, v0.t
734; CHECK-NEXT:    ret
735  %v = call <vscale x 4 x i32> @llvm.vp.udiv.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 %evl)
736  ret <vscale x 4 x i32> %v
737}
738
; Unmasked (all-true mask) vector-vector VP udiv at e32/m2.
739define <vscale x 4 x i32> @vdivu_vv_nxv4i32_unmasked(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, i32 zeroext %evl) {
740; CHECK-LABEL: vdivu_vv_nxv4i32_unmasked:
741; CHECK:       # %bb.0:
742; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
743; CHECK-NEXT:    vdivu.vv v8, v8, v10
744; CHECK-NEXT:    ret
745  %v = call <vscale x 4 x i32> @llvm.vp.udiv.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
746  ret <vscale x 4 x i32> %v
747}
748
; Masked vector-scalar VP udiv: splat of i32 %b folds into vdivu.vx.
749define <vscale x 4 x i32> @vdivu_vx_nxv4i32(<vscale x 4 x i32> %va, i32 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
750; CHECK-LABEL: vdivu_vx_nxv4i32:
751; CHECK:       # %bb.0:
752; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
753; CHECK-NEXT:    vdivu.vx v8, v8, a0, v0.t
754; CHECK-NEXT:    ret
755  %elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
756  %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
757  %v = call <vscale x 4 x i32> @llvm.vp.udiv.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> %m, i32 %evl)
758  ret <vscale x 4 x i32> %v
759}
760
; Unmasked vector-scalar VP udiv.
761define <vscale x 4 x i32> @vdivu_vx_nxv4i32_unmasked(<vscale x 4 x i32> %va, i32 %b, i32 zeroext %evl) {
762; CHECK-LABEL: vdivu_vx_nxv4i32_unmasked:
763; CHECK:       # %bb.0:
764; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
765; CHECK-NEXT:    vdivu.vx v8, v8, a0
766; CHECK-NEXT:    ret
767  %elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
768  %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
769  %v = call <vscale x 4 x i32> @llvm.vp.udiv.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
770  ret <vscale x 4 x i32> %v
771}
772
; VP udiv intrinsic for <vscale x 8 x i32> (e32, LMUL=4).
773declare <vscale x 8 x i32> @llvm.vp.udiv.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>, <vscale x 8 x i1>, i32)
774
; Masked vector-vector VP udiv at e32/m4.
775define <vscale x 8 x i32> @vdivu_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
776; CHECK-LABEL: vdivu_vv_nxv8i32:
777; CHECK:       # %bb.0:
778; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
779; CHECK-NEXT:    vdivu.vv v8, v8, v12, v0.t
780; CHECK-NEXT:    ret
781  %v = call <vscale x 8 x i32> @llvm.vp.udiv.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 %evl)
782  ret <vscale x 8 x i32> %v
783}
784
; Unmasked (all-true mask) vector-vector VP udiv at e32/m4.
785define <vscale x 8 x i32> @vdivu_vv_nxv8i32_unmasked(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, i32 zeroext %evl) {
786; CHECK-LABEL: vdivu_vv_nxv8i32_unmasked:
787; CHECK:       # %bb.0:
788; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
789; CHECK-NEXT:    vdivu.vv v8, v8, v12
790; CHECK-NEXT:    ret
791  %v = call <vscale x 8 x i32> @llvm.vp.udiv.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
792  ret <vscale x 8 x i32> %v
793}
794
; Masked vector-scalar VP udiv: splat of i32 %b folds into vdivu.vx.
795define <vscale x 8 x i32> @vdivu_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
796; CHECK-LABEL: vdivu_vx_nxv8i32:
797; CHECK:       # %bb.0:
798; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
799; CHECK-NEXT:    vdivu.vx v8, v8, a0, v0.t
800; CHECK-NEXT:    ret
801  %elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
802  %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
803  %v = call <vscale x 8 x i32> @llvm.vp.udiv.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %m, i32 %evl)
804  ret <vscale x 8 x i32> %v
805}
806
; Unmasked vector-scalar VP udiv.
807define <vscale x 8 x i32> @vdivu_vx_nxv8i32_unmasked(<vscale x 8 x i32> %va, i32 %b, i32 zeroext %evl) {
808; CHECK-LABEL: vdivu_vx_nxv8i32_unmasked:
809; CHECK:       # %bb.0:
810; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
811; CHECK-NEXT:    vdivu.vx v8, v8, a0
812; CHECK-NEXT:    ret
813  %elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
814  %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
815  %v = call <vscale x 8 x i32> @llvm.vp.udiv.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
816  ret <vscale x 8 x i32> %v
817}
818
; VP udiv intrinsic for <vscale x 16 x i32> (e32, LMUL=8).
819declare <vscale x 16 x i32> @llvm.vp.udiv.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>, <vscale x 16 x i1>, i32)
820
; Masked vector-vector VP udiv at e32/m8.
821define <vscale x 16 x i32> @vdivu_vv_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
822; CHECK-LABEL: vdivu_vv_nxv16i32:
823; CHECK:       # %bb.0:
824; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
825; CHECK-NEXT:    vdivu.vv v8, v8, v16, v0.t
826; CHECK-NEXT:    ret
827  %v = call <vscale x 16 x i32> @llvm.vp.udiv.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 %evl)
828  ret <vscale x 16 x i32> %v
829}
830
; Unmasked (all-true mask) vector-vector VP udiv at e32/m8.
831define <vscale x 16 x i32> @vdivu_vv_nxv16i32_unmasked(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, i32 zeroext %evl) {
832; CHECK-LABEL: vdivu_vv_nxv16i32_unmasked:
833; CHECK:       # %bb.0:
834; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
835; CHECK-NEXT:    vdivu.vv v8, v8, v16
836; CHECK-NEXT:    ret
837  %v = call <vscale x 16 x i32> @llvm.vp.udiv.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
838  ret <vscale x 16 x i32> %v
839}
840
; Masked vector-scalar VP udiv: splat of i32 %b folds into vdivu.vx.
841define <vscale x 16 x i32> @vdivu_vx_nxv16i32(<vscale x 16 x i32> %va, i32 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
842; CHECK-LABEL: vdivu_vx_nxv16i32:
843; CHECK:       # %bb.0:
844; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
845; CHECK-NEXT:    vdivu.vx v8, v8, a0, v0.t
846; CHECK-NEXT:    ret
847  %elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
848  %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
849  %v = call <vscale x 16 x i32> @llvm.vp.udiv.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> %m, i32 %evl)
850  ret <vscale x 16 x i32> %v
851}
852
; Unmasked vector-scalar VP udiv.
853define <vscale x 16 x i32> @vdivu_vx_nxv16i32_unmasked(<vscale x 16 x i32> %va, i32 %b, i32 zeroext %evl) {
854; CHECK-LABEL: vdivu_vx_nxv16i32_unmasked:
855; CHECK:       # %bb.0:
856; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
857; CHECK-NEXT:    vdivu.vx v8, v8, a0
858; CHECK-NEXT:    ret
859  %elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
860  %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
861  %v = call <vscale x 16 x i32> @llvm.vp.udiv.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
862  ret <vscale x 16 x i32> %v
863}
864
; VP udiv intrinsic for <vscale x 1 x i64> (e64, LMUL=1). The vx tests below
; diverge per target: RV64 passes the i64 scalar in one GPR (vdivu.vx), while
; RV32 receives it split across a0/a1 and splats via a stack slot + vlse64.
865declare <vscale x 1 x i64> @llvm.vp.udiv.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, i32)
866
; Masked vector-vector VP udiv at e64/m1 (same codegen on RV32 and RV64).
867define <vscale x 1 x i64> @vdivu_vv_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
868; CHECK-LABEL: vdivu_vv_nxv1i64:
869; CHECK:       # %bb.0:
870; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
871; CHECK-NEXT:    vdivu.vv v8, v8, v9, v0.t
872; CHECK-NEXT:    ret
873  %v = call <vscale x 1 x i64> @llvm.vp.udiv.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 %evl)
874  ret <vscale x 1 x i64> %v
875}
876
; Unmasked (all-true mask) vector-vector VP udiv at e64/m1.
877define <vscale x 1 x i64> @vdivu_vv_nxv1i64_unmasked(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, i32 zeroext %evl) {
878; CHECK-LABEL: vdivu_vv_nxv1i64_unmasked:
879; CHECK:       # %bb.0:
880; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
881; CHECK-NEXT:    vdivu.vv v8, v8, v9
882; CHECK-NEXT:    ret
883  %v = call <vscale x 1 x i64> @llvm.vp.udiv.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
884  ret <vscale x 1 x i64> %v
885}
886
; Masked vector-scalar VP udiv with an i64 splat divisor; target-specific checks.
887define <vscale x 1 x i64> @vdivu_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
888; RV32-LABEL: vdivu_vx_nxv1i64:
889; RV32:       # %bb.0:
890; RV32-NEXT:    addi sp, sp, -16
891; RV32-NEXT:    .cfi_def_cfa_offset 16
892; RV32-NEXT:    sw a0, 8(sp)
893; RV32-NEXT:    sw a1, 12(sp)
894; RV32-NEXT:    addi a0, sp, 8
895; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
896; RV32-NEXT:    vlse64.v v9, (a0), zero
897; RV32-NEXT:    vdivu.vv v8, v8, v9, v0.t
898; RV32-NEXT:    addi sp, sp, 16
899; RV32-NEXT:    .cfi_def_cfa_offset 0
900; RV32-NEXT:    ret
901;
902; RV64-LABEL: vdivu_vx_nxv1i64:
903; RV64:       # %bb.0:
904; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
905; RV64-NEXT:    vdivu.vx v8, v8, a0, v0.t
906; RV64-NEXT:    ret
907  %elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
908  %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
909  %v = call <vscale x 1 x i64> @llvm.vp.udiv.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %m, i32 %evl)
910  ret <vscale x 1 x i64> %v
911}
912
; Unmasked variant of the i64 vector-scalar test above.
913define <vscale x 1 x i64> @vdivu_vx_nxv1i64_unmasked(<vscale x 1 x i64> %va, i64 %b, i32 zeroext %evl) {
914; RV32-LABEL: vdivu_vx_nxv1i64_unmasked:
915; RV32:       # %bb.0:
916; RV32-NEXT:    addi sp, sp, -16
917; RV32-NEXT:    .cfi_def_cfa_offset 16
918; RV32-NEXT:    sw a0, 8(sp)
919; RV32-NEXT:    sw a1, 12(sp)
920; RV32-NEXT:    addi a0, sp, 8
921; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
922; RV32-NEXT:    vlse64.v v9, (a0), zero
923; RV32-NEXT:    vdivu.vv v8, v8, v9
924; RV32-NEXT:    addi sp, sp, 16
925; RV32-NEXT:    .cfi_def_cfa_offset 0
926; RV32-NEXT:    ret
927;
928; RV64-LABEL: vdivu_vx_nxv1i64_unmasked:
929; RV64:       # %bb.0:
930; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
931; RV64-NEXT:    vdivu.vx v8, v8, a0
932; RV64-NEXT:    ret
933  %elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
934  %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
935  %v = call <vscale x 1 x i64> @llvm.vp.udiv.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
936  ret <vscale x 1 x i64> %v
937}
938
; VP udiv intrinsic for <vscale x 2 x i64> (e64, LMUL=2).
939declare <vscale x 2 x i64> @llvm.vp.udiv.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i1>, i32)
940
; Masked vector-vector VP udiv at e64/m2.
941define <vscale x 2 x i64> @vdivu_vv_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
942; CHECK-LABEL: vdivu_vv_nxv2i64:
943; CHECK:       # %bb.0:
944; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
945; CHECK-NEXT:    vdivu.vv v8, v8, v10, v0.t
946; CHECK-NEXT:    ret
947  %v = call <vscale x 2 x i64> @llvm.vp.udiv.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 %evl)
948  ret <vscale x 2 x i64> %v
949}
950
; Unmasked (all-true mask) vector-vector VP udiv at e64/m2.
951define <vscale x 2 x i64> @vdivu_vv_nxv2i64_unmasked(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, i32 zeroext %evl) {
952; CHECK-LABEL: vdivu_vv_nxv2i64_unmasked:
953; CHECK:       # %bb.0:
954; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
955; CHECK-NEXT:    vdivu.vv v8, v8, v10
956; CHECK-NEXT:    ret
957  %v = call <vscale x 2 x i64> @llvm.vp.udiv.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
958  ret <vscale x 2 x i64> %v
959}
960
; Masked i64 vector-scalar VP udiv; RV32 splats %b through the stack (vlse64),
; RV64 uses vdivu.vx directly.
961define <vscale x 2 x i64> @vdivu_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
962; RV32-LABEL: vdivu_vx_nxv2i64:
963; RV32:       # %bb.0:
964; RV32-NEXT:    addi sp, sp, -16
965; RV32-NEXT:    .cfi_def_cfa_offset 16
966; RV32-NEXT:    sw a0, 8(sp)
967; RV32-NEXT:    sw a1, 12(sp)
968; RV32-NEXT:    addi a0, sp, 8
969; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
970; RV32-NEXT:    vlse64.v v10, (a0), zero
971; RV32-NEXT:    vdivu.vv v8, v8, v10, v0.t
972; RV32-NEXT:    addi sp, sp, 16
973; RV32-NEXT:    .cfi_def_cfa_offset 0
974; RV32-NEXT:    ret
975;
976; RV64-LABEL: vdivu_vx_nxv2i64:
977; RV64:       # %bb.0:
978; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
979; RV64-NEXT:    vdivu.vx v8, v8, a0, v0.t
980; RV64-NEXT:    ret
981  %elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
982  %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
983  %v = call <vscale x 2 x i64> @llvm.vp.udiv.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %m, i32 %evl)
984  ret <vscale x 2 x i64> %v
985}
986
; Unmasked variant of the i64 vector-scalar test above.
987define <vscale x 2 x i64> @vdivu_vx_nxv2i64_unmasked(<vscale x 2 x i64> %va, i64 %b, i32 zeroext %evl) {
988; RV32-LABEL: vdivu_vx_nxv2i64_unmasked:
989; RV32:       # %bb.0:
990; RV32-NEXT:    addi sp, sp, -16
991; RV32-NEXT:    .cfi_def_cfa_offset 16
992; RV32-NEXT:    sw a0, 8(sp)
993; RV32-NEXT:    sw a1, 12(sp)
994; RV32-NEXT:    addi a0, sp, 8
995; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
996; RV32-NEXT:    vlse64.v v10, (a0), zero
997; RV32-NEXT:    vdivu.vv v8, v8, v10
998; RV32-NEXT:    addi sp, sp, 16
999; RV32-NEXT:    .cfi_def_cfa_offset 0
1000; RV32-NEXT:    ret
1001;
1002; RV64-LABEL: vdivu_vx_nxv2i64_unmasked:
1003; RV64:       # %bb.0:
1004; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
1005; RV64-NEXT:    vdivu.vx v8, v8, a0
1006; RV64-NEXT:    ret
1007  %elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
1008  %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
1009  %v = call <vscale x 2 x i64> @llvm.vp.udiv.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
1010  ret <vscale x 2 x i64> %v
1011}
1012
; VP udiv intrinsic for <vscale x 4 x i64> (e64, LMUL=4).
1013declare <vscale x 4 x i64> @llvm.vp.udiv.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>, <vscale x 4 x i1>, i32)
1014
; Masked vector-vector VP udiv at e64/m4.
1015define <vscale x 4 x i64> @vdivu_vv_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
1016; CHECK-LABEL: vdivu_vv_nxv4i64:
1017; CHECK:       # %bb.0:
1018; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
1019; CHECK-NEXT:    vdivu.vv v8, v8, v12, v0.t
1020; CHECK-NEXT:    ret
1021  %v = call <vscale x 4 x i64> @llvm.vp.udiv.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 %evl)
1022  ret <vscale x 4 x i64> %v
1023}
1024
; Unmasked (all-true mask) vector-vector VP udiv at e64/m4.
1025define <vscale x 4 x i64> @vdivu_vv_nxv4i64_unmasked(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, i32 zeroext %evl) {
1026; CHECK-LABEL: vdivu_vv_nxv4i64_unmasked:
1027; CHECK:       # %bb.0:
1028; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
1029; CHECK-NEXT:    vdivu.vv v8, v8, v12
1030; CHECK-NEXT:    ret
1031  %v = call <vscale x 4 x i64> @llvm.vp.udiv.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
1032  ret <vscale x 4 x i64> %v
1033}
1034
; Masked i64 vector-scalar VP udiv; RV32 splats %b through the stack (vlse64),
; RV64 uses vdivu.vx directly.
1035define <vscale x 4 x i64> @vdivu_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
1036; RV32-LABEL: vdivu_vx_nxv4i64:
1037; RV32:       # %bb.0:
1038; RV32-NEXT:    addi sp, sp, -16
1039; RV32-NEXT:    .cfi_def_cfa_offset 16
1040; RV32-NEXT:    sw a0, 8(sp)
1041; RV32-NEXT:    sw a1, 12(sp)
1042; RV32-NEXT:    addi a0, sp, 8
1043; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
1044; RV32-NEXT:    vlse64.v v12, (a0), zero
1045; RV32-NEXT:    vdivu.vv v8, v8, v12, v0.t
1046; RV32-NEXT:    addi sp, sp, 16
1047; RV32-NEXT:    .cfi_def_cfa_offset 0
1048; RV32-NEXT:    ret
1049;
1050; RV64-LABEL: vdivu_vx_nxv4i64:
1051; RV64:       # %bb.0:
1052; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
1053; RV64-NEXT:    vdivu.vx v8, v8, a0, v0.t
1054; RV64-NEXT:    ret
1055  %elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
1056  %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
1057  %v = call <vscale x 4 x i64> @llvm.vp.udiv.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> %m, i32 %evl)
1058  ret <vscale x 4 x i64> %v
1059}
1060
; Unmasked variant of the i64 vector-scalar test above.
1061define <vscale x 4 x i64> @vdivu_vx_nxv4i64_unmasked(<vscale x 4 x i64> %va, i64 %b, i32 zeroext %evl) {
1062; RV32-LABEL: vdivu_vx_nxv4i64_unmasked:
1063; RV32:       # %bb.0:
1064; RV32-NEXT:    addi sp, sp, -16
1065; RV32-NEXT:    .cfi_def_cfa_offset 16
1066; RV32-NEXT:    sw a0, 8(sp)
1067; RV32-NEXT:    sw a1, 12(sp)
1068; RV32-NEXT:    addi a0, sp, 8
1069; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
1070; RV32-NEXT:    vlse64.v v12, (a0), zero
1071; RV32-NEXT:    vdivu.vv v8, v8, v12
1072; RV32-NEXT:    addi sp, sp, 16
1073; RV32-NEXT:    .cfi_def_cfa_offset 0
1074; RV32-NEXT:    ret
1075;
1076; RV64-LABEL: vdivu_vx_nxv4i64_unmasked:
1077; RV64:       # %bb.0:
1078; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
1079; RV64-NEXT:    vdivu.vx v8, v8, a0
1080; RV64-NEXT:    ret
1081  %elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
1082  %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
1083  %v = call <vscale x 4 x i64> @llvm.vp.udiv.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
1084  ret <vscale x 4 x i64> %v
1085}
1086
; VP udiv intrinsic for <vscale x 8 x i64> (e64, LMUL=8).
1087declare <vscale x 8 x i64> @llvm.vp.udiv.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>, <vscale x 8 x i1>, i32)
1088
; Masked vector-vector VP udiv at e64/m8.
1089define <vscale x 8 x i64> @vdivu_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
1090; CHECK-LABEL: vdivu_vv_nxv8i64:
1091; CHECK:       # %bb.0:
1092; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
1093; CHECK-NEXT:    vdivu.vv v8, v8, v16, v0.t
1094; CHECK-NEXT:    ret
1095  %v = call <vscale x 8 x i64> @llvm.vp.udiv.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 %evl)
1096  ret <vscale x 8 x i64> %v
1097}
1098
; Unmasked (all-true mask) vector-vector VP udiv at e64/m8.
1099define <vscale x 8 x i64> @vdivu_vv_nxv8i64_unmasked(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, i32 zeroext %evl) {
1100; CHECK-LABEL: vdivu_vv_nxv8i64_unmasked:
1101; CHECK:       # %bb.0:
1102; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
1103; CHECK-NEXT:    vdivu.vv v8, v8, v16
1104; CHECK-NEXT:    ret
1105  %v = call <vscale x 8 x i64> @llvm.vp.udiv.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
1106  ret <vscale x 8 x i64> %v
1107}
1108
; Masked i64 vector-scalar VP udiv; RV32 splats %b through the stack (vlse64),
; RV64 uses vdivu.vx directly.
1109define <vscale x 8 x i64> @vdivu_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
1110; RV32-LABEL: vdivu_vx_nxv8i64:
1111; RV32:       # %bb.0:
1112; RV32-NEXT:    addi sp, sp, -16
1113; RV32-NEXT:    .cfi_def_cfa_offset 16
1114; RV32-NEXT:    sw a0, 8(sp)
1115; RV32-NEXT:    sw a1, 12(sp)
1116; RV32-NEXT:    addi a0, sp, 8
1117; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
1118; RV32-NEXT:    vlse64.v v16, (a0), zero
1119; RV32-NEXT:    vdivu.vv v8, v8, v16, v0.t
1120; RV32-NEXT:    addi sp, sp, 16
1121; RV32-NEXT:    .cfi_def_cfa_offset 0
1122; RV32-NEXT:    ret
1123;
1124; RV64-LABEL: vdivu_vx_nxv8i64:
1125; RV64:       # %bb.0:
1126; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
1127; RV64-NEXT:    vdivu.vx v8, v8, a0, v0.t
1128; RV64-NEXT:    ret
1129  %elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
1130  %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
1131  %v = call <vscale x 8 x i64> @llvm.vp.udiv.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
1132  ret <vscale x 8 x i64> %v
1133}
1134
; Unmasked variant of the i64 vector-scalar test above.
1135define <vscale x 8 x i64> @vdivu_vx_nxv8i64_unmasked(<vscale x 8 x i64> %va, i64 %b, i32 zeroext %evl) {
1136; RV32-LABEL: vdivu_vx_nxv8i64_unmasked:
1137; RV32:       # %bb.0:
1138; RV32-NEXT:    addi sp, sp, -16
1139; RV32-NEXT:    .cfi_def_cfa_offset 16
1140; RV32-NEXT:    sw a0, 8(sp)
1141; RV32-NEXT:    sw a1, 12(sp)
1142; RV32-NEXT:    addi a0, sp, 8
1143; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
1144; RV32-NEXT:    vlse64.v v16, (a0), zero
1145; RV32-NEXT:    vdivu.vv v8, v8, v16
1146; RV32-NEXT:    addi sp, sp, 16
1147; RV32-NEXT:    .cfi_def_cfa_offset 0
1148; RV32-NEXT:    ret
1149;
1150; RV64-LABEL: vdivu_vx_nxv8i64_unmasked:
1151; RV64:       # %bb.0:
1152; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
1153; RV64-NEXT:    vdivu.vx v8, v8, a0
1154; RV64-NEXT:    ret
1155  %elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
1156  %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
1157  %v = call <vscale x 8 x i64> @llvm.vp.udiv.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
1158  ret <vscale x 8 x i64> %v
1159}
1160