; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+v -target-abi=lp64d \
; RUN:     -verify-machineinstrs < %s | FileCheck %s

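; The functions below exercise three insertelement lowering patterns for
; scalable integer vectors:
;  * index 0: a tail-undisturbed vmv.s.x writes the scalar directly into the
;    subject register; no slide is needed.
;  * constant index: the scalar is materialized into a temporary with vmv.s.x
;    and slid into place with vslideup.vi, under VL = index + 1.
;  * variable index: the same shape, but with vslideup.vx and VL = idx + 1.
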
define <vscale x 1 x i8> @insertelt_nxv1i8_0(<vscale x 1 x i8> %v, i8 signext %elt) {
; CHECK-LABEL: insertelt_nxv1i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 1 x i8> %v, i8 %elt, i32 0
  ret <vscale x 1 x i8> %r
}

define <vscale x 1 x i8> @insertelt_nxv1i8_imm(<vscale x 1 x i8> %v, i8 signext %elt) {
; CHECK-LABEL: insertelt_nxv1i8_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, mf8, tu, ma
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vslideup.vi v8, v9, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 1 x i8> %v, i8 %elt, i32 3
  ret <vscale x 1 x i8> %r
}

define <vscale x 1 x i8> @insertelt_nxv1i8_idx(<vscale x 1 x i8> %v, i8 signext %elt, i32 zeroext %idx) {
; CHECK-LABEL: insertelt_nxv1i8_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a2, a1, 1
; CHECK-NEXT:    vsetvli a3, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v9, a1
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 1 x i8> %v, i8 %elt, i32 %idx
  ret <vscale x 1 x i8> %r
}

define <vscale x 2 x i8> @insertelt_nxv2i8_0(<vscale x 2 x i8> %v, i8 signext %elt) {
; CHECK-LABEL: insertelt_nxv2i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 2 x i8> %v, i8 %elt, i32 0
  ret <vscale x 2 x i8> %r
}

define <vscale x 2 x i8> @insertelt_nxv2i8_imm(<vscale x 2 x i8> %v, i8 signext %elt) {
; CHECK-LABEL: insertelt_nxv2i8_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, tu, ma
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vslideup.vi v8, v9, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 2 x i8> %v, i8 %elt, i32 3
  ret <vscale x 2 x i8> %r
}

define <vscale x 2 x i8> @insertelt_nxv2i8_idx(<vscale x 2 x i8> %v, i8 signext %elt, i32 zeroext %idx) {
; CHECK-LABEL: insertelt_nxv2i8_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a2, a1, 1
; CHECK-NEXT:    vsetvli a3, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vsetvli zero, a2, e8, mf4, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v9, a1
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 2 x i8> %v, i8 %elt, i32 %idx
  ret <vscale x 2 x i8> %r
}

define <vscale x 4 x i8> @insertelt_nxv4i8_0(<vscale x 4 x i8> %v, i8 signext %elt) {
; CHECK-LABEL: insertelt_nxv4i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 4 x i8> %v, i8 %elt, i32 0
  ret <vscale x 4 x i8> %r
}

define <vscale x 4 x i8> @insertelt_nxv4i8_imm(<vscale x 4 x i8> %v, i8 signext %elt) {
; CHECK-LABEL: insertelt_nxv4i8_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, mf2, tu, ma
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vslideup.vi v8, v9, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 4 x i8> %v, i8 %elt, i32 3
  ret <vscale x 4 x i8> %r
}

define <vscale x 4 x i8> @insertelt_nxv4i8_idx(<vscale x 4 x i8> %v, i8 signext %elt, i32 zeroext %idx) {
; CHECK-LABEL: insertelt_nxv4i8_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a2, a1, 1
; CHECK-NEXT:    vsetvli a3, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vsetvli zero, a2, e8, mf2, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v9, a1
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 4 x i8> %v, i8 %elt, i32 %idx
  ret <vscale x 4 x i8> %r
}

define <vscale x 8 x i8> @insertelt_nxv8i8_0(<vscale x 8 x i8> %v, i8 signext %elt) {
; CHECK-LABEL: insertelt_nxv8i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 8 x i8> %v, i8 %elt, i32 0
  ret <vscale x 8 x i8> %r
}

define <vscale x 8 x i8> @insertelt_nxv8i8_imm(<vscale x 8 x i8> %v, i8 signext %elt) {
; CHECK-LABEL: insertelt_nxv8i8_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vslideup.vi v8, v9, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 8 x i8> %v, i8 %elt, i32 3
  ret <vscale x 8 x i8> %r
}

define <vscale x 8 x i8> @insertelt_nxv8i8_idx(<vscale x 8 x i8> %v, i8 signext %elt, i32 zeroext %idx) {
; CHECK-LABEL: insertelt_nxv8i8_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a2, a1, 1
; CHECK-NEXT:    vsetvli a3, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vsetvli zero, a2, e8, m1, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v9, a1
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 8 x i8> %v, i8 %elt, i32 %idx
  ret <vscale x 8 x i8> %r
}

define <vscale x 16 x i8> @insertelt_nxv16i8_0(<vscale x 16 x i8> %v, i8 signext %elt) {
; CHECK-LABEL: insertelt_nxv16i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 16 x i8> %v, i8 %elt, i32 0
  ret <vscale x 16 x i8> %r
}

define <vscale x 16 x i8> @insertelt_nxv16i8_imm(<vscale x 16 x i8> %v, i8 signext %elt) {
; CHECK-LABEL: insertelt_nxv16i8_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v10, a0
; CHECK-NEXT:    vslideup.vi v8, v10, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 16 x i8> %v, i8 %elt, i32 3
  ret <vscale x 16 x i8> %r
}

define <vscale x 16 x i8> @insertelt_nxv16i8_idx(<vscale x 16 x i8> %v, i8 signext %elt, i32 zeroext %idx) {
; CHECK-LABEL: insertelt_nxv16i8_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a2, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmv.s.x v10, a0
; CHECK-NEXT:    addi a0, a1, 1
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v10, a1
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 16 x i8> %v, i8 %elt, i32 %idx
  ret <vscale x 16 x i8> %r
}

define <vscale x 32 x i8> @insertelt_nxv32i8_0(<vscale x 32 x i8> %v, i8 signext %elt) {
; CHECK-LABEL: insertelt_nxv32i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 32 x i8> %v, i8 %elt, i32 0
  ret <vscale x 32 x i8> %r
}

define <vscale x 32 x i8> @insertelt_nxv32i8_imm(<vscale x 32 x i8> %v, i8 signext %elt) {
; CHECK-LABEL: insertelt_nxv32i8_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v12, a0
; CHECK-NEXT:    vslideup.vi v8, v12, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 32 x i8> %v, i8 %elt, i32 3
  ret <vscale x 32 x i8> %r
}

define <vscale x 32 x i8> @insertelt_nxv32i8_idx(<vscale x 32 x i8> %v, i8 signext %elt, i32 zeroext %idx) {
; CHECK-LABEL: insertelt_nxv32i8_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a2, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmv.s.x v12, a0
; CHECK-NEXT:    addi a0, a1, 1
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v12, a1
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 32 x i8> %v, i8 %elt, i32 %idx
  ret <vscale x 32 x i8> %r
}

define <vscale x 64 x i8> @insertelt_nxv64i8_0(<vscale x 64 x i8> %v, i8 signext %elt) {
; CHECK-LABEL: insertelt_nxv64i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 64 x i8> %v, i8 %elt, i32 0
  ret <vscale x 64 x i8> %r
}

define <vscale x 64 x i8> @insertelt_nxv64i8_imm(<vscale x 64 x i8> %v, i8 signext %elt) {
; CHECK-LABEL: insertelt_nxv64i8_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v16, a0
; CHECK-NEXT:    vslideup.vi v8, v16, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 64 x i8> %v, i8 %elt, i32 3
  ret <vscale x 64 x i8> %r
}

define <vscale x 64 x i8> @insertelt_nxv64i8_idx(<vscale x 64 x i8> %v, i8 signext %elt, i32 zeroext %idx) {
; CHECK-LABEL: insertelt_nxv64i8_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a2, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmv.s.x v16, a0
; CHECK-NEXT:    addi a0, a1, 1
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v16, a1
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 64 x i8> %v, i8 %elt, i32 %idx
  ret <vscale x 64 x i8> %r
}

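; e16 variants of the same three patterns.
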
define <vscale x 1 x i16> @insertelt_nxv1i16_0(<vscale x 1 x i16> %v, i16 signext %elt) {
; CHECK-LABEL: insertelt_nxv1i16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 1 x i16> %v, i16 %elt, i32 0
  ret <vscale x 1 x i16> %r
}

define <vscale x 1 x i16> @insertelt_nxv1i16_imm(<vscale x 1 x i16> %v, i16 signext %elt) {
; CHECK-LABEL: insertelt_nxv1i16_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, mf4, tu, ma
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vslideup.vi v8, v9, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 1 x i16> %v, i16 %elt, i32 3
  ret <vscale x 1 x i16> %r
}

define <vscale x 1 x i16> @insertelt_nxv1i16_idx(<vscale x 1 x i16> %v, i16 signext %elt, i32 zeroext %idx) {
; CHECK-LABEL: insertelt_nxv1i16_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a2, a1, 1
; CHECK-NEXT:    vsetvli a3, zero, e16, m1, ta, ma
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v9, a1
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 1 x i16> %v, i16 %elt, i32 %idx
  ret <vscale x 1 x i16> %r
}

define <vscale x 2 x i16> @insertelt_nxv2i16_0(<vscale x 2 x i16> %v, i16 signext %elt) {
; CHECK-LABEL: insertelt_nxv2i16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 2 x i16> %v, i16 %elt, i32 0
  ret <vscale x 2 x i16> %r
}

define <vscale x 2 x i16> @insertelt_nxv2i16_imm(<vscale x 2 x i16> %v, i16 signext %elt) {
; CHECK-LABEL: insertelt_nxv2i16_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, tu, ma
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vslideup.vi v8, v9, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 2 x i16> %v, i16 %elt, i32 3
  ret <vscale x 2 x i16> %r
}

define <vscale x 2 x i16> @insertelt_nxv2i16_idx(<vscale x 2 x i16> %v, i16 signext %elt, i32 zeroext %idx) {
; CHECK-LABEL: insertelt_nxv2i16_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a2, a1, 1
; CHECK-NEXT:    vsetvli a3, zero, e16, m1, ta, ma
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v9, a1
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 2 x i16> %v, i16 %elt, i32 %idx
  ret <vscale x 2 x i16> %r
}

define <vscale x 4 x i16> @insertelt_nxv4i16_0(<vscale x 4 x i16> %v, i16 signext %elt) {
; CHECK-LABEL: insertelt_nxv4i16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 4 x i16> %v, i16 %elt, i32 0
  ret <vscale x 4 x i16> %r
}

define <vscale x 4 x i16> @insertelt_nxv4i16_imm(<vscale x 4 x i16> %v, i16 signext %elt) {
; CHECK-LABEL: insertelt_nxv4i16_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vslideup.vi v8, v9, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 4 x i16> %v, i16 %elt, i32 3
  ret <vscale x 4 x i16> %r
}

define <vscale x 4 x i16> @insertelt_nxv4i16_idx(<vscale x 4 x i16> %v, i16 signext %elt, i32 zeroext %idx) {
; CHECK-LABEL: insertelt_nxv4i16_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a2, a1, 1
; CHECK-NEXT:    vsetvli a3, zero, e16, m1, ta, ma
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vsetvli zero, a2, e16, m1, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v9, a1
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 4 x i16> %v, i16 %elt, i32 %idx
  ret <vscale x 4 x i16> %r
}

define <vscale x 8 x i16> @insertelt_nxv8i16_0(<vscale x 8 x i16> %v, i16 signext %elt) {
; CHECK-LABEL: insertelt_nxv8i16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 8 x i16> %v, i16 %elt, i32 0
  ret <vscale x 8 x i16> %r
}

define <vscale x 8 x i16> @insertelt_nxv8i16_imm(<vscale x 8 x i16> %v, i16 signext %elt) {
; CHECK-LABEL: insertelt_nxv8i16_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v10, a0
; CHECK-NEXT:    vslideup.vi v8, v10, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 8 x i16> %v, i16 %elt, i32 3
  ret <vscale x 8 x i16> %r
}

define <vscale x 8 x i16> @insertelt_nxv8i16_idx(<vscale x 8 x i16> %v, i16 signext %elt, i32 zeroext %idx) {
; CHECK-LABEL: insertelt_nxv8i16_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a2, zero, e16, m1, ta, ma
; CHECK-NEXT:    vmv.s.x v10, a0
; CHECK-NEXT:    addi a0, a1, 1
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v10, a1
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 8 x i16> %v, i16 %elt, i32 %idx
  ret <vscale x 8 x i16> %r
}

define <vscale x 16 x i16> @insertelt_nxv16i16_0(<vscale x 16 x i16> %v, i16 signext %elt) {
; CHECK-LABEL: insertelt_nxv16i16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 16 x i16> %v, i16 %elt, i32 0
  ret <vscale x 16 x i16> %r
}

define <vscale x 16 x i16> @insertelt_nxv16i16_imm(<vscale x 16 x i16> %v, i16 signext %elt) {
; CHECK-LABEL: insertelt_nxv16i16_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v12, a0
; CHECK-NEXT:    vslideup.vi v8, v12, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 16 x i16> %v, i16 %elt, i32 3
  ret <vscale x 16 x i16> %r
}

define <vscale x 16 x i16> @insertelt_nxv16i16_idx(<vscale x 16 x i16> %v, i16 signext %elt, i32 zeroext %idx) {
; CHECK-LABEL: insertelt_nxv16i16_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a2, zero, e16, m1, ta, ma
; CHECK-NEXT:    vmv.s.x v12, a0
; CHECK-NEXT:    addi a0, a1, 1
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v12, a1
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 16 x i16> %v, i16 %elt, i32 %idx
  ret <vscale x 16 x i16> %r
}

define <vscale x 32 x i16> @insertelt_nxv32i16_0(<vscale x 32 x i16> %v, i16 signext %elt) {
; CHECK-LABEL: insertelt_nxv32i16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 32 x i16> %v, i16 %elt, i32 0
  ret <vscale x 32 x i16> %r
}

define <vscale x 32 x i16> @insertelt_nxv32i16_imm(<vscale x 32 x i16> %v, i16 signext %elt) {
; CHECK-LABEL: insertelt_nxv32i16_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v16, a0
; CHECK-NEXT:    vslideup.vi v8, v16, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 32 x i16> %v, i16 %elt, i32 3
  ret <vscale x 32 x i16> %r
}

define <vscale x 32 x i16> @insertelt_nxv32i16_idx(<vscale x 32 x i16> %v, i16 signext %elt, i32 zeroext %idx) {
; CHECK-LABEL: insertelt_nxv32i16_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a2, zero, e16, m1, ta, ma
; CHECK-NEXT:    vmv.s.x v16, a0
; CHECK-NEXT:    addi a0, a1, 1
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v16, a1
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 32 x i16> %v, i16 %elt, i32 %idx
  ret <vscale x 32 x i16> %r
}

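; e32 variants of the same three patterns.
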
define <vscale x 1 x i32> @insertelt_nxv1i32_0(<vscale x 1 x i32> %v, i32 signext %elt) {
; CHECK-LABEL: insertelt_nxv1i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 1 x i32> %v, i32 %elt, i32 0
  ret <vscale x 1 x i32> %r
}

define <vscale x 1 x i32> @insertelt_nxv1i32_imm(<vscale x 1 x i32> %v, i32 signext %elt) {
; CHECK-LABEL: insertelt_nxv1i32_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, mf2, tu, ma
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vslideup.vi v8, v9, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 1 x i32> %v, i32 %elt, i32 3
  ret <vscale x 1 x i32> %r
}

define <vscale x 1 x i32> @insertelt_nxv1i32_idx(<vscale x 1 x i32> %v, i32 signext %elt, i32 zeroext %idx) {
; CHECK-LABEL: insertelt_nxv1i32_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a2, a1, 1
; CHECK-NEXT:    vsetvli a3, zero, e32, m1, ta, ma
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v9, a1
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 1 x i32> %v, i32 %elt, i32 %idx
  ret <vscale x 1 x i32> %r
}

define <vscale x 2 x i32> @insertelt_nxv2i32_0(<vscale x 2 x i32> %v, i32 signext %elt) {
; CHECK-LABEL: insertelt_nxv2i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 2 x i32> %v, i32 %elt, i32 0
  ret <vscale x 2 x i32> %r
}

define <vscale x 2 x i32> @insertelt_nxv2i32_imm(<vscale x 2 x i32> %v, i32 signext %elt) {
; CHECK-LABEL: insertelt_nxv2i32_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vslideup.vi v8, v9, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 2 x i32> %v, i32 %elt, i32 3
  ret <vscale x 2 x i32> %r
}

define <vscale x 2 x i32> @insertelt_nxv2i32_idx(<vscale x 2 x i32> %v, i32 signext %elt, i32 zeroext %idx) {
; CHECK-LABEL: insertelt_nxv2i32_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a2, a1, 1
; CHECK-NEXT:    vsetvli a3, zero, e32, m1, ta, ma
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vsetvli zero, a2, e32, m1, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v9, a1
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 2 x i32> %v, i32 %elt, i32 %idx
  ret <vscale x 2 x i32> %r
}

define <vscale x 4 x i32> @insertelt_nxv4i32_0(<vscale x 4 x i32> %v, i32 signext %elt) {
; CHECK-LABEL: insertelt_nxv4i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 4 x i32> %v, i32 %elt, i32 0
  ret <vscale x 4 x i32> %r
}

define <vscale x 4 x i32> @insertelt_nxv4i32_imm(<vscale x 4 x i32> %v, i32 signext %elt) {
; CHECK-LABEL: insertelt_nxv4i32_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v10, a0
; CHECK-NEXT:    vslideup.vi v8, v10, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 4 x i32> %v, i32 %elt, i32 3
  ret <vscale x 4 x i32> %r
}

define <vscale x 4 x i32> @insertelt_nxv4i32_idx(<vscale x 4 x i32> %v, i32 signext %elt, i32 zeroext %idx) {
; CHECK-LABEL: insertelt_nxv4i32_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a2, zero, e32, m1, ta, ma
; CHECK-NEXT:    vmv.s.x v10, a0
; CHECK-NEXT:    addi a0, a1, 1
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v10, a1
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 4 x i32> %v, i32 %elt, i32 %idx
  ret <vscale x 4 x i32> %r
}

define <vscale x 8 x i32> @insertelt_nxv8i32_0(<vscale x 8 x i32> %v, i32 signext %elt) {
; CHECK-LABEL: insertelt_nxv8i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 8 x i32> %v, i32 %elt, i32 0
  ret <vscale x 8 x i32> %r
}

define <vscale x 8 x i32> @insertelt_nxv8i32_imm(<vscale x 8 x i32> %v, i32 signext %elt) {
; CHECK-LABEL: insertelt_nxv8i32_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v12, a0
; CHECK-NEXT:    vslideup.vi v8, v12, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 8 x i32> %v, i32 %elt, i32 3
  ret <vscale x 8 x i32> %r
}

define <vscale x 8 x i32> @insertelt_nxv8i32_idx(<vscale x 8 x i32> %v, i32 signext %elt, i32 zeroext %idx) {
; CHECK-LABEL: insertelt_nxv8i32_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a2, zero, e32, m1, ta, ma
; CHECK-NEXT:    vmv.s.x v12, a0
; CHECK-NEXT:    addi a0, a1, 1
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v12, a1
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 8 x i32> %v, i32 %elt, i32 %idx
  ret <vscale x 8 x i32> %r
}

define <vscale x 16 x i32> @insertelt_nxv16i32_0(<vscale x 16 x i32> %v, i32 signext %elt) {
; CHECK-LABEL: insertelt_nxv16i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 16 x i32> %v, i32 %elt, i32 0
  ret <vscale x 16 x i32> %r
}

define <vscale x 16 x i32> @insertelt_nxv16i32_imm(<vscale x 16 x i32> %v, i32 signext %elt) {
; CHECK-LABEL: insertelt_nxv16i32_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v16, a0
; CHECK-NEXT:    vslideup.vi v8, v16, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 16 x i32> %v, i32 %elt, i32 3
  ret <vscale x 16 x i32> %r
}

define <vscale x 16 x i32> @insertelt_nxv16i32_idx(<vscale x 16 x i32> %v, i32 signext %elt, i32 zeroext %idx) {
; CHECK-LABEL: insertelt_nxv16i32_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a2, zero, e32, m1, ta, ma
; CHECK-NEXT:    vmv.s.x v16, a0
; CHECK-NEXT:    addi a0, a1, 1
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v16, a1
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 16 x i32> %v, i32 %elt, i32 %idx
  ret <vscale x 16 x i32> %r
}

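; e64 variants. On rv64 an i64 element fits in a single GPR, so vmv.s.x still
; works directly. The i32 %idx parameter is not marked zeroext here, so the
; generated code zero-extends it with an slli/srli pair before computing
; VL = idx + 1.
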
define <vscale x 1 x i64> @insertelt_nxv1i64_0(<vscale x 1 x i64> %v, i64 %elt) {
; CHECK-LABEL: insertelt_nxv1i64_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e64, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 1 x i64> %v, i64 %elt, i32 0
  ret <vscale x 1 x i64> %r
}

define <vscale x 1 x i64> @insertelt_nxv1i64_imm(<vscale x 1 x i64> %v, i64 %elt) {
; CHECK-LABEL: insertelt_nxv1i64_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e64, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vslideup.vi v8, v9, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 1 x i64> %v, i64 %elt, i32 3
  ret <vscale x 1 x i64> %r
}

define <vscale x 1 x i64> @insertelt_nxv1i64_idx(<vscale x 1 x i64> %v, i64 %elt, i32 %idx) {
; CHECK-LABEL: insertelt_nxv1i64_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a2, zero, e64, m1, ta, ma
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    slli a1, a1, 32
; CHECK-NEXT:    srli a1, a1, 32
; CHECK-NEXT:    addi a0, a1, 1
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v9, a1
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 1 x i64> %v, i64 %elt, i32 %idx
  ret <vscale x 1 x i64> %r
}

define <vscale x 2 x i64> @insertelt_nxv2i64_0(<vscale x 2 x i64> %v, i64 %elt) {
; CHECK-LABEL: insertelt_nxv2i64_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e64, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 2 x i64> %v, i64 %elt, i32 0
  ret <vscale x 2 x i64> %r
}

define <vscale x 2 x i64> @insertelt_nxv2i64_imm(<vscale x 2 x i64> %v, i64 %elt) {
; CHECK-LABEL: insertelt_nxv2i64_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e64, m2, tu, ma
; CHECK-NEXT:    vmv.s.x v10, a0
; CHECK-NEXT:    vslideup.vi v8, v10, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 2 x i64> %v, i64 %elt, i32 3
  ret <vscale x 2 x i64> %r
}

define <vscale x 2 x i64> @insertelt_nxv2i64_idx(<vscale x 2 x i64> %v, i64 %elt, i32 %idx) {
; CHECK-LABEL: insertelt_nxv2i64_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a2, zero, e64, m1, ta, ma
; CHECK-NEXT:    vmv.s.x v10, a0
; CHECK-NEXT:    slli a1, a1, 32
; CHECK-NEXT:    srli a1, a1, 32
; CHECK-NEXT:    addi a0, a1, 1
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v10, a1
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 2 x i64> %v, i64 %elt, i32 %idx
  ret <vscale x 2 x i64> %r
}

define <vscale x 4 x i64> @insertelt_nxv4i64_0(<vscale x 4 x i64> %v, i64 %elt) {
; CHECK-LABEL: insertelt_nxv4i64_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e64, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 4 x i64> %v, i64 %elt, i32 0
  ret <vscale x 4 x i64> %r
}

define <vscale x 4 x i64> @insertelt_nxv4i64_imm(<vscale x 4 x i64> %v, i64 %elt) {
; CHECK-LABEL: insertelt_nxv4i64_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e64, m2, tu, ma
; CHECK-NEXT:    vmv.s.x v12, a0
; CHECK-NEXT:    vslideup.vi v8, v12, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 4 x i64> %v, i64 %elt, i32 3
  ret <vscale x 4 x i64> %r
}

define <vscale x 4 x i64> @insertelt_nxv4i64_idx(<vscale x 4 x i64> %v, i64 %elt, i32 %idx) {
; CHECK-LABEL: insertelt_nxv4i64_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a2, zero, e64, m1, ta, ma
; CHECK-NEXT:    vmv.s.x v12, a0
; CHECK-NEXT:    slli a1, a1, 32
; CHECK-NEXT:    srli a1, a1, 32
; CHECK-NEXT:    addi a0, a1, 1
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v12, a1
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 4 x i64> %v, i64 %elt, i32 %idx
  ret <vscale x 4 x i64> %r
}

define <vscale x 8 x i64> @insertelt_nxv8i64_0(<vscale x 8 x i64> %v, i64 %elt) {
; CHECK-LABEL: insertelt_nxv8i64_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e64, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 8 x i64> %v, i64 %elt, i32 0
  ret <vscale x 8 x i64> %r
}

define <vscale x 8 x i64> @insertelt_nxv8i64_imm(<vscale x 8 x i64> %v, i64 %elt) {
; CHECK-LABEL: insertelt_nxv8i64_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e64, m2, tu, ma
; CHECK-NEXT:    vmv.s.x v16, a0
; CHECK-NEXT:    vslideup.vi v8, v16, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 8 x i64> %v, i64 %elt, i32 3
  ret <vscale x 8 x i64> %r
}

define <vscale x 8 x i64> @insertelt_nxv8i64_idx(<vscale x 8 x i64> %v, i64 %elt, i32 %idx) {
; CHECK-LABEL: insertelt_nxv8i64_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a2, zero, e64, m1, ta, ma
; CHECK-NEXT:    vmv.s.x v16, a0
; CHECK-NEXT:    slli a1, a1, 32
; CHECK-NEXT:    srli a1, a1, 32
; CHECK-NEXT:    addi a0, a1, 1
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v16, a1
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 8 x i64> %v, i64 %elt, i32 %idx
  ret <vscale x 8 x i64> %r
}
