; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zfbfmin,+zvfh,+zvfbfmin,+v -target-abi=ilp32d \
; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zfbfmin,+zvfh,+zvfbfmin,+v -target-abi=lp64d \
; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zfbfmin,+zvfhmin,+zvfbfmin,+v -target-abi=ilp32d \
; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zfbfmin,+zvfhmin,+zvfbfmin,+v -target-abi=lp64d \
; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN

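; Tests insertelement codegen for scalable floating-point vectors (bf16, f16,
; f32, f64) at index 0, at a constant non-zero index, and at a variable index,
; with separate ZVFH/ZVFHMIN prefixes where the half-precision lowering differs.
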
define <vscale x 1 x bfloat> @insertelt_nxv1bf16_0(<vscale x 1 x bfloat> %v, bfloat %elt) {
; CHECK-LABEL: insertelt_nxv1bf16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    fmv.x.h a0, fa0
; CHECK-NEXT:    vsetvli a1, zero, e16, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 1 x bfloat> %v, bfloat %elt, i32 0
  ret <vscale x 1 x bfloat> %r
}

define <vscale x 1 x bfloat> @insertelt_nxv1bf16_imm(<vscale x 1 x bfloat> %v, bfloat %elt) {
; CHECK-LABEL: insertelt_nxv1bf16_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    fmv.x.h a0, fa0
; CHECK-NEXT:    vsetivli zero, 4, e16, mf4, tu, ma
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vslideup.vi v8, v9, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 1 x bfloat> %v, bfloat %elt, i32 3
  ret <vscale x 1 x bfloat> %r
}

define <vscale x 1 x bfloat> @insertelt_nxv1bf16_idx(<vscale x 1 x bfloat> %v, bfloat %elt, i32 zeroext %idx) {
; CHECK-LABEL: insertelt_nxv1bf16_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a1, a0, 1
; CHECK-NEXT:    fmv.x.h a2, fa0
; CHECK-NEXT:    vsetvli a3, zero, e16, m1, ta, ma
; CHECK-NEXT:    vmv.s.x v9, a2
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v9, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 1 x bfloat> %v, bfloat %elt, i32 %idx
  ret <vscale x 1 x bfloat> %r
}

define <vscale x 2 x bfloat> @insertelt_nxv2bf16_0(<vscale x 2 x bfloat> %v, bfloat %elt) {
; CHECK-LABEL: insertelt_nxv2bf16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    fmv.x.h a0, fa0
; CHECK-NEXT:    vsetvli a1, zero, e16, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 2 x bfloat> %v, bfloat %elt, i32 0
  ret <vscale x 2 x bfloat> %r
}

define <vscale x 2 x bfloat> @insertelt_nxv2bf16_imm(<vscale x 2 x bfloat> %v, bfloat %elt) {
; CHECK-LABEL: insertelt_nxv2bf16_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    fmv.x.h a0, fa0
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, tu, ma
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vslideup.vi v8, v9, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 2 x bfloat> %v, bfloat %elt, i32 3
  ret <vscale x 2 x bfloat> %r
}

define <vscale x 2 x bfloat> @insertelt_nxv2bf16_idx(<vscale x 2 x bfloat> %v, bfloat %elt, i32 zeroext %idx) {
; CHECK-LABEL: insertelt_nxv2bf16_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a1, a0, 1
; CHECK-NEXT:    fmv.x.h a2, fa0
; CHECK-NEXT:    vsetvli a3, zero, e16, m1, ta, ma
; CHECK-NEXT:    vmv.s.x v9, a2
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v9, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 2 x bfloat> %v, bfloat %elt, i32 %idx
  ret <vscale x 2 x bfloat> %r
}

define <vscale x 4 x bfloat> @insertelt_nxv4bf16_0(<vscale x 4 x bfloat> %v, bfloat %elt) {
; CHECK-LABEL: insertelt_nxv4bf16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    fmv.x.h a0, fa0
; CHECK-NEXT:    vsetvli a1, zero, e16, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 4 x bfloat> %v, bfloat %elt, i32 0
  ret <vscale x 4 x bfloat> %r
}

define <vscale x 4 x bfloat> @insertelt_nxv4bf16_imm(<vscale x 4 x bfloat> %v, bfloat %elt) {
; CHECK-LABEL: insertelt_nxv4bf16_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    fmv.x.h a0, fa0
; CHECK-NEXT:    vsetivli zero, 4, e16, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vslideup.vi v8, v9, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 4 x bfloat> %v, bfloat %elt, i32 3
  ret <vscale x 4 x bfloat> %r
}

define <vscale x 4 x bfloat> @insertelt_nxv4bf16_idx(<vscale x 4 x bfloat> %v, bfloat %elt, i32 zeroext %idx) {
; CHECK-LABEL: insertelt_nxv4bf16_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a1, a0, 1
; CHECK-NEXT:    fmv.x.h a2, fa0
; CHECK-NEXT:    vsetvli a3, zero, e16, m1, ta, ma
; CHECK-NEXT:    vmv.s.x v9, a2
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v9, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 4 x bfloat> %v, bfloat %elt, i32 %idx
  ret <vscale x 4 x bfloat> %r
}

define <vscale x 8 x bfloat> @insertelt_nxv8bf16_0(<vscale x 8 x bfloat> %v, bfloat %elt) {
; CHECK-LABEL: insertelt_nxv8bf16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    fmv.x.h a0, fa0
; CHECK-NEXT:    vsetvli a1, zero, e16, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 8 x bfloat> %v, bfloat %elt, i32 0
  ret <vscale x 8 x bfloat> %r
}

define <vscale x 8 x bfloat> @insertelt_nxv8bf16_imm(<vscale x 8 x bfloat> %v, bfloat %elt) {
; CHECK-LABEL: insertelt_nxv8bf16_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    fmv.x.h a0, fa0
; CHECK-NEXT:    vsetivli zero, 4, e16, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v10, a0
; CHECK-NEXT:    vslideup.vi v8, v10, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 8 x bfloat> %v, bfloat %elt, i32 3
  ret <vscale x 8 x bfloat> %r
}

define <vscale x 8 x bfloat> @insertelt_nxv8bf16_idx(<vscale x 8 x bfloat> %v, bfloat %elt, i32 zeroext %idx) {
; CHECK-LABEL: insertelt_nxv8bf16_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    fmv.x.h a1, fa0
; CHECK-NEXT:    vsetvli a2, zero, e16, m1, ta, ma
; CHECK-NEXT:    vmv.s.x v10, a1
; CHECK-NEXT:    addi a1, a0, 1
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v10, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 8 x bfloat> %v, bfloat %elt, i32 %idx
  ret <vscale x 8 x bfloat> %r
}

define <vscale x 16 x bfloat> @insertelt_nxv16bf16_0(<vscale x 16 x bfloat> %v, bfloat %elt) {
; CHECK-LABEL: insertelt_nxv16bf16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    fmv.x.h a0, fa0
; CHECK-NEXT:    vsetvli a1, zero, e16, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 16 x bfloat> %v, bfloat %elt, i32 0
  ret <vscale x 16 x bfloat> %r
}

define <vscale x 16 x bfloat> @insertelt_nxv16bf16_imm(<vscale x 16 x bfloat> %v, bfloat %elt) {
; CHECK-LABEL: insertelt_nxv16bf16_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    fmv.x.h a0, fa0
; CHECK-NEXT:    vsetivli zero, 4, e16, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v12, a0
; CHECK-NEXT:    vslideup.vi v8, v12, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 16 x bfloat> %v, bfloat %elt, i32 3
  ret <vscale x 16 x bfloat> %r
}

define <vscale x 16 x bfloat> @insertelt_nxv16bf16_idx(<vscale x 16 x bfloat> %v, bfloat %elt, i32 zeroext %idx) {
; CHECK-LABEL: insertelt_nxv16bf16_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    fmv.x.h a1, fa0
; CHECK-NEXT:    vsetvli a2, zero, e16, m1, ta, ma
; CHECK-NEXT:    vmv.s.x v12, a1
; CHECK-NEXT:    addi a1, a0, 1
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v12, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 16 x bfloat> %v, bfloat %elt, i32 %idx
  ret <vscale x 16 x bfloat> %r
}

define <vscale x 32 x bfloat> @insertelt_nxv32bf16_0(<vscale x 32 x bfloat> %v, bfloat %elt) {
; CHECK-LABEL: insertelt_nxv32bf16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    fmv.x.h a0, fa0
; CHECK-NEXT:    vsetvli a1, zero, e16, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 32 x bfloat> %v, bfloat %elt, i32 0
  ret <vscale x 32 x bfloat> %r
}

define <vscale x 32 x bfloat> @insertelt_nxv32bf16_imm(<vscale x 32 x bfloat> %v, bfloat %elt) {
; CHECK-LABEL: insertelt_nxv32bf16_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    fmv.x.h a0, fa0
; CHECK-NEXT:    vsetivli zero, 4, e16, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v16, a0
; CHECK-NEXT:    vslideup.vi v8, v16, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 32 x bfloat> %v, bfloat %elt, i32 3
  ret <vscale x 32 x bfloat> %r
}

define <vscale x 32 x bfloat> @insertelt_nxv32bf16_idx(<vscale x 32 x bfloat> %v, bfloat %elt, i32 zeroext %idx) {
; CHECK-LABEL: insertelt_nxv32bf16_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    fmv.x.h a1, fa0
; CHECK-NEXT:    vsetvli a2, zero, e16, m1, ta, ma
; CHECK-NEXT:    vmv.s.x v16, a1
; CHECK-NEXT:    addi a1, a0, 1
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v16, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 32 x bfloat> %v, bfloat %elt, i32 %idx
  ret <vscale x 32 x bfloat> %r
}

define <vscale x 1 x half> @insertelt_nxv1f16_0(<vscale x 1 x half> %v, half %elt) {
; ZVFH-LABEL: insertelt_nxv1f16_0:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli a0, zero, e16, m1, tu, ma
; ZVFH-NEXT:    vfmv.s.f v8, fa0
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: insertelt_nxv1f16_0:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    fmv.x.h a0, fa0
; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, m1, tu, ma
; ZVFHMIN-NEXT:    vmv.s.x v8, a0
; ZVFHMIN-NEXT:    ret
  %r = insertelement <vscale x 1 x half> %v, half %elt, i32 0
  ret <vscale x 1 x half> %r
}

define <vscale x 1 x half> @insertelt_nxv1f16_imm(<vscale x 1 x half> %v, half %elt) {
; ZVFH-LABEL: insertelt_nxv1f16_imm:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetivli zero, 4, e16, mf4, tu, ma
; ZVFH-NEXT:    vfmv.s.f v9, fa0
; ZVFH-NEXT:    vslideup.vi v8, v9, 3
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: insertelt_nxv1f16_imm:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    fmv.x.h a0, fa0
; ZVFHMIN-NEXT:    vsetivli zero, 4, e16, mf4, tu, ma
; ZVFHMIN-NEXT:    vmv.s.x v9, a0
; ZVFHMIN-NEXT:    vslideup.vi v8, v9, 3
; ZVFHMIN-NEXT:    ret
  %r = insertelement <vscale x 1 x half> %v, half %elt, i32 3
  ret <vscale x 1 x half> %r
}

define <vscale x 1 x half> @insertelt_nxv1f16_idx(<vscale x 1 x half> %v, half %elt, i32 zeroext %idx) {
; ZVFH-LABEL: insertelt_nxv1f16_idx:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    addi a1, a0, 1
; ZVFH-NEXT:    vsetvli a2, zero, e16, m1, ta, ma
; ZVFH-NEXT:    vfmv.s.f v9, fa0
; ZVFH-NEXT:    vsetvli zero, a1, e16, mf4, tu, ma
; ZVFH-NEXT:    vslideup.vx v8, v9, a0
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: insertelt_nxv1f16_idx:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    addi a1, a0, 1
; ZVFHMIN-NEXT:    fmv.x.h a2, fa0
; ZVFHMIN-NEXT:    vsetvli a3, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT:    vmv.s.x v9, a2
; ZVFHMIN-NEXT:    vsetvli zero, a1, e16, mf4, tu, ma
; ZVFHMIN-NEXT:    vslideup.vx v8, v9, a0
; ZVFHMIN-NEXT:    ret
  %r = insertelement <vscale x 1 x half> %v, half %elt, i32 %idx
  ret <vscale x 1 x half> %r
}

define <vscale x 2 x half> @insertelt_nxv2f16_0(<vscale x 2 x half> %v, half %elt) {
; ZVFH-LABEL: insertelt_nxv2f16_0:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli a0, zero, e16, m1, tu, ma
; ZVFH-NEXT:    vfmv.s.f v8, fa0
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: insertelt_nxv2f16_0:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    fmv.x.h a0, fa0
; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, m1, tu, ma
; ZVFHMIN-NEXT:    vmv.s.x v8, a0
; ZVFHMIN-NEXT:    ret
  %r = insertelement <vscale x 2 x half> %v, half %elt, i32 0
  ret <vscale x 2 x half> %r
}

define <vscale x 2 x half> @insertelt_nxv2f16_imm(<vscale x 2 x half> %v, half %elt) {
; ZVFH-LABEL: insertelt_nxv2f16_imm:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetivli zero, 4, e16, mf2, tu, ma
; ZVFH-NEXT:    vfmv.s.f v9, fa0
; ZVFH-NEXT:    vslideup.vi v8, v9, 3
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: insertelt_nxv2f16_imm:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    fmv.x.h a0, fa0
; ZVFHMIN-NEXT:    vsetivli zero, 4, e16, mf2, tu, ma
; ZVFHMIN-NEXT:    vmv.s.x v9, a0
; ZVFHMIN-NEXT:    vslideup.vi v8, v9, 3
; ZVFHMIN-NEXT:    ret
  %r = insertelement <vscale x 2 x half> %v, half %elt, i32 3
  ret <vscale x 2 x half> %r
}

define <vscale x 2 x half> @insertelt_nxv2f16_idx(<vscale x 2 x half> %v, half %elt, i32 zeroext %idx) {
; ZVFH-LABEL: insertelt_nxv2f16_idx:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    addi a1, a0, 1
; ZVFH-NEXT:    vsetvli a2, zero, e16, m1, ta, ma
; ZVFH-NEXT:    vfmv.s.f v9, fa0
; ZVFH-NEXT:    vsetvli zero, a1, e16, mf2, tu, ma
; ZVFH-NEXT:    vslideup.vx v8, v9, a0
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: insertelt_nxv2f16_idx:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    addi a1, a0, 1
; ZVFHMIN-NEXT:    fmv.x.h a2, fa0
; ZVFHMIN-NEXT:    vsetvli a3, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT:    vmv.s.x v9, a2
; ZVFHMIN-NEXT:    vsetvli zero, a1, e16, mf2, tu, ma
; ZVFHMIN-NEXT:    vslideup.vx v8, v9, a0
; ZVFHMIN-NEXT:    ret
  %r = insertelement <vscale x 2 x half> %v, half %elt, i32 %idx
  ret <vscale x 2 x half> %r
}

define <vscale x 4 x half> @insertelt_nxv4f16_0(<vscale x 4 x half> %v, half %elt) {
; ZVFH-LABEL: insertelt_nxv4f16_0:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli a0, zero, e16, m1, tu, ma
; ZVFH-NEXT:    vfmv.s.f v8, fa0
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: insertelt_nxv4f16_0:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    fmv.x.h a0, fa0
; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, m1, tu, ma
; ZVFHMIN-NEXT:    vmv.s.x v8, a0
; ZVFHMIN-NEXT:    ret
  %r = insertelement <vscale x 4 x half> %v, half %elt, i32 0
  ret <vscale x 4 x half> %r
}

define <vscale x 4 x half> @insertelt_nxv4f16_imm(<vscale x 4 x half> %v, half %elt) {
; ZVFH-LABEL: insertelt_nxv4f16_imm:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetivli zero, 4, e16, m1, tu, ma
; ZVFH-NEXT:    vfmv.s.f v9, fa0
; ZVFH-NEXT:    vslideup.vi v8, v9, 3
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: insertelt_nxv4f16_imm:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    fmv.x.h a0, fa0
; ZVFHMIN-NEXT:    vsetivli zero, 4, e16, m1, tu, ma
; ZVFHMIN-NEXT:    vmv.s.x v9, a0
; ZVFHMIN-NEXT:    vslideup.vi v8, v9, 3
; ZVFHMIN-NEXT:    ret
  %r = insertelement <vscale x 4 x half> %v, half %elt, i32 3
  ret <vscale x 4 x half> %r
}

define <vscale x 4 x half> @insertelt_nxv4f16_idx(<vscale x 4 x half> %v, half %elt, i32 zeroext %idx) {
; ZVFH-LABEL: insertelt_nxv4f16_idx:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    addi a1, a0, 1
; ZVFH-NEXT:    vsetvli a2, zero, e16, m1, ta, ma
; ZVFH-NEXT:    vfmv.s.f v9, fa0
; ZVFH-NEXT:    vsetvli zero, a1, e16, m1, tu, ma
; ZVFH-NEXT:    vslideup.vx v8, v9, a0
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: insertelt_nxv4f16_idx:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    addi a1, a0, 1
; ZVFHMIN-NEXT:    fmv.x.h a2, fa0
; ZVFHMIN-NEXT:    vsetvli a3, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT:    vmv.s.x v9, a2
; ZVFHMIN-NEXT:    vsetvli zero, a1, e16, m1, tu, ma
; ZVFHMIN-NEXT:    vslideup.vx v8, v9, a0
; ZVFHMIN-NEXT:    ret
  %r = insertelement <vscale x 4 x half> %v, half %elt, i32 %idx
  ret <vscale x 4 x half> %r
}

define <vscale x 8 x half> @insertelt_nxv8f16_0(<vscale x 8 x half> %v, half %elt) {
; ZVFH-LABEL: insertelt_nxv8f16_0:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli a0, zero, e16, m1, tu, ma
; ZVFH-NEXT:    vfmv.s.f v8, fa0
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: insertelt_nxv8f16_0:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    fmv.x.h a0, fa0
; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, m1, tu, ma
; ZVFHMIN-NEXT:    vmv.s.x v8, a0
; ZVFHMIN-NEXT:    ret
  %r = insertelement <vscale x 8 x half> %v, half %elt, i32 0
  ret <vscale x 8 x half> %r
}

define <vscale x 8 x half> @insertelt_nxv8f16_imm(<vscale x 8 x half> %v, half %elt) {
; ZVFH-LABEL: insertelt_nxv8f16_imm:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetivli zero, 4, e16, m1, tu, ma
; ZVFH-NEXT:    vfmv.s.f v10, fa0
; ZVFH-NEXT:    vslideup.vi v8, v10, 3
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: insertelt_nxv8f16_imm:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    fmv.x.h a0, fa0
; ZVFHMIN-NEXT:    vsetivli zero, 4, e16, m1, tu, ma
; ZVFHMIN-NEXT:    vmv.s.x v10, a0
; ZVFHMIN-NEXT:    vslideup.vi v8, v10, 3
; ZVFHMIN-NEXT:    ret
  %r = insertelement <vscale x 8 x half> %v, half %elt, i32 3
  ret <vscale x 8 x half> %r
}

define <vscale x 8 x half> @insertelt_nxv8f16_idx(<vscale x 8 x half> %v, half %elt, i32 zeroext %idx) {
; ZVFH-LABEL: insertelt_nxv8f16_idx:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
; ZVFH-NEXT:    vfmv.s.f v10, fa0
; ZVFH-NEXT:    addi a1, a0, 1
; ZVFH-NEXT:    vsetvli zero, a1, e16, m2, tu, ma
; ZVFH-NEXT:    vslideup.vx v8, v10, a0
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: insertelt_nxv8f16_idx:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    fmv.x.h a1, fa0
; ZVFHMIN-NEXT:    vsetvli a2, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT:    vmv.s.x v10, a1
; ZVFHMIN-NEXT:    addi a1, a0, 1
; ZVFHMIN-NEXT:    vsetvli zero, a1, e16, m2, tu, ma
; ZVFHMIN-NEXT:    vslideup.vx v8, v10, a0
; ZVFHMIN-NEXT:    ret
  %r = insertelement <vscale x 8 x half> %v, half %elt, i32 %idx
  ret <vscale x 8 x half> %r
}

define <vscale x 16 x half> @insertelt_nxv16f16_0(<vscale x 16 x half> %v, half %elt) {
; ZVFH-LABEL: insertelt_nxv16f16_0:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli a0, zero, e16, m1, tu, ma
; ZVFH-NEXT:    vfmv.s.f v8, fa0
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: insertelt_nxv16f16_0:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    fmv.x.h a0, fa0
; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, m1, tu, ma
; ZVFHMIN-NEXT:    vmv.s.x v8, a0
; ZVFHMIN-NEXT:    ret
  %r = insertelement <vscale x 16 x half> %v, half %elt, i32 0
  ret <vscale x 16 x half> %r
}

define <vscale x 16 x half> @insertelt_nxv16f16_imm(<vscale x 16 x half> %v, half %elt) {
; ZVFH-LABEL: insertelt_nxv16f16_imm:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetivli zero, 4, e16, m1, tu, ma
; ZVFH-NEXT:    vfmv.s.f v12, fa0
; ZVFH-NEXT:    vslideup.vi v8, v12, 3
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: insertelt_nxv16f16_imm:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    fmv.x.h a0, fa0
; ZVFHMIN-NEXT:    vsetivli zero, 4, e16, m1, tu, ma
; ZVFHMIN-NEXT:    vmv.s.x v12, a0
; ZVFHMIN-NEXT:    vslideup.vi v8, v12, 3
; ZVFHMIN-NEXT:    ret
  %r = insertelement <vscale x 16 x half> %v, half %elt, i32 3
  ret <vscale x 16 x half> %r
}

define <vscale x 16 x half> @insertelt_nxv16f16_idx(<vscale x 16 x half> %v, half %elt, i32 zeroext %idx) {
; ZVFH-LABEL: insertelt_nxv16f16_idx:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
; ZVFH-NEXT:    vfmv.s.f v12, fa0
; ZVFH-NEXT:    addi a1, a0, 1
; ZVFH-NEXT:    vsetvli zero, a1, e16, m4, tu, ma
; ZVFH-NEXT:    vslideup.vx v8, v12, a0
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: insertelt_nxv16f16_idx:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    fmv.x.h a1, fa0
; ZVFHMIN-NEXT:    vsetvli a2, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT:    vmv.s.x v12, a1
; ZVFHMIN-NEXT:    addi a1, a0, 1
; ZVFHMIN-NEXT:    vsetvli zero, a1, e16, m4, tu, ma
; ZVFHMIN-NEXT:    vslideup.vx v8, v12, a0
; ZVFHMIN-NEXT:    ret
  %r = insertelement <vscale x 16 x half> %v, half %elt, i32 %idx
  ret <vscale x 16 x half> %r
}

define <vscale x 32 x half> @insertelt_nxv32f16_0(<vscale x 32 x half> %v, half %elt) {
; ZVFH-LABEL: insertelt_nxv32f16_0:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli a0, zero, e16, m1, tu, ma
; ZVFH-NEXT:    vfmv.s.f v8, fa0
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: insertelt_nxv32f16_0:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    fmv.x.h a0, fa0
; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, m1, tu, ma
; ZVFHMIN-NEXT:    vmv.s.x v8, a0
; ZVFHMIN-NEXT:    ret
  %r = insertelement <vscale x 32 x half> %v, half %elt, i32 0
  ret <vscale x 32 x half> %r
}

define <vscale x 32 x half> @insertelt_nxv32f16_imm(<vscale x 32 x half> %v, half %elt) {
; ZVFH-LABEL: insertelt_nxv32f16_imm:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetivli zero, 4, e16, m1, tu, ma
; ZVFH-NEXT:    vfmv.s.f v16, fa0
; ZVFH-NEXT:    vslideup.vi v8, v16, 3
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: insertelt_nxv32f16_imm:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    fmv.x.h a0, fa0
; ZVFHMIN-NEXT:    vsetivli zero, 4, e16, m1, tu, ma
; ZVFHMIN-NEXT:    vmv.s.x v16, a0
; ZVFHMIN-NEXT:    vslideup.vi v8, v16, 3
; ZVFHMIN-NEXT:    ret
  %r = insertelement <vscale x 32 x half> %v, half %elt, i32 3
  ret <vscale x 32 x half> %r
}

define <vscale x 32 x half> @insertelt_nxv32f16_idx(<vscale x 32 x half> %v, half %elt, i32 zeroext %idx) {
; ZVFH-LABEL: insertelt_nxv32f16_idx:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
; ZVFH-NEXT:    vfmv.s.f v16, fa0
; ZVFH-NEXT:    addi a1, a0, 1
; ZVFH-NEXT:    vsetvli zero, a1, e16, m8, tu, ma
; ZVFH-NEXT:    vslideup.vx v8, v16, a0
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: insertelt_nxv32f16_idx:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    fmv.x.h a1, fa0
; ZVFHMIN-NEXT:    vsetvli a2, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT:    vmv.s.x v16, a1
; ZVFHMIN-NEXT:    addi a1, a0, 1
; ZVFHMIN-NEXT:    vsetvli zero, a1, e16, m8, tu, ma
; ZVFHMIN-NEXT:    vslideup.vx v8, v16, a0
; ZVFHMIN-NEXT:    ret
  %r = insertelement <vscale x 32 x half> %v, half %elt, i32 %idx
  ret <vscale x 32 x half> %r
}

define <vscale x 1 x float> @insertelt_nxv1f32_0(<vscale x 1 x float> %v, float %elt) {
; CHECK-LABEL: insertelt_nxv1f32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, tu, ma
; CHECK-NEXT:    vfmv.s.f v8, fa0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 1 x float> %v, float %elt, i32 0
  ret <vscale x 1 x float> %r
}

define <vscale x 1 x float> @insertelt_nxv1f32_imm(<vscale x 1 x float> %v, float %elt) {
; CHECK-LABEL: insertelt_nxv1f32_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, mf2, tu, ma
; CHECK-NEXT:    vfmv.s.f v9, fa0
; CHECK-NEXT:    vslideup.vi v8, v9, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 1 x float> %v, float %elt, i32 3
  ret <vscale x 1 x float> %r
}

define <vscale x 1 x float> @insertelt_nxv1f32_idx(<vscale x 1 x float> %v, float %elt, i32 zeroext %idx) {
; CHECK-LABEL: insertelt_nxv1f32_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a1, a0, 1
; CHECK-NEXT:    vsetvli a2, zero, e32, m1, ta, ma
; CHECK-NEXT:    vfmv.s.f v9, fa0
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v9, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 1 x float> %v, float %elt, i32 %idx
  ret <vscale x 1 x float> %r
}

define <vscale x 2 x float> @insertelt_nxv2f32_0(<vscale x 2 x float> %v, float %elt) {
; CHECK-LABEL: insertelt_nxv2f32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, tu, ma
; CHECK-NEXT:    vfmv.s.f v8, fa0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 2 x float> %v, float %elt, i32 0
  ret <vscale x 2 x float> %r
}

define <vscale x 2 x float> @insertelt_nxv2f32_imm(<vscale x 2 x float> %v, float %elt) {
; CHECK-LABEL: insertelt_nxv2f32_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, tu, ma
; CHECK-NEXT:    vfmv.s.f v9, fa0
; CHECK-NEXT:    vslideup.vi v8, v9, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 2 x float> %v, float %elt, i32 3
  ret <vscale x 2 x float> %r
}

define <vscale x 2 x float> @insertelt_nxv2f32_idx(<vscale x 2 x float> %v, float %elt, i32 zeroext %idx) {
; CHECK-LABEL: insertelt_nxv2f32_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a1, a0, 1
; CHECK-NEXT:    vsetvli a2, zero, e32, m1, ta, ma
; CHECK-NEXT:    vfmv.s.f v9, fa0
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v9, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 2 x float> %v, float %elt, i32 %idx
  ret <vscale x 2 x float> %r
}

define <vscale x 4 x float> @insertelt_nxv4f32_0(<vscale x 4 x float> %v, float %elt) {
; CHECK-LABEL: insertelt_nxv4f32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, tu, ma
; CHECK-NEXT:    vfmv.s.f v8, fa0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 4 x float> %v, float %elt, i32 0
  ret <vscale x 4 x float> %r
}

define <vscale x 4 x float> @insertelt_nxv4f32_imm(<vscale x 4 x float> %v, float %elt) {
; CHECK-LABEL: insertelt_nxv4f32_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, tu, ma
; CHECK-NEXT:    vfmv.s.f v10, fa0
; CHECK-NEXT:    vslideup.vi v8, v10, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 4 x float> %v, float %elt, i32 3
  ret <vscale x 4 x float> %r
}

define <vscale x 4 x float> @insertelt_nxv4f32_idx(<vscale x 4 x float> %v, float %elt, i32 zeroext %idx) {
; CHECK-LABEL: insertelt_nxv4f32_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
; CHECK-NEXT:    vfmv.s.f v10, fa0
; CHECK-NEXT:    addi a1, a0, 1
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v10, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 4 x float> %v, float %elt, i32 %idx
  ret <vscale x 4 x float> %r
}

define <vscale x 8 x float> @insertelt_nxv8f32_0(<vscale x 8 x float> %v, float %elt) {
; CHECK-LABEL: insertelt_nxv8f32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, tu, ma
; CHECK-NEXT:    vfmv.s.f v8, fa0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 8 x float> %v, float %elt, i32 0
  ret <vscale x 8 x float> %r
}

define <vscale x 8 x float> @insertelt_nxv8f32_imm(<vscale x 8 x float> %v, float %elt) {
; CHECK-LABEL: insertelt_nxv8f32_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, tu, ma
; CHECK-NEXT:    vfmv.s.f v12, fa0
; CHECK-NEXT:    vslideup.vi v8, v12, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 8 x float> %v, float %elt, i32 3
  ret <vscale x 8 x float> %r
}

define <vscale x 8 x float> @insertelt_nxv8f32_idx(<vscale x 8 x float> %v, float %elt, i32 zeroext %idx) {
; CHECK-LABEL: insertelt_nxv8f32_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
; CHECK-NEXT:    vfmv.s.f v12, fa0
; CHECK-NEXT:    addi a1, a0, 1
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v12, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 8 x float> %v, float %elt, i32 %idx
  ret <vscale x 8 x float> %r
}

define <vscale x 16 x float> @insertelt_nxv16f32_0(<vscale x 16 x float> %v, float %elt) {
; CHECK-LABEL: insertelt_nxv16f32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, tu, ma
; CHECK-NEXT:    vfmv.s.f v8, fa0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 16 x float> %v, float %elt, i32 0
  ret <vscale x 16 x float> %r
}

define <vscale x 16 x float> @insertelt_nxv16f32_imm(<vscale x 16 x float> %v, float %elt) {
; CHECK-LABEL: insertelt_nxv16f32_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, tu, ma
; CHECK-NEXT:    vfmv.s.f v16, fa0
; CHECK-NEXT:    vslideup.vi v8, v16, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 16 x float> %v, float %elt, i32 3
  ret <vscale x 16 x float> %r
}

define <vscale x 16 x float> @insertelt_nxv16f32_idx(<vscale x 16 x float> %v, float %elt, i32 zeroext %idx) {
; CHECK-LABEL: insertelt_nxv16f32_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
; CHECK-NEXT:    vfmv.s.f v16, fa0
; CHECK-NEXT:    addi a1, a0, 1
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v16, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 16 x float> %v, float %elt, i32 %idx
  ret <vscale x 16 x float> %r
}

define <vscale x 1 x double> @insertelt_nxv1f64_0(<vscale x 1 x double> %v, double %elt) {
; CHECK-LABEL: insertelt_nxv1f64_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, tu, ma
; CHECK-NEXT:    vfmv.s.f v8, fa0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 1 x double> %v, double %elt, i32 0
  ret <vscale x 1 x double> %r
}

define <vscale x 1 x double> @insertelt_nxv1f64_imm(<vscale x 1 x double> %v, double %elt) {
; CHECK-LABEL: insertelt_nxv1f64_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e64, m1, tu, ma
; CHECK-NEXT:    vfmv.s.f v9, fa0
; CHECK-NEXT:    vslideup.vi v8, v9, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 1 x double> %v, double %elt, i32 3
  ret <vscale x 1 x double> %r
}

define <vscale x 1 x double> @insertelt_nxv1f64_idx(<vscale x 1 x double> %v, double %elt, i32 zeroext %idx) {
; CHECK-LABEL: insertelt_nxv1f64_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a1, a0, 1
; CHECK-NEXT:    vsetvli a2, zero, e64, m1, ta, ma
; CHECK-NEXT:    vfmv.s.f v9, fa0
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v9, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 1 x double> %v, double %elt, i32 %idx
  ret <vscale x 1 x double> %r
}

define <vscale x 2 x double> @insertelt_nxv2f64_0(<vscale x 2 x double> %v, double %elt) {
; CHECK-LABEL: insertelt_nxv2f64_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, tu, ma
; CHECK-NEXT:    vfmv.s.f v8, fa0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 2 x double> %v, double %elt, i32 0
  ret <vscale x 2 x double> %r
}

define <vscale x 2 x double> @insertelt_nxv2f64_imm(<vscale x 2 x double> %v, double %elt) {
; CHECK-LABEL: insertelt_nxv2f64_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e64, m2, tu, ma
; CHECK-NEXT:    vfmv.s.f v10, fa0
; CHECK-NEXT:    vslideup.vi v8, v10, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 2 x double> %v, double %elt, i32 3
  ret <vscale x 2 x double> %r
}

define <vscale x 2 x double> @insertelt_nxv2f64_idx(<vscale x 2 x double> %v, double %elt, i32 zeroext %idx) {
; CHECK-LABEL: insertelt_nxv2f64_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
; CHECK-NEXT:    vfmv.s.f v10, fa0
; CHECK-NEXT:    addi a1, a0, 1
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v10, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 2 x double> %v, double %elt, i32 %idx
  ret <vscale x 2 x double> %r
}

define <vscale x 4 x double> @insertelt_nxv4f64_0(<vscale x 4 x double> %v, double %elt) {
; CHECK-LABEL: insertelt_nxv4f64_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, tu, ma
; CHECK-NEXT:    vfmv.s.f v8, fa0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 4 x double> %v, double %elt, i32 0
  ret <vscale x 4 x double> %r
}

define <vscale x 4 x double> @insertelt_nxv4f64_imm(<vscale x 4 x double> %v, double %elt) {
; CHECK-LABEL: insertelt_nxv4f64_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e64, m2, tu, ma
; CHECK-NEXT:    vfmv.s.f v12, fa0
; CHECK-NEXT:    vslideup.vi v8, v12, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 4 x double> %v, double %elt, i32 3
  ret <vscale x 4 x double> %r
}

define <vscale x 4 x double> @insertelt_nxv4f64_idx(<vscale x 4 x double> %v, double %elt, i32 zeroext %idx) {
; CHECK-LABEL: insertelt_nxv4f64_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
; CHECK-NEXT:    vfmv.s.f v12, fa0
; CHECK-NEXT:    addi a1, a0, 1
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v12, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 4 x double> %v, double %elt, i32 %idx
  ret <vscale x 4 x double> %r
}

define <vscale x 8 x double> @insertelt_nxv8f64_0(<vscale x 8 x double> %v, double %elt) {
; CHECK-LABEL: insertelt_nxv8f64_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, tu, ma
; CHECK-NEXT:    vfmv.s.f v8, fa0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 8 x double> %v, double %elt, i32 0
  ret <vscale x 8 x double> %r
}

define <vscale x 8 x double> @insertelt_nxv8f64_imm(<vscale x 8 x double> %v, double %elt) {
; CHECK-LABEL: insertelt_nxv8f64_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e64, m2, tu, ma
; CHECK-NEXT:    vfmv.s.f v16, fa0
; CHECK-NEXT:    vslideup.vi v8, v16, 3
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 8 x double> %v, double %elt, i32 3
  ret <vscale x 8 x double> %r
}

define <vscale x 8 x double> @insertelt_nxv8f64_idx(<vscale x 8 x double> %v, double %elt, i32 zeroext %idx) {
; CHECK-LABEL: insertelt_nxv8f64_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
; CHECK-NEXT:    vfmv.s.f v16, fa0
; CHECK-NEXT:    addi a1, a0, 1
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v16, a0
; CHECK-NEXT:    ret
  %r = insertelement <vscale x 8 x double> %v, double %elt, i32 %idx
  ret <vscale x 8 x double> %r
}
