; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s

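; Fixed-length vector return values that fit in a vector register are
; returned in v8; the SEW/LMUL pair is chosen to exactly cover the type
; (e8/mf4 for <4 x i8>, e32/m1 for <4 x i32>, and so on).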
define <4 x i8> @ret_v4i8(ptr %p) {
; CHECK-LABEL: ret_v4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    ret
  %v = load <4 x i8>, ptr %p
  ret <4 x i8> %v
}

define <4 x i32> @ret_v4i32(ptr %p) {
; CHECK-LABEL: ret_v4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    ret
  %v = load <4 x i32>, ptr %p
  ret <4 x i32> %v
}

define <8 x i32> @ret_v8i32(ptr %p) {
; CHECK-LABEL: ret_v8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    ret
  %v = load <8 x i32>, ptr %p
  ret <8 x i32> %v
}

define <16 x i64> @ret_v16i64(ptr %p) {
; CHECK-LABEL: ret_v16i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0)
; CHECK-NEXT:    ret
  %v = load <16 x i64>, ptr %p
  ret <16 x i64> %v
}

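; Mask vectors are returned in v0, the mask register.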
define <8 x i1> @ret_mask_v8i1(ptr %p) {
; CHECK-LABEL: ret_mask_v8i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vlm.v v0, (a0)
; CHECK-NEXT:    ret
  %v = load <8 x i1>, ptr %p
  ret <8 x i1> %v
}

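; A VL of 32 does not fit in vsetivli's 5-bit immediate (maximum 31), so it
; is materialized in a GPR and vsetvli is used instead.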
define <32 x i1> @ret_mask_v32i1(ptr %p) {
; CHECK-LABEL: ret_mask_v32i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 32
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vlm.v v0, (a0)
; CHECK-NEXT:    ret
  %v = load <32 x i1>, ptr %p
  ret <32 x i1> %v
}

; Return the vector split across registers v8-v23: the two <32 x i32> halves
; come back in v8m8 and v16m8.
define <64 x i32> @ret_split_v64i32(ptr %x) {
; CHECK-LABEL: ret_split_v64i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 32
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    addi a0, a0, 128
; CHECK-NEXT:    vle32.v v16, (a0)
; CHECK-NEXT:    ret
  %v = load <64 x i32>, ptr %x
  ret <64 x i32> %v
}

; Return the vector fully via the stack: <128 x i32> does not fit in v8-v23
; even when split, so the caller passes the address of the return buffer in
; a0 and %x arrives in a1.
define <128 x i32> @ret_split_v128i32(ptr %x) {
; CHECK-LABEL: ret_split_v128i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a2, a1, 128
; CHECK-NEXT:    li a3, 32
; CHECK-NEXT:    vsetvli zero, a3, e32, m8, ta, ma
; CHECK-NEXT:    vle32.v v8, (a2)
; CHECK-NEXT:    addi a2, a1, 256
; CHECK-NEXT:    vle32.v v16, (a2)
; CHECK-NEXT:    addi a2, a1, 384
; CHECK-NEXT:    vle32.v v24, (a1)
; CHECK-NEXT:    addi a1, a0, 384
; CHECK-NEXT:    vle32.v v0, (a2)
; CHECK-NEXT:    addi a2, a0, 256
; CHECK-NEXT:    vse32.v v24, (a0)
; CHECK-NEXT:    addi a0, a0, 128
; CHECK-NEXT:    vse32.v v0, (a1)
; CHECK-NEXT:    vse32.v v16, (a2)
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    ret
  %v = load <128 x i32>, ptr %x
  ret <128 x i32> %v
}

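; Vector arguments are passed in v8-v23, starting at v8; each argument takes
; the next register (group) with suitable LMUL alignment.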
define <4 x i8> @ret_v4i8_param_v4i8(<4 x i8> %v) {
; CHECK-LABEL: ret_v4i8_param_v4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT:    vadd.vi v8, v8, 2
; CHECK-NEXT:    ret
  %r = add <4 x i8> %v, <i8 2, i8 2, i8 2, i8 2>
  ret <4 x i8> %r
}

define <4 x i8> @ret_v4i8_param_v4i8_v4i8(<4 x i8> %v, <4 x i8> %w) {
; CHECK-LABEL: ret_v4i8_param_v4i8_v4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    ret
  %r = add <4 x i8> %v, %w
  ret <4 x i8> %r
}

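; An LMUL=2 argument must start at an even-numbered register, so %v occupies
; v8v9 and %w occupies v10v11.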
define <4 x i64> @ret_v4i64_param_v4i64_v4i64(<4 x i64> %v, <4 x i64> %w) {
; CHECK-LABEL: ret_v4i64_param_v4i64_v4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v10
; CHECK-NEXT:    ret
  %r = add <4 x i64> %v, %w
  ret <4 x i64> %r
}

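; The first mask argument is passed in v0; subsequent masks use the ordinary
; argument registers, so %w arrives in v8.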
define <8 x i1> @ret_v8i1_param_v8i1_v8i1(<8 x i1> %v, <8 x i1> %w) {
; CHECK-LABEL: ret_v8i1_param_v8i1_v8i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vmxor.mm v0, v0, v8
; CHECK-NEXT:    ret
  %r = xor <8 x i1> %v, %w
  ret <8 x i1> %r
}

define <32 x i1> @ret_v32i1_param_v32i1_v32i1(<32 x i1> %v, <32 x i1> %w) {
; CHECK-LABEL: ret_v32i1_param_v32i1_v32i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 32
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vmand.mm v0, v0, v8
; CHECK-NEXT:    ret
  %r = and <32 x i1> %v, %w
  ret <32 x i1> %r
}

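; Three m8 arguments need 24 registers but only v8-v23 are available, so %z
; is passed indirectly: a0 holds its address and the scalar %w shifts to a1.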
define <32 x i32> @ret_v32i32_param_v32i32_v32i32_v32i32_i32(<32 x i32> %x, <32 x i32> %y, <32 x i32> %z, i32 %w) {
; CHECK-LABEL: ret_v32i32_param_v32i32_v32i32_v32i32_i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a2, 32
; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
; CHECK-NEXT:    vle32.v v24, (a0)
; CHECK-NEXT:    vadd.vv v8, v8, v16
; CHECK-NEXT:    vadd.vv v8, v8, v24
; CHECK-NEXT:    vadd.vx v8, v8, a1
; CHECK-NEXT:    ret
  %r = add <32 x i32> %x, %y
  %s = add <32 x i32> %r, %z
  %head = insertelement <32 x i32> poison, i32 %w, i32 0
  %splat = shufflevector <32 x i32> %head, <32 x i32> poison, <32 x i32> zeroinitializer
  %t = add <32 x i32> %s, %splat
  ret <32 x i32> %t
}

declare <32 x i32> @ext2(<32 x i32>, <32 x i32>, i32, i32)
declare <32 x i32> @ext3(<32 x i32>, <32 x i32>, <32 x i32>, i32, i32)

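; The call to ext2 swaps %x and %y between v8m8 and v16m8 (via v24) and
; passes %w in a0 and the constant 2 in a1.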
define <32 x i32> @ret_v32i32_call_v32i32_v32i32_i32(<32 x i32> %x, <32 x i32> %y, i32 %w) {
; CHECK-LABEL: ret_v32i32_call_v32i32_v32i32_i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; CHECK-NEXT:    .cfi_offset ra, -8
; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT:    vmv8r.v v24, v8
; CHECK-NEXT:    li a1, 2
; CHECK-NEXT:    vmv8r.v v8, v16
; CHECK-NEXT:    vmv8r.v v16, v24
; CHECK-NEXT:    call ext2
; CHECK-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; CHECK-NEXT:    .cfi_restore ra
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    .cfi_def_cfa_offset 0
; CHECK-NEXT:    ret
  %t = call <32 x i32> @ext2(<32 x i32> %y, <32 x i32> %x, i32 %w, i32 2)
  ret <32 x i32> %t
}

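; ext3 takes three m8 vectors, so the third one is passed indirectly: the
; caller spills %x to a 128-byte-aligned stack slot (hence the frame pointer
; and the andi sp, sp, -128 realignment) and passes its address in a0, while
; %w stays in a1 and the constant 42 goes in a2.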
define <32 x i32> @ret_v32i32_call_v32i32_v32i32_v32i32_i32(<32 x i32> %x, <32 x i32> %y, <32 x i32> %z, i32 %w) {
; CHECK-LABEL: ret_v32i32_call_v32i32_v32i32_v32i32_i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi sp, sp, -256
; CHECK-NEXT:    .cfi_def_cfa_offset 256
; CHECK-NEXT:    sd ra, 248(sp) # 8-byte Folded Spill
; CHECK-NEXT:    sd s0, 240(sp) # 8-byte Folded Spill
; CHECK-NEXT:    .cfi_offset ra, -8
; CHECK-NEXT:    .cfi_offset s0, -16
; CHECK-NEXT:    addi s0, sp, 256
; CHECK-NEXT:    .cfi_def_cfa s0, 0
; CHECK-NEXT:    andi sp, sp, -128
; CHECK-NEXT:    li a2, 32
; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
; CHECK-NEXT:    vle32.v v24, (a0)
; CHECK-NEXT:    mv a3, sp
; CHECK-NEXT:    mv a0, sp
; CHECK-NEXT:    li a2, 42
; CHECK-NEXT:    vse32.v v8, (a3)
; CHECK-NEXT:    vmv.v.v v8, v24
; CHECK-NEXT:    call ext3
; CHECK-NEXT:    addi sp, s0, -256
; CHECK-NEXT:    .cfi_def_cfa sp, 256
; CHECK-NEXT:    ld ra, 248(sp) # 8-byte Folded Reload
; CHECK-NEXT:    ld s0, 240(sp) # 8-byte Folded Reload
; CHECK-NEXT:    .cfi_restore ra
; CHECK-NEXT:    .cfi_restore s0
; CHECK-NEXT:    addi sp, sp, 256
; CHECK-NEXT:    .cfi_def_cfa_offset 0
; CHECK-NEXT:    ret
  %t = call <32 x i32> @ext3(<32 x i32> %z, <32 x i32> %y, <32 x i32> %x, i32 %w, i32 42)
  ret <32 x i32> %t
}

; Test various configurations of split vector types where the values are split
; across both registers and the stack. The five <2 x i32> arguments occupy
; v8-v12; the <32 x i32> values are then assigned, depending on the LMUL of
; the split pieces, as:
;   m4 pieces:  v16m4 y[0:15], v20m4 y[16:31], a0+0 z[0:15],
;               a0+64 z[16:31]
;   m2 pieces:  v14m2 y[0:7], v16m2 y[8:15], v18m2 y[16:23],
;               v20m2 y[24:31], v22m2 z[0:7], a1+0 z[8:15], a1+32 z[16:23],
;               a1+64 z[24:31]
;   m1 pieces:  v13 y[0:3], v14 y[4:7], v15 y[8:11],
;               v16 y[12:15], v17 y[16:19], v18 y[20:23], v19 y[24:27],
;               v20 y[28:31], v21 z[0:3], v22 z[4:7], v23 z[8:11],
;               a1+0 z[12:15], a1+16 z[16:19], a1+32 z[20:23], a1+48 z[24:27],
;               a1+64 z[28:31]
define <32 x i32> @split_vector_args(<2 x i32>,<2 x i32>,<2 x i32>,<2 x i32>,<2 x i32>, <32 x i32> %y, <32 x i32> %z) {
; CHECK-LABEL: split_vector_args:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 32
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vadd.vv v8, v16, v8
; CHECK-NEXT:    ret
  %v0 = add <32 x i32> %y, %z
  ret <32 x i32> %v0
}

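; The caller broadcasts %a into v8-v12, passes the first <32 x i32> copy in
; v16m8, and spills the second copy to a 128-byte-aligned stack slot whose
; address goes in a0.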
define <32 x i32> @call_split_vector_args(ptr %pa, ptr %pb) {
; CHECK-LABEL: call_split_vector_args:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi sp, sp, -256
; CHECK-NEXT:    .cfi_def_cfa_offset 256
; CHECK-NEXT:    sd ra, 248(sp) # 8-byte Folded Spill
; CHECK-NEXT:    sd s0, 240(sp) # 8-byte Folded Spill
; CHECK-NEXT:    .cfi_offset ra, -8
; CHECK-NEXT:    .cfi_offset s0, -16
; CHECK-NEXT:    addi s0, sp, 256
; CHECK-NEXT:    .cfi_def_cfa s0, 0
; CHECK-NEXT:    andi sp, sp, -128
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    li a0, 32
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vle32.v v16, (a1)
; CHECK-NEXT:    mv a1, sp
; CHECK-NEXT:    mv a0, sp
; CHECK-NEXT:    vse32.v v16, (a1)
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vmv1r.v v12, v8
; CHECK-NEXT:    call split_vector_args
; CHECK-NEXT:    addi sp, s0, -256
; CHECK-NEXT:    .cfi_def_cfa sp, 256
; CHECK-NEXT:    ld ra, 248(sp) # 8-byte Folded Reload
; CHECK-NEXT:    ld s0, 240(sp) # 8-byte Folded Reload
; CHECK-NEXT:    .cfi_restore ra
; CHECK-NEXT:    .cfi_restore s0
; CHECK-NEXT:    addi sp, sp, 256
; CHECK-NEXT:    .cfi_def_cfa_offset 0
; CHECK-NEXT:    ret
  %a = load <2 x i32>, ptr %pa
  %b = load <32 x i32>, ptr %pb
  %r = call <32 x i32> @split_vector_args(<2 x i32> %a, <2 x i32> %a, <2 x i32> %a, <2 x i32> %a, <2 x i32> %a, <32 x i32> %b, <32 x i32> %b)
  ret <32 x i32> %r
}

; A rather pathological test case in which we exhaust all vector registers and
; all scalar registers, forcing %z and %8 to go through the stack.
define <32 x i32> @vector_arg_via_stack(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, <32 x i32> %x, <32 x i32> %y, <32 x i32> %z, i32 %8) {
; CHECK-LABEL: vector_arg_via_stack:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 32
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vle32.v v16, (sp)
; CHECK-NEXT:    vadd.vv v8, v8, v16
; CHECK-NEXT:    ret
  %s = add <32 x i32> %x, %z
  ret <32 x i32> %s
}

; Calling the function above. Ensure we pass the arguments correctly: %z is
; written to the 128-byte area at sp+0 and the trailing i32 8 is stored at
; sp+128.
define <32 x i32> @pass_vector_arg_via_stack(<32 x i32> %x, <32 x i32> %y, <32 x i32> %z) {
; CHECK-LABEL: pass_vector_arg_via_stack:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi sp, sp, -144
; CHECK-NEXT:    .cfi_def_cfa_offset 144
; CHECK-NEXT:    sd ra, 136(sp) # 8-byte Folded Spill
; CHECK-NEXT:    .cfi_offset ra, -8
; CHECK-NEXT:    li a0, 32
; CHECK-NEXT:    li t0, 8
; CHECK-NEXT:    li a1, 1
; CHECK-NEXT:    li a2, 2
; CHECK-NEXT:    li a3, 3
; CHECK-NEXT:    li a4, 4
; CHECK-NEXT:    li a5, 5
; CHECK-NEXT:    li a6, 6
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vse32.v v8, (sp)
; CHECK-NEXT:    li a7, 7
; CHECK-NEXT:    sd t0, 128(sp)
; CHECK-NEXT:    li a0, 0
; CHECK-NEXT:    vmv.v.i v16, 0
; CHECK-NEXT:    call vector_arg_via_stack
; CHECK-NEXT:    ld ra, 136(sp) # 8-byte Folded Reload
; CHECK-NEXT:    .cfi_restore ra
; CHECK-NEXT:    addi sp, sp, 144
; CHECK-NEXT:    .cfi_def_cfa_offset 0
; CHECK-NEXT:    ret
  %s = call <32 x i32> @vector_arg_via_stack(i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, <32 x i32> zeroinitializer, <32 x i32> zeroinitializer, <32 x i32> zeroinitializer, i32 8)
  ret <32 x i32> %s
}

; Another pathological case, but where a small mask vector must be passed on
; the stack. %9 still fits in v0; %10 goes to the stack at sp+136, past the
; 128-byte area for %z and the 8-byte slot for %8.
define <4 x i1> @vector_mask_arg_via_stack(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, <32 x i32> %x, <32 x i32> %y, <32 x i32> %z, i32 %8, <4 x i1> %9, <4 x i1> %10) {
; CHECK-LABEL: vector_mask_arg_via_stack:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a0, sp, 136
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT:    vlm.v v0, (a0)
; CHECK-NEXT:    ret
  ret <4 x i1> %10
}

; Calling the function above. Ensure we pass the mask arguments correctly. We
; legalize stores of small masks such that the value is at least byte-sized:
; the <4 x i1> is expanded to bytes with vmerge.vim, widened to an 8-element
; vector, converted back to a mask with vmsne.vi, and stored with vsm.v.
define <4 x i1> @pass_vector_mask_arg_via_stack(<4 x i1> %v) {
; CHECK-LABEL: pass_vector_mask_arg_via_stack:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi sp, sp, -160
; CHECK-NEXT:    .cfi_def_cfa_offset 160
; CHECK-NEXT:    sd ra, 152(sp) # 8-byte Folded Spill
; CHECK-NEXT:    .cfi_offset ra, -8
; CHECK-NEXT:    li a0, 32
; CHECK-NEXT:    li a1, 8
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT:    vmv.v.i v16, 0
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vmv.v.i v17, 0
; CHECK-NEXT:    addi a2, sp, 136
; CHECK-NEXT:    li a5, 5
; CHECK-NEXT:    li a6, 6
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    sd a1, 128(sp)
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT:    vmerge.vim v16, v16, 1, v0
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vse32.v v8, (sp)
; CHECK-NEXT:    vsetivli zero, 4, e8, mf2, tu, ma
; CHECK-NEXT:    vmv.v.v v17, v16
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vmsne.vi v16, v17, 0
; CHECK-NEXT:    li a7, 7
; CHECK-NEXT:    vsm.v v16, (a2)
; CHECK-NEXT:    li a0, 0
; CHECK-NEXT:    li a1, 0
; CHECK-NEXT:    li a2, 0
; CHECK-NEXT:    li a3, 0
; CHECK-NEXT:    li a4, 0
; CHECK-NEXT:    vmv8r.v v16, v8
; CHECK-NEXT:    call vector_mask_arg_via_stack
; CHECK-NEXT:    ld ra, 152(sp) # 8-byte Folded Reload
; CHECK-NEXT:    .cfi_restore ra
; CHECK-NEXT:    addi sp, sp, 160
; CHECK-NEXT:    .cfi_def_cfa_offset 0
; CHECK-NEXT:    ret
  %r = call <4 x i1> @vector_mask_arg_via_stack(i32 0, i32 0, i32 0, i32 0, i32 0, i32 5, i32 6, i32 7, <32 x i32> zeroinitializer, <32 x i32> zeroinitializer, <32 x i32> zeroinitializer, i32 8, <4 x i1> %v, <4 x i1> %v)
  ret <4 x i1> %r
}