; xref: /llvm-project/llvm/test/CodeGen/RISCV/rvv/inline-asm.ll (revision b6c0f1bfa79a3a32d841ac5ab1f94c3aee3b5d90)
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+v < %s \
; RUN:     --verify-machineinstrs | FileCheck %s

; Generic "vr" inline-asm constraint with nxv1i1 mask operands; CHECK expects
; the mask op to use v0/v8 directly.
define <vscale x 1 x i1> @test_1xi1(<vscale x 1 x i1> %in, <vscale x 1 x i1> %in2) nounwind {
; CHECK-LABEL: test_1xi1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vmand.mm v0, v0, v8
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 1 x i1> asm "vmand.mm $0, $1, $2", "=^vr,^vr,^vr"(<vscale x 1 x i1> %in, <vscale x 1 x i1> %in2)
  ret <vscale x 1 x i1> %0
}

; Generic "vr" inline-asm constraint with nxv2i1 mask operands.
define <vscale x 2 x i1> @test_2xi1(<vscale x 2 x i1> %in, <vscale x 2 x i1> %in2) nounwind {
; CHECK-LABEL: test_2xi1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vmand.mm v0, v0, v8
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 2 x i1> asm "vmand.mm $0, $1, $2", "=^vr,^vr,^vr"(<vscale x 2 x i1> %in, <vscale x 2 x i1> %in2)
  ret <vscale x 2 x i1> %0
}

; Generic "vr" inline-asm constraint with nxv4i1 mask operands.
define <vscale x 4 x i1> @test_4xi1(<vscale x 4 x i1> %in, <vscale x 4 x i1> %in2) nounwind {
; CHECK-LABEL: test_4xi1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vmand.mm v0, v0, v8
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 4 x i1> asm "vmand.mm $0, $1, $2", "=^vr,^vr,^vr"(<vscale x 4 x i1> %in, <vscale x 4 x i1> %in2)
  ret <vscale x 4 x i1> %0
}

; Generic "vr" inline-asm constraint with nxv8i1 mask operands.
define <vscale x 8 x i1> @test_8xi1(<vscale x 8 x i1> %in, <vscale x 8 x i1> %in2) nounwind {
; CHECK-LABEL: test_8xi1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vmand.mm v0, v0, v8
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 8 x i1> asm "vmand.mm $0, $1, $2", "=^vr,^vr,^vr"(<vscale x 8 x i1> %in, <vscale x 8 x i1> %in2)
  ret <vscale x 8 x i1> %0
}

; Generic "vr" inline-asm constraint with nxv16i1 mask operands.
define <vscale x 16 x i1> @test_16xi1(<vscale x 16 x i1> %in, <vscale x 16 x i1> %in2) nounwind {
; CHECK-LABEL: test_16xi1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vmand.mm v0, v0, v8
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 16 x i1> asm "vmand.mm $0, $1, $2", "=^vr,^vr,^vr"(<vscale x 16 x i1> %in, <vscale x 16 x i1> %in2)
  ret <vscale x 16 x i1> %0
}

; Generic "vr" inline-asm constraint with nxv32i1 mask operands.
define <vscale x 32 x i1> @test_32xi1(<vscale x 32 x i1> %in, <vscale x 32 x i1> %in2) nounwind {
; CHECK-LABEL: test_32xi1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vmand.mm v0, v0, v8
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 32 x i1> asm "vmand.mm $0, $1, $2", "=^vr,^vr,^vr"(<vscale x 32 x i1> %in, <vscale x 32 x i1> %in2)
  ret <vscale x 32 x i1> %0
}

; Generic "vr" inline-asm constraint with nxv64i1 mask operands.
define <vscale x 64 x i1> @test_64xi1(<vscale x 64 x i1> %in, <vscale x 64 x i1> %in2) nounwind {
; CHECK-LABEL: test_64xi1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vmand.mm v0, v0, v8
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 64 x i1> asm "vmand.mm $0, $1, $2", "=^vr,^vr,^vr"(<vscale x 64 x i1> %in, <vscale x 64 x i1> %in2)
  ret <vscale x 64 x i1> %0
}

; "vr" constraint with nxv1i64 operands; CHECK expects inputs in v8/v9.
define <vscale x 1 x i64> @test_1xi64(<vscale x 1 x i64> %in, <vscale x 1 x i64> %in2) nounwind {
; CHECK-LABEL: test_1xi64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 1 x i64> asm "vadd.vv $0, $1, $2", "=^vr,^vr,^vr"(<vscale x 1 x i64> %in, <vscale x 1 x i64> %in2)
  ret <vscale x 1 x i64> %0
}

; "vr" constraint with nxv2i64 operands; CHECK expects inputs in v8/v10.
define <vscale x 2 x i64> @test_2xi64(<vscale x 2 x i64> %in, <vscale x 2 x i64> %in2) nounwind {
; CHECK-LABEL: test_2xi64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vadd.vv v8, v8, v10
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 2 x i64> asm "vadd.vv $0, $1, $2", "=^vr,^vr,^vr"(<vscale x 2 x i64> %in, <vscale x 2 x i64> %in2)
  ret <vscale x 2 x i64> %0
}

; "vr" constraint with nxv4i64 operands; CHECK expects inputs in v8/v12.
define <vscale x 4 x i64> @test_4xi64(<vscale x 4 x i64> %in, <vscale x 4 x i64> %in2) nounwind {
; CHECK-LABEL: test_4xi64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vadd.vv v8, v8, v12
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 4 x i64> asm "vadd.vv $0, $1, $2", "=^vr,^vr,^vr"(<vscale x 4 x i64> %in, <vscale x 4 x i64> %in2)
  ret <vscale x 4 x i64> %0
}

; "vr" constraint with nxv8i64 operands; CHECK expects inputs in v8/v16.
define <vscale x 8 x i64> @test_8xi64(<vscale x 8 x i64> %in, <vscale x 8 x i64> %in2) nounwind {
; CHECK-LABEL: test_8xi64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vadd.vv v8, v8, v16
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 8 x i64> asm "vadd.vv $0, $1, $2", "=^vr,^vr,^vr"(<vscale x 8 x i64> %in, <vscale x 8 x i64> %in2)
  ret <vscale x 8 x i64> %0
}

; "vr" constraint with nxv1i32 operands; CHECK expects inputs in v8/v9.
define <vscale x 1 x i32> @test_1xi32(<vscale x 1 x i32> %in, <vscale x 1 x i32> %in2) nounwind {
; CHECK-LABEL: test_1xi32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 1 x i32> asm "vadd.vv $0, $1, $2", "=^vr,^vr,^vr"(<vscale x 1 x i32> %in, <vscale x 1 x i32> %in2)
  ret <vscale x 1 x i32> %0
}

; "vr" constraint with nxv2i32 operands; CHECK expects inputs in v8/v9.
define <vscale x 2 x i32> @test_2xi32(<vscale x 2 x i32> %in, <vscale x 2 x i32> %in2) nounwind {
; CHECK-LABEL: test_2xi32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 2 x i32> asm "vadd.vv $0, $1, $2", "=^vr,^vr,^vr"(<vscale x 2 x i32> %in, <vscale x 2 x i32> %in2)
  ret <vscale x 2 x i32> %0
}

; "vr" constraint with nxv4i32 operands; CHECK expects inputs in v8/v10.
define <vscale x 4 x i32> @test_4xi32(<vscale x 4 x i32> %in, <vscale x 4 x i32> %in2) nounwind {
; CHECK-LABEL: test_4xi32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vadd.vv v8, v8, v10
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 4 x i32> asm "vadd.vv $0, $1, $2", "=^vr,^vr,^vr"(<vscale x 4 x i32> %in, <vscale x 4 x i32> %in2)
  ret <vscale x 4 x i32> %0
}

; "vr" constraint with nxv8i32 operands; CHECK expects inputs in v8/v12.
define <vscale x 8 x i32> @test_8xi32(<vscale x 8 x i32> %in, <vscale x 8 x i32> %in2) nounwind {
; CHECK-LABEL: test_8xi32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vadd.vv v8, v8, v12
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 8 x i32> asm "vadd.vv $0, $1, $2", "=^vr,^vr,^vr"(<vscale x 8 x i32> %in, <vscale x 8 x i32> %in2)
  ret <vscale x 8 x i32> %0
}

; "vr" constraint with nxv16i32 operands; CHECK expects inputs in v8/v16.
define <vscale x 16 x i32> @test_16xi32(<vscale x 16 x i32> %in, <vscale x 16 x i32> %in2) nounwind {
; CHECK-LABEL: test_16xi32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vadd.vv v8, v8, v16
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 16 x i32> asm "vadd.vv $0, $1, $2", "=^vr,^vr,^vr"(<vscale x 16 x i32> %in, <vscale x 16 x i32> %in2)
  ret <vscale x 16 x i32> %0
}

; "vr" constraint with nxv1i16 operands; CHECK expects inputs in v8/v9.
define <vscale x 1 x i16> @test_1xi16(<vscale x 1 x i16> %in, <vscale x 1 x i16> %in2) nounwind {
; CHECK-LABEL: test_1xi16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 1 x i16> asm "vadd.vv $0, $1, $2", "=^vr,^vr,^vr"(<vscale x 1 x i16> %in, <vscale x 1 x i16> %in2)
  ret <vscale x 1 x i16> %0
}

; "vr" constraint with nxv2i16 operands; CHECK expects inputs in v8/v9.
define <vscale x 2 x i16> @test_2xi16(<vscale x 2 x i16> %in, <vscale x 2 x i16> %in2) nounwind {
; CHECK-LABEL: test_2xi16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 2 x i16> asm "vadd.vv $0, $1, $2", "=^vr,^vr,^vr"(<vscale x 2 x i16> %in, <vscale x 2 x i16> %in2)
  ret <vscale x 2 x i16> %0
}

; "vr" constraint with nxv4i16 operands; CHECK expects inputs in v8/v9.
define <vscale x 4 x i16> @test_4xi16(<vscale x 4 x i16> %in, <vscale x 4 x i16> %in2) nounwind {
; CHECK-LABEL: test_4xi16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 4 x i16> asm "vadd.vv $0, $1, $2", "=^vr,^vr,^vr"(<vscale x 4 x i16> %in, <vscale x 4 x i16> %in2)
  ret <vscale x 4 x i16> %0
}

; "vr" constraint with nxv8i16 operands; CHECK expects inputs in v8/v10.
define <vscale x 8 x i16> @test_8xi16(<vscale x 8 x i16> %in, <vscale x 8 x i16> %in2) nounwind {
; CHECK-LABEL: test_8xi16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vadd.vv v8, v8, v10
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 8 x i16> asm "vadd.vv $0, $1, $2", "=^vr,^vr,^vr"(<vscale x 8 x i16> %in, <vscale x 8 x i16> %in2)
  ret <vscale x 8 x i16> %0
}

; "vr" constraint with nxv16i16 operands; CHECK expects inputs in v8/v12.
define <vscale x 16 x i16> @test_16xi16(<vscale x 16 x i16> %in, <vscale x 16 x i16> %in2) nounwind {
; CHECK-LABEL: test_16xi16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vadd.vv v8, v8, v12
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 16 x i16> asm "vadd.vv $0, $1, $2", "=^vr,^vr,^vr"(<vscale x 16 x i16> %in, <vscale x 16 x i16> %in2)
  ret <vscale x 16 x i16> %0
}

; "vr" constraint with nxv32i16 operands; CHECK expects inputs in v8/v16.
define <vscale x 32 x i16> @test_32xi16(<vscale x 32 x i16> %in, <vscale x 32 x i16> %in2) nounwind {
; CHECK-LABEL: test_32xi16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vadd.vv v8, v8, v16
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 32 x i16> asm "vadd.vv $0, $1, $2", "=^vr,^vr,^vr"(<vscale x 32 x i16> %in, <vscale x 32 x i16> %in2)
  ret <vscale x 32 x i16> %0
}

; "vr" constraint with nxv1i8 operands; CHECK expects inputs in v8/v9.
define <vscale x 1 x i8> @test_1xi8(<vscale x 1 x i8> %in, <vscale x 1 x i8> %in2) nounwind {
; CHECK-LABEL: test_1xi8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 1 x i8> asm "vadd.vv $0, $1, $2", "=^vr,^vr,^vr"(<vscale x 1 x i8> %in, <vscale x 1 x i8> %in2)
  ret <vscale x 1 x i8> %0
}

; "vr" constraint with nxv2i8 operands; CHECK expects inputs in v8/v9.
define <vscale x 2 x i8> @test_2xi8(<vscale x 2 x i8> %in, <vscale x 2 x i8> %in2) nounwind {
; CHECK-LABEL: test_2xi8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 2 x i8> asm "vadd.vv $0, $1, $2", "=^vr,^vr,^vr"(<vscale x 2 x i8> %in, <vscale x 2 x i8> %in2)
  ret <vscale x 2 x i8> %0
}

; "vr" constraint with nxv4i8 operands; CHECK expects inputs in v8/v9.
define <vscale x 4 x i8> @test_4xi8(<vscale x 4 x i8> %in, <vscale x 4 x i8> %in2) nounwind {
; CHECK-LABEL: test_4xi8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 4 x i8> asm "vadd.vv $0, $1, $2", "=^vr,^vr,^vr"(<vscale x 4 x i8> %in, <vscale x 4 x i8> %in2)
  ret <vscale x 4 x i8> %0
}

; "vr" constraint with nxv8i8 operands; CHECK expects inputs in v8/v9.
define <vscale x 8 x i8> @test_8xi8(<vscale x 8 x i8> %in, <vscale x 8 x i8> %in2) nounwind {
; CHECK-LABEL: test_8xi8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 8 x i8> asm "vadd.vv $0, $1, $2", "=^vr,^vr,^vr"(<vscale x 8 x i8> %in, <vscale x 8 x i8> %in2)
  ret <vscale x 8 x i8> %0
}

; "vr" constraint with nxv16i8 operands; CHECK expects inputs in v8/v10.
define <vscale x 16 x i8> @test_16xi8(<vscale x 16 x i8> %in, <vscale x 16 x i8> %in2) nounwind {
; CHECK-LABEL: test_16xi8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vadd.vv v8, v8, v10
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 16 x i8> asm "vadd.vv $0, $1, $2", "=^vr,^vr,^vr"(<vscale x 16 x i8> %in, <vscale x 16 x i8> %in2)
  ret <vscale x 16 x i8> %0
}

; "vr" constraint with nxv32i8 operands; CHECK expects inputs in v8/v12.
define <vscale x 32 x i8> @test_32xi8(<vscale x 32 x i8> %in, <vscale x 32 x i8> %in2) nounwind {
; CHECK-LABEL: test_32xi8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vadd.vv v8, v8, v12
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 32 x i8> asm "vadd.vv $0, $1, $2", "=^vr,^vr,^vr"(<vscale x 32 x i8> %in, <vscale x 32 x i8> %in2)
  ret <vscale x 32 x i8> %0
}

; "vr" constraint with nxv64i8 operands; CHECK expects inputs in v8/v16.
define <vscale x 64 x i8> @test_64xi8(<vscale x 64 x i8> %in, <vscale x 64 x i8> %in2) nounwind {
; CHECK-LABEL: test_64xi8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vadd.vv v8, v8, v16
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 64 x i8> asm "vadd.vv $0, $1, $2", "=^vr,^vr,^vr"(<vscale x 64 x i8> %in, <vscale x 64 x i8> %in2)
  ret <vscale x 64 x i8> %0
}

; Mixes "vr" data operands with a "vm" mask operand; CHECK expects the mask in v0
; (v0.t in the emitted instruction).
define <vscale x 64 x i8> @test_64xi8_with_mask(<vscale x 64 x i8> %in, <vscale x 64 x i8> %in2, <vscale x 64 x i1> %mask) nounwind {
; CHECK-LABEL: test_64xi8_with_mask:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vadd.vv v8, v8, v16, v0.t
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 64 x i8> asm "vadd.vv $0, $1, $2, $3.t", "=^vr,^vr,^vr,^vm"(<vscale x 64 x i8> %in, <vscale x 64 x i8> %in2, <vscale x 64 x i1> %mask)
  ret <vscale x 64 x i8> %0
}

; Explicit physical registers ({v0},{v1},{v2}) with an nxv4i8 (fractional-LMUL)
; type; CHECK expects copies into/out of the fixed registers around the asm.
define <vscale x 4 x i8> @test_specify_reg_mf2(<vscale x 4 x i8> %in, <vscale x 4 x i8> %in2) nounwind {
; CHECK-LABEL: test_specify_reg_mf2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT:    vmv1r.v v2, v9
; CHECK-NEXT:    vmv1r.v v1, v8
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vadd.vv v0, v1, v2
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT:    vmv1r.v v8, v0
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 4 x i8> asm "vadd.vv $0, $1, $2", "={v0},{v1},{v2}"(<vscale x 4 x i8> %in, <vscale x 4 x i8> %in2)
  ret <vscale x 4 x i8> %0
}

; Explicit physical registers ({v0},{v1},{v2}) with an nxv8i8 (whole-register)
; type; CHECK expects copies into/out of the fixed registers around the asm.
define <vscale x 8 x i8> @test_specify_reg_m1(<vscale x 8 x i8> %in, <vscale x 8 x i8> %in2) nounwind {
; CHECK-LABEL: test_specify_reg_m1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT:    vmv1r.v v2, v9
; CHECK-NEXT:    vmv1r.v v1, v8
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vadd.vv v0, v1, v2
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT:    vmv1r.v v8, v0
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 8 x i8> asm "vadd.vv $0, $1, $2", "={v0},{v1},{v2}"(<vscale x 8 x i8> %in, <vscale x 8 x i8> %in2)
  ret <vscale x 8 x i8> %0
}

; Explicit physical registers ({v0},{v2},{v4}) with an nxv16i8 type; CHECK
; expects vmv2r (register-pair) copies around the asm.
define <vscale x 16 x i8> @test_specify_reg_m2(<vscale x 16 x i8> %in, <vscale x 16 x i8> %in2) nounwind {
; CHECK-LABEL: test_specify_reg_m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT:    vmv2r.v v4, v10
; CHECK-NEXT:    vmv2r.v v2, v8
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vadd.vv v0, v2, v4
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT:    vmv2r.v v8, v0
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 16 x i8> asm "vadd.vv $0, $1, $2", "={v0},{v2},{v4}"(<vscale x 16 x i8> %in, <vscale x 16 x i8> %in2)
  ret <vscale x 16 x i8> %0
}

; Explicit physical registers with nxv1i1 mask operands; the incoming mask in v0
; must be moved aside (to v1) before the asm writes v0.
define <vscale x 1 x i1> @test_specify_reg_mask(<vscale x 1 x i1> %in, <vscale x 1 x i1> %in2) nounwind {
; CHECK-LABEL: test_specify_reg_mask:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT:    vmv1r.v v2, v8
; CHECK-NEXT:    vmv1r.v v1, v0
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vmand.mm v0, v1, v2
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 1 x i1> asm "vmand.mm $0, $1, $2", "={v0},{v1},{v2}"(<vscale x 1 x i1> %in, <vscale x 1 x i1> %in2)
  ret <vscale x 1 x i1> %0
}

; "vr" constraint with a riscv.vector.tuple (nxv8i8 x 3) operand; the tuple is
; passed starting at v8.
define void @test_vector_tuple_type0(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base) nounwind {
; CHECK-LABEL: test_vector_tuple_type0:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vsseg3e8.v v8, (a0)
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    ret
entry:
  tail call void asm "vsseg3e8.v $0, ($1)", "^vr,r"(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) %val, ptr %base)
  ret void
}

; "vr" constraint with a riscv.vector.tuple (nxv2i8 x 3) operand; the tuple is
; passed starting at v8.
define void @test_vector_tuple_type1(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base) nounwind {
; CHECK-LABEL: test_vector_tuple_type1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vsseg3e8.v v8, (a0)
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    ret
entry:
  tail call void asm "vsseg3e8.v $0, ($1)", "^vr,r"(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) %val, ptr %base)
  ret void
}

; Multiple tuple-typed "vr" operands in one function; the third tuple does not
; fit in argument registers, so CHECK expects it reloaded from the stack
; (vl1r.v into v23-v29) before its asm use.
define void @test_vector_tuple_type2(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val2, target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val3, ptr %base) nounwind {
; CHECK-LABEL: test_vector_tuple_type2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl1r.v v23, (a0)
; CHECK-NEXT:    csrr a2, vlenb
; CHECK-NEXT:    add a0, a0, a2
; CHECK-NEXT:    vl1r.v v24, (a0)
; CHECK-NEXT:    add a0, a0, a2
; CHECK-NEXT:    vl1r.v v25, (a0)
; CHECK-NEXT:    add a0, a0, a2
; CHECK-NEXT:    vl1r.v v26, (a0)
; CHECK-NEXT:    add a0, a0, a2
; CHECK-NEXT:    vl1r.v v27, (a0)
; CHECK-NEXT:    add a0, a0, a2
; CHECK-NEXT:    vl1r.v v28, (a0)
; CHECK-NEXT:    add a0, a0, a2
; CHECK-NEXT:    vl1r.v v29, (a0)
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vsseg3e8.v v8, (a1)
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vsseg7e8.v v16, (a1)
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    #APP
; CHECK-NEXT:    vsseg7e8.v v23, (a1)
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    ret
entry:
  tail call void asm "vsseg3e8.v $0, ($1)", "^vr,r"(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %val, ptr %base)
  tail call void asm "vsseg7e8.v $0, ($1)", "^vr,r"(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val2, ptr %base)
  tail call void asm "vsseg7e8.v $0, ($1)", "^vr,r"(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) %val3, ptr %base)
  ret void
}
