; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s

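; vsm.v stores a mask register to memory. The tests below check that the
; llvm.riscv.vsm intrinsic selects vsm.v with SEW=8 and the LMUL matching
; each mask type, from mf8 for nxv1i1 up to m8 for nxv64i1.
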
declare void @llvm.riscv.vsm.nxv1i1(<vscale x 1 x i1>, ptr, iXLen);

define void @intrinsic_vsm_v_nxv1i1(<vscale x 1 x i1> %0, ptr %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsm_v_nxv1i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vsm.v v0, (a0)
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsm.nxv1i1(<vscale x 1 x i1> %0, ptr %1, iXLen %2)
  ret void
}

declare void @llvm.riscv.vsm.nxv2i1(<vscale x 2 x i1>, ptr, iXLen);

define void @intrinsic_vsm_v_nxv2i1(<vscale x 2 x i1> %0, ptr %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsm_v_nxv2i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vsm.v v0, (a0)
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsm.nxv2i1(<vscale x 2 x i1> %0, ptr %1, iXLen %2)
  ret void
}

declare void @llvm.riscv.vsm.nxv4i1(<vscale x 4 x i1>, ptr, iXLen);

define void @intrinsic_vsm_v_nxv4i1(<vscale x 4 x i1> %0, ptr %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsm_v_nxv4i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vsm.v v0, (a0)
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsm.nxv4i1(<vscale x 4 x i1> %0, ptr %1, iXLen %2)
  ret void
}

declare void @llvm.riscv.vsm.nxv8i1(<vscale x 8 x i1>, ptr, iXLen);

define void @intrinsic_vsm_v_nxv8i1(<vscale x 8 x i1> %0, ptr %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsm_v_nxv8i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vsm.v v0, (a0)
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsm.nxv8i1(<vscale x 8 x i1> %0, ptr %1, iXLen %2)
  ret void
}

declare void @llvm.riscv.vsm.nxv16i1(<vscale x 16 x i1>, ptr, iXLen);

define void @intrinsic_vsm_v_nxv16i1(<vscale x 16 x i1> %0, ptr %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsm_v_nxv16i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vsm.v v0, (a0)
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsm.nxv16i1(<vscale x 16 x i1> %0, ptr %1, iXLen %2)
  ret void
}

declare void @llvm.riscv.vsm.nxv32i1(<vscale x 32 x i1>, ptr, iXLen);

define void @intrinsic_vsm_v_nxv32i1(<vscale x 32 x i1> %0, ptr %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsm_v_nxv32i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    vsm.v v0, (a0)
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsm.nxv32i1(<vscale x 32 x i1> %0, ptr %1, iXLen %2)
  ret void
}

declare void @llvm.riscv.vsm.nxv64i1(<vscale x 64 x i1>, ptr, iXLen);

define void @intrinsic_vsm_v_nxv64i1(<vscale x 64 x i1> %0, ptr %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsm_v_nxv64i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT:    vsm.v v0, (a0)
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsm.nxv64i1(<vscale x 64 x i1> %0, ptr %1, iXLen %2)
  ret void
}

declare <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  iXLen);

; Make sure we can use the vsetvli from the producing instruction.
define void @test_vsetvli_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, ptr %2, iXLen %3) nounwind {
; CHECK-LABEL: test_vsetvli_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vmseq.vv v8, v8, v9
; CHECK-NEXT:    vsm.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    iXLen %3)
  call void @llvm.riscv.vsm.nxv1i1(<vscale x 1 x i1> %a, ptr %2, iXLen %3)
  ret void
}

declare <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  iXLen);

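; Likewise when the producing compare operates at SEW=32.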
define void @test_vsetvli_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, ptr %2, iXLen %3) nounwind {
; CHECK-LABEL: test_vsetvli_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vmseq.vv v8, v8, v9
; CHECK-NEXT:    vsm.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    iXLen %3)
  call void @llvm.riscv.vsm.nxv1i1(<vscale x 1 x i1> %a, ptr %2, iXLen %3)
  ret void
}