; xref: /llvm-project/llvm/test/CodeGen/RISCV/rvv/vlm.ll (revision ff9af4c43ad71eeba2cabe99609cfaa0fd54c1d0)
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s

declare <vscale x 1 x i1> @llvm.riscv.vlm.nxv1i1(ptr, iXLen);

; vlm.v mask load of <vscale x 1 x i1>; CHECK lines expect an e8/mf8 vsetvli.
define <vscale x 1 x i1> @intrinsic_vlm_v_nxv1i1(ptr %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vlm_v_nxv1i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vlm.v v0, (a0)
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vlm.nxv1i1(ptr %0, iXLen %1)
  ret <vscale x 1 x i1> %a
}
19
declare <vscale x 2 x i1> @llvm.riscv.vlm.nxv2i1(ptr, iXLen);

; vlm.v mask load of <vscale x 2 x i1>; CHECK lines expect an e8/mf4 vsetvli.
define <vscale x 2 x i1> @intrinsic_vlm_v_nxv2i1(ptr %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vlm_v_nxv2i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vlm.v v0, (a0)
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vlm.nxv2i1(ptr %0, iXLen %1)
  ret <vscale x 2 x i1> %a
}
32
declare <vscale x 4 x i1> @llvm.riscv.vlm.nxv4i1(ptr, iXLen);

; vlm.v mask load of <vscale x 4 x i1>; CHECK lines expect an e8/mf2 vsetvli.
define <vscale x 4 x i1> @intrinsic_vlm_v_nxv4i1(ptr %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vlm_v_nxv4i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vlm.v v0, (a0)
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vlm.nxv4i1(ptr %0, iXLen %1)
  ret <vscale x 4 x i1> %a
}
45
declare <vscale x 8 x i1> @llvm.riscv.vlm.nxv8i1(ptr, iXLen);

; vlm.v mask load of <vscale x 8 x i1>; CHECK lines expect an e8/m1 vsetvli.
define <vscale x 8 x i1> @intrinsic_vlm_v_nxv8i1(ptr %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vlm_v_nxv8i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vlm.v v0, (a0)
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vlm.nxv8i1(ptr %0, iXLen %1)
  ret <vscale x 8 x i1> %a
}
58
declare <vscale x 16 x i1> @llvm.riscv.vlm.nxv16i1(ptr, iXLen);

; vlm.v mask load of <vscale x 16 x i1>; CHECK lines expect an e8/m2 vsetvli.
define <vscale x 16 x i1> @intrinsic_vlm_v_nxv16i1(ptr %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vlm_v_nxv16i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vlm.v v0, (a0)
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vlm.nxv16i1(ptr %0, iXLen %1)
  ret <vscale x 16 x i1> %a
}
71
declare <vscale x 32 x i1> @llvm.riscv.vlm.nxv32i1(ptr, iXLen);

; vlm.v mask load of <vscale x 32 x i1>; CHECK lines expect an e8/m4 vsetvli.
define <vscale x 32 x i1> @intrinsic_vlm_v_nxv32i1(ptr %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vlm_v_nxv32i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    vlm.v v0, (a0)
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i1> @llvm.riscv.vlm.nxv32i1(ptr %0, iXLen %1)
  ret <vscale x 32 x i1> %a
}
84
declare <vscale x 64 x i1> @llvm.riscv.vlm.nxv64i1(ptr, iXLen);

; vlm.v mask load of <vscale x 64 x i1>; CHECK lines expect an e8/m8 vsetvli.
define <vscale x 64 x i1> @intrinsic_vlm_v_nxv64i1(ptr %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vlm_v_nxv64i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT:    vlm.v v0, (a0)
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i1> @llvm.riscv.vlm.nxv64i1(ptr %0, iXLen %1)
  ret <vscale x 64 x i1> %a
}