; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v,+f -verify-machineinstrs \
; RUN:   < %s | FileCheck %s

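; Test codegen for the masked vslide1down intrinsic with an i64 scalar on
; RV32, covering the four tail/mask policy combinations (tumu, tamu, tuma,
; tama) plus an undef mask.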
declare <vscale x 1 x i64> @llvm.riscv.vslide1down.mask.nxv1i64.i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  i64,
  <vscale x 1 x i1>,
  i32,
  i32);

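; The operands are: merge (passthru), source vector, scalar, mask, vl, and a
; policy immediate in which bit 0 means tail agnostic and bit 1 means mask
; agnostic, so 0 = tumu, 1 = tamu, 2 = tuma, and 3 = tama.
;
; On RV32 the i64 scalar is split across a0/a1, so the lowering doubles vl,
; halves SEW to e32, slides in each half with its own vslide1down, and then
; applies the mask with a vmerge at e64.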
define <vscale x 1 x i64> @intrinsic_vslide1down_mask_tumu_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_mask_tumu_vx_nxv1i64_nxv1i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli a3, a2, e64, m1, ta, ma
; CHECK-NEXT:    slli a3, a3, 1
; CHECK-NEXT:    vsetvli zero, a3, e32, m1, ta, ma
; CHECK-NEXT:    vslide1down.vx v9, v9, a0
; CHECK-NEXT:    vslide1down.vx v9, v9, a1
; CHECK-NEXT:    vsetvli zero, a2, e64, m1, tu, ma
; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vslide1down.mask.nxv1i64.i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    i64 %2,
    <vscale x 1 x i1> %3,
    i32 %4, i32 0)

  ret <vscale x 1 x i64> %a
}

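; Policy 1 (tail agnostic, mask undisturbed): same sequence as tumu, except
; the vsetvli before the vmerge uses "ta" instead of "tu".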
define <vscale x 1 x i64> @intrinsic_vslide1down_mask_tamu_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_mask_tamu_vx_nxv1i64_nxv1i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli a3, a2, e64, m1, ta, ma
; CHECK-NEXT:    slli a3, a3, 1
; CHECK-NEXT:    vsetvli zero, a3, e32, m1, ta, ma
; CHECK-NEXT:    vslide1down.vx v9, v9, a0
; CHECK-NEXT:    vslide1down.vx v9, v9, a1
; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vslide1down.mask.nxv1i64.i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    i64 %2,
    <vscale x 1 x i1> %3,
    i32 %4, i32 1)

  ret <vscale x 1 x i64> %a
}

; Fall back to mask-undisturbed vslide1 until InsertVSETVLI supports mask agnostic.
define <vscale x 1 x i64> @intrinsic_vslide1down_mask_tuma_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_mask_tuma_vx_nxv1i64_nxv1i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli a3, a2, e64, m1, ta, ma
; CHECK-NEXT:    slli a3, a3, 1
; CHECK-NEXT:    vsetvli zero, a3, e32, m1, ta, ma
; CHECK-NEXT:    vslide1down.vx v9, v9, a0
; CHECK-NEXT:    vslide1down.vx v9, v9, a1
; CHECK-NEXT:    vsetvli zero, a2, e64, m1, tu, ma
; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vslide1down.mask.nxv1i64.i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    i64 %2,
    <vscale x 1 x i1> %3,
    i32 %4, i32 2)

  ret <vscale x 1 x i64> %a
}

; Fall back to mask-undisturbed vslide1 until InsertVSETVLI supports mask agnostic.
define <vscale x 1 x i64> @intrinsic_vslide1down_mask_tama_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_mask_tama_vx_nxv1i64_nxv1i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli a2, a2, e64, m1, ta, ma
; CHECK-NEXT:    slli a2, a2, 1
; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT:    vslide1down.vx v8, v8, a0
; CHECK-NEXT:    vslide1down.vx v8, v8, a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vslide1down.mask.nxv1i64.i64(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64> %0,
    i64 %1,
    <vscale x 1 x i1> %2,
    i32 %3, i32 3)

  ret <vscale x 1 x i64> %a
}

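; With an undef mask the operation is lowered unmasked: the slides write v8
; directly and no vmerge is needed.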
define <vscale x 1 x i64> @intrinsic_vslide1down_mask_tama_undef_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_mask_tama_undef_mask_vx_nxv1i64_nxv1i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli a2, a2, e64, m1, ta, ma
; CHECK-NEXT:    slli a2, a2, 1
; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT:    vslide1down.vx v8, v8, a0
; CHECK-NEXT:    vslide1down.vx v8, v8, a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vslide1down.mask.nxv1i64.i64(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64> %0,
    i64 %1,
    <vscale x 1 x i1> undef,
    i32 %2, i32 3)

  ret <vscale x 1 x i64> %a
}