; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+m,+v -target-abi=ilp32d \
; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RV32
; RUN: llc -mtriple=riscv64 -mattr=+m,+v -target-abi=lp64d \
; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RV64

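; Test combining masked scatters with linearly computed addresses into strided
; or unit-stride stores.
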
%struct = type { i64, i64, ptr, i32, i32, i32, [4 x i32] }

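; The scatter address is a GEP to field 5 of %struct, so the combine folds the
; constant field offset into the scalar base (28 bytes on RV32, 32 on RV64) and
; scales the indices by the struct size (48 bytes on RV32, 56 on RV64).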
define void @complex_gep(ptr %p, <vscale x 2 x i64> %vec.ind, <vscale x 2 x i1> %m) {
; RV32-LABEL: complex_gep:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
; RV32-NEXT:    vnsrl.wi v10, v8, 0
; RV32-NEXT:    li a1, 48
; RV32-NEXT:    addi a0, a0, 28
; RV32-NEXT:    vmul.vx v8, v10, a1
; RV32-NEXT:    vmv.v.i v9, 0
; RV32-NEXT:    vsoxei32.v v9, (a0), v8, v0.t
; RV32-NEXT:    ret
;
; RV64-LABEL: complex_gep:
; RV64:       # %bb.0:
; RV64-NEXT:    li a1, 56
; RV64-NEXT:    addi a0, a0, 32
; RV64-NEXT:    vsetvli a2, zero, e64, m2, ta, ma
; RV64-NEXT:    vmul.vx v8, v8, a1
; RV64-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
; RV64-NEXT:    vmv.v.i v10, 0
; RV64-NEXT:    vsoxei64.v v10, (a0), v8, v0.t
; RV64-NEXT:    ret
  %gep = getelementptr inbounds %struct, ptr %p, <vscale x 2 x i64> %vec.ind, i32 5
  call void @llvm.masked.scatter.nxv2i32.nxv2p0(<vscale x 2 x i32> zeroinitializer, <vscale x 2 x ptr> %gep, i32 8, <vscale x 2 x i1> %m)
  ret void
}

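; Indices from llvm.stepvector give a constant-stride scatter: RV64 combines it
; into a strided store (vsse64.v) with a stride of 56 bytes, while RV32 keeps an
; indexed scatter with the indices narrowed to 32 bits.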
define void @strided_store_zero_start(i64 %n, ptr %p) {
; RV32-LABEL: strided_store_zero_start:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; RV32-NEXT:    vid.v v8
; RV32-NEXT:    li a0, 48
; RV32-NEXT:    addi a1, a2, 32
; RV32-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; RV32-NEXT:    vnsrl.wi v8, v8, 0
; RV32-NEXT:    vmul.vx v8, v8, a0
; RV32-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
; RV32-NEXT:    vmv.v.i v9, 0
; RV32-NEXT:    vsoxei32.v v9, (a1), v8
; RV32-NEXT:    ret
;
; RV64-LABEL: strided_store_zero_start:
; RV64:       # %bb.0:
; RV64-NEXT:    addi a0, a1, 36
; RV64-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
; RV64-NEXT:    vmv.v.i v8, 0
; RV64-NEXT:    li a1, 56
; RV64-NEXT:    vsse64.v v8, (a0), a1
; RV64-NEXT:    ret
  %step = tail call <vscale x 1 x i64> @llvm.stepvector.nxv1i64()
  %gep = getelementptr inbounds %struct, ptr %p, <vscale x 1 x i64> %step, i32 6
  tail call void @llvm.masked.scatter.nxv1i64.nxv1p0(<vscale x 1 x i64> zeroinitializer, <vscale x 1 x ptr> %gep, i32 8, <vscale x 1 x i1> splat (i1 true))
  ret void
}

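; Same pattern, but the step vector is offset by %n, so the scaled offset
; n * 56 is folded into the scalar base before the RV64 strided store.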
define void @strided_store_offset_start(i64 %n, ptr %p) {
; RV32-LABEL: strided_store_offset_start:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
; RV32-NEXT:    vid.v v8
; RV32-NEXT:    vadd.vx v8, v8, a0
; RV32-NEXT:    li a0, 48
; RV32-NEXT:    addi a1, a2, 32
; RV32-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; RV32-NEXT:    vnsrl.wi v8, v8, 0
; RV32-NEXT:    vmul.vx v8, v8, a0
; RV32-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
; RV32-NEXT:    vmv.v.i v9, 0
; RV32-NEXT:    vsoxei32.v v9, (a1), v8
; RV32-NEXT:    ret
;
; RV64-LABEL: strided_store_offset_start:
; RV64:       # %bb.0:
; RV64-NEXT:    slli a2, a0, 3
; RV64-NEXT:    slli a0, a0, 6
; RV64-NEXT:    vsetvli a3, zero, e64, m1, ta, ma
; RV64-NEXT:    vmv.v.i v8, 0
; RV64-NEXT:    sub a0, a0, a2
; RV64-NEXT:    add a0, a1, a0
; RV64-NEXT:    addi a0, a0, 36
; RV64-NEXT:    li a1, 56
; RV64-NEXT:    vsse64.v v8, (a0), a1
; RV64-NEXT:    ret
  %step = tail call <vscale x 1 x i64> @llvm.stepvector.nxv1i64()
  %.splatinsert = insertelement <vscale x 1 x i64> poison, i64 %n, i64 0
  %.splat = shufflevector <vscale x 1 x i64> %.splatinsert, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
  %add = add <vscale x 1 x i64> %step, %.splat
  %gep = getelementptr inbounds %struct, ptr %p, <vscale x 1 x i64> %add, i32 6
  tail call void @llvm.masked.scatter.nxv1i64.nxv1p0(<vscale x 1 x i64> zeroinitializer, <vscale x 1 x ptr> %gep, i32 8, <vscale x 1 x i1> splat (i1 true))
  ret void
}

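; Consecutive i64 addresses give a stride equal to the element size, so the
; scatter becomes a unit-stride store (a whole-register store on RV64).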
define void @stride_one_store(i64 %n, ptr %p) {
; RV32-LABEL: stride_one_store:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; RV32-NEXT:    vid.v v8
; RV32-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; RV32-NEXT:    vnsrl.wi v8, v8, 0
; RV32-NEXT:    vsll.vi v8, v8, 3
; RV32-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
; RV32-NEXT:    vmv.v.i v9, 0
; RV32-NEXT:    vsoxei32.v v9, (a2), v8
; RV32-NEXT:    ret
;
; RV64-LABEL: stride_one_store:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; RV64-NEXT:    vmv.v.i v8, 0
; RV64-NEXT:    vs1r.v v8, (a1)
; RV64-NEXT:    ret
  %step = tail call <vscale x 1 x i64> @llvm.stepvector.nxv1i64()
  %gep = getelementptr inbounds i64, ptr %p, <vscale x 1 x i64> %step
  tail call void @llvm.masked.scatter.nxv1i64.nxv1p0(<vscale x 1 x i64> zeroinitializer, <vscale x 1 x ptr> %gep, i32 8, <vscale x 1 x i1> splat (i1 true))
  ret void
}

declare <vscale x 1 x i64> @llvm.stepvector.nxv1i64()
declare void @llvm.masked.scatter.nxv2i32.nxv2p0(<vscale x 2 x i32>, <vscale x 2 x ptr>, i32, <vscale x 2 x i1>)
declare void @llvm.masked.scatter.nxv1i64.nxv1p0(<vscale x 1 x i64>, <vscale x 1 x ptr>, i32, <vscale x 1 x i1>)