xref: /llvm-project/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/insertion-point.ll (revision 2be0abb7fe72ed4537b3eabcd3102d48ea845717)
1; RUN: opt -mtriple=amdgcn-amd-amdhsa -passes=load-store-vectorizer -S -o - %s | FileCheck %s
2; RUN: opt -mtriple=amdgcn-amd-amdhsa -aa-pipeline=basic-aa -passes='function(load-store-vectorizer)' -S -o - %s | FileCheck %s
3
4target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:160:256:256:32-p8:128:128-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
5
6; Check position of the inserted vector load/store.  Vectorized loads should be
7; inserted at the position of the first load in the chain, and stores should be
8; inserted at the position of the last store.
9
10; CHECK-LABEL: @insert_load_point(
11; CHECK: %z = add i32 %x, 4
12; CHECK: load <2 x float>
13; CHECK: %w = add i32 %y, 9
14; CHECK: %foo = add i32 %z, %w
15define amdgpu_kernel void @insert_load_point(ptr addrspace(1) nocapture %a, ptr addrspace(1) nocapture %b, ptr addrspace(1) nocapture readonly %c, i64 %idx, i32 %x, i32 %y) #0 {
16entry:
  ; Two consecutive float slots in %a and in %c: each pair is contiguous
  ; (stride 4 bytes), so each pair forms a vectorizable chain.
17  %a.idx.x = getelementptr inbounds float, ptr addrspace(1) %a, i64 %idx
18  %c.idx.x = getelementptr inbounds float, ptr addrspace(1) %c, i64 %idx
19  %a.idx.x.1 = getelementptr inbounds float, ptr addrspace(1) %a.idx.x, i64 1
20  %c.idx.x.1 = getelementptr inbounds float, ptr addrspace(1) %c.idx.x, i64 1
21
  ; The two scalar loads are deliberately interleaved with unrelated integer
  ; adds.  The CHECK lines above require the merged <2 x float> load to be
  ; inserted at the FIRST load's position: after %z but before %w and %foo.
22  %z = add i32 %x, 4
23  %ld.c = load float, ptr addrspace(1) %c.idx.x, align 4
24  %w = add i32 %y, 9
25  %ld.c.idx.1 = load float, ptr addrspace(1) %c.idx.x.1, align 4
26  %foo = add i32 %z, %w
27
  ; Adjacent stores to %a; these may also be merged, but the test only pins
  ; the load insertion point.
28  store float 0.0, ptr addrspace(1) %a.idx.x, align 4
29  store float 0.0, ptr addrspace(1) %a.idx.x.1, align 4
30
  ; Keep the loaded values and %foo live so nothing above is dead-code
  ; eliminated before the vectorizer runs.
31  %add = fadd float %ld.c, %ld.c.idx.1
32  store float %add, ptr addrspace(1) %b, align 4
33  store i32 %foo, ptr addrspace(3) null, align 4
34  ret void
35}
36
37; CHECK-LABEL: @insert_store_point(
38; CHECK: %z = add i32 %x, 4
39; CHECK: %w = add i32 %y, 9
40; CHECK: store <2 x float>
41; CHECK: %foo = add i32 %z, %w
42define amdgpu_kernel void @insert_store_point(ptr addrspace(1) nocapture %a, ptr addrspace(1) nocapture %b, ptr addrspace(1) nocapture readonly %c, i64 %idx, i32 %x, i32 %y) #0 {
43entry:
  ; Same contiguous float pairs as @insert_load_point.
44  %a.idx.x = getelementptr inbounds float, ptr addrspace(1) %a, i64 %idx
45  %c.idx.x = getelementptr inbounds float, ptr addrspace(1) %c, i64 %idx
46  %a.idx.x.1 = getelementptr inbounds float, ptr addrspace(1) %a.idx.x, i64 1
47  %c.idx.x.1 = getelementptr inbounds float, ptr addrspace(1) %c.idx.x, i64 1
48
  ; Loads are adjacent here (no interleaving); this function exercises the
  ; STORE insertion point instead.
49  %ld.c = load float, ptr addrspace(1) %c.idx.x, align 4
50  %ld.c.idx.1 = load float, ptr addrspace(1) %c.idx.x.1, align 4
51
  ; The two scalar stores are interleaved with unrelated adds.  The CHECK
  ; lines above require the merged <2 x float> store to be inserted at the
  ; LAST store's position: after %z and %w, but before %foo.
52  %z = add i32 %x, 4
53  store float 0.0, ptr addrspace(1) %a.idx.x, align 4
54  %w = add i32 %y, 9
55  store float 0.0, ptr addrspace(1) %a.idx.x.1, align 4
56  %foo = add i32 %z, %w
57
  ; Keep loaded values and %foo live so the vectorizer sees the full chains.
58  %add = fadd float %ld.c, %ld.c.idx.1
59  store float %add, ptr addrspace(1) %b, align 4
60  store i32 %foo, ptr addrspace(3) null, align 4
61  ret void
62}
63
64; Here we have four stores, with an aliasing load before the last one.  We can
65; vectorize the first three stores as <3 x float>, but this vectorized store must
66; be inserted at the location of the third scalar store, not the fourth one.
67;
68; CHECK-LABEL: @insert_store_point_alias
69; CHECK: store <3 x float>
70; CHECK: load float, ptr addrspace(1) %a.idx.2
71; CHECK: store float
72; CHECK-SAME: %a.idx.3
73define float @insert_store_point_alias(ptr addrspace(1) nocapture %a, i64 %idx) {
  ; Four consecutive float slots in %a (a chain of stride-1 GEPs).
74  %a.idx = getelementptr inbounds float, ptr addrspace(1) %a, i64 %idx
75  %a.idx.1 = getelementptr inbounds float, ptr addrspace(1) %a.idx, i64 1
76  %a.idx.2 = getelementptr inbounds float, ptr addrspace(1) %a.idx.1, i64 1
77  %a.idx.3 = getelementptr inbounds float, ptr addrspace(1) %a.idx.2, i64 1
78
  ; The load of %a.idx.2 aliases the preceding store to the same address,
  ; splitting the 4-store chain: only the first three stores may merge into
  ; a <3 x float>, and the CHECK lines above require that merged store to
  ; sit at the third scalar store's position, before the aliasing load —
  ; not sunk down to the fourth store.
79  store float 0.0, ptr addrspace(1) %a.idx, align 4
80  store float 0.0, ptr addrspace(1) %a.idx.1, align 4
81  store float 0.0, ptr addrspace(1) %a.idx.2, align 4
82  %x = load float, ptr addrspace(1) %a.idx.2, align 4
83  store float 0.0, ptr addrspace(1) %a.idx.3, align 4
84
  ; Return the loaded value so the aliasing load cannot be removed.
85  ret float %x
86}
87
88; Here we have four stores, with an aliasing load before the last one.  We can
89; vectorize three of the stores before the load, but the important thing is that
90; we *don't* sink the store to a[idx + 1] below the load.
91;
92; CHECK-LABEL: @insert_store_point_alias_ooo
93; CHECK: store <3 x float>{{.*}} %a.idx.1
94; CHECK: load float, ptr addrspace(1) %a.idx.2
95; CHECK: store float{{.*}} %a.idx
96define float @insert_store_point_alias_ooo(ptr addrspace(1) nocapture %a, i64 %idx) {
  ; Four consecutive float slots in %a, as in @insert_store_point_alias.
97  %a.idx = getelementptr inbounds float, ptr addrspace(1) %a, i64 %idx
98  %a.idx.1 = getelementptr inbounds float, ptr addrspace(1) %a.idx, i64 1
99  %a.idx.2 = getelementptr inbounds float, ptr addrspace(1) %a.idx.1, i64 1
100  %a.idx.3 = getelementptr inbounds float, ptr addrspace(1) %a.idx.2, i64 1
101
  ; Stores are out of order in program order (.3, .1, .2, then .0) with an
  ; aliasing load of %a.idx.2 before the last one.  Per the CHECK lines
  ; above, three stores before the load may merge into a <3 x float> based
  ; at %a.idx.1, but the store to %a.idx must NOT be sunk below the
  ; aliasing load, so it stays a scalar store after it.
102  store float 0.0, ptr addrspace(1) %a.idx.3, align 4
103  store float 0.0, ptr addrspace(1) %a.idx.1, align 4
104  store float 0.0, ptr addrspace(1) %a.idx.2, align 4
105  %x = load float, ptr addrspace(1) %a.idx.2, align 4
106  store float 0.0, ptr addrspace(1) %a.idx, align 4
107
  ; Return the loaded value so the aliasing load cannot be removed.
108  ret float %x
109}
110
; Attribute group referenced as #0 by the two amdgpu_kernel functions above.
111attributes #0 = { nounwind }
112