; RUN: sed 's/CODE_OBJECT_VERSION/200/g' %s | llc -show-mc-encoding -mattr=+promote-alloca -disable-promote-alloca-to-vector -amdgpu-load-store-vectorizer=0 -enable-amdgpu-aa=0 -verify-machineinstrs -mtriple=amdgcn | FileCheck -enable-var-scope -check-prefix=SI-PROMOTE -check-prefix=SI -check-prefix=FUNC %s
; RUN: sed 's/CODE_OBJECT_VERSION/200/g' %s | llc -show-mc-encoding -mattr=+promote-alloca -disable-promote-alloca-to-vector -amdgpu-load-store-vectorizer=0 -enable-amdgpu-aa=0 -verify-machineinstrs -mtriple=amdgcn--amdhsa -mcpu=kaveri -mattr=-unaligned-access-mode | FileCheck -enable-var-scope -check-prefix=SI-PROMOTE -check-prefix=SI -check-prefix=FUNC -check-prefix=HSA-PROMOTE %s
; RUN: sed 's/CODE_OBJECT_VERSION/200/g' %s | llc -show-mc-encoding -mattr=-promote-alloca -amdgpu-load-store-vectorizer=0 -enable-amdgpu-aa=0 -verify-machineinstrs -mtriple=amdgcn | FileCheck %s -check-prefix=SI-ALLOCA -check-prefix=SI -check-prefix=FUNC
; RUN: sed 's/CODE_OBJECT_VERSION/200/g' %s | llc -show-mc-encoding -mattr=-promote-alloca -amdgpu-load-store-vectorizer=0 -enable-amdgpu-aa=0 -verify-machineinstrs -mtriple=amdgcn-amdhsa -mcpu=kaveri -mattr=-unaligned-access-mode | FileCheck -enable-var-scope -check-prefix=SI-ALLOCA -check-prefix=SI -check-prefix=FUNC -check-prefix=HSA-ALLOCA %s
; RUN: sed 's/CODE_OBJECT_VERSION/200/g' %s | llc -show-mc-encoding -mattr=+promote-alloca -disable-promote-alloca-to-vector -amdgpu-load-store-vectorizer=0 -enable-amdgpu-aa=0 -verify-machineinstrs -mtriple=amdgcn-amdhsa -mtriple=amdgcn -mcpu=tonga -mattr=-unaligned-access-mode | FileCheck -enable-var-scope -check-prefix=SI-PROMOTE -check-prefix=SI -check-prefix=FUNC %s
; RUN: sed 's/CODE_OBJECT_VERSION/200/g' %s | llc -show-mc-encoding -mattr=+promote-alloca -amdgpu-load-store-vectorizer=0 -enable-amdgpu-aa=0 -verify-machineinstrs -mtriple=amdgcn-amdhsa -mtriple=amdgcn -mcpu=tonga -mattr=-unaligned-access-mode | FileCheck -enable-var-scope -check-prefix=SI-PROMOTE-VECT -check-prefix=SI -check-prefix=FUNC %s
; RUN: sed 's/CODE_OBJECT_VERSION/200/g' %s | llc -show-mc-encoding -mattr=-promote-alloca -amdgpu-load-store-vectorizer=0 -enable-amdgpu-aa=0 -verify-machineinstrs -mtriple=amdgcn-amdhsa -mtriple=amdgcn -mcpu=tonga -mattr=-unaligned-access-mode | FileCheck -enable-var-scope -check-prefix=SI-ALLOCA -check-prefix=SI -check-prefix=FUNC %s

; RUN: sed 's/CODE_OBJECT_VERSION/400/g' %s | opt -S -mtriple=amdgcn-unknown-amdhsa -data-layout=A5 -mcpu=kaveri -passes=amdgpu-promote-alloca -disable-promote-alloca-to-vector | FileCheck -enable-var-scope -check-prefix=HSAOPT -check-prefix=OPT %s
; RUN: sed 's/CODE_OBJECT_VERSION/400/g' %s | opt -S -mtriple=amdgcn-unknown-unknown -data-layout=A5 -mcpu=kaveri -passes=amdgpu-promote-alloca -disable-promote-alloca-to-vector | FileCheck -enable-var-scope -check-prefix=NOHSAOPT -check-prefix=OPT %s

; RUN: sed 's/CODE_OBJECT_VERSION/400/g' %s | llc -march=r600 -mcpu=cypress -disable-promote-alloca-to-vector | FileCheck %s -check-prefix=R600 -check-prefix=FUNC
; RUN: sed 's/CODE_OBJECT_VERSION/400/g' %s | llc -march=r600 -mcpu=cypress | FileCheck %s -check-prefix=R600-VECT -check-prefix=FUNC

; HSAOPT: @mova_same_clause.stack = internal unnamed_addr addrspace(3) global [256 x [5 x i32]] poison, align 4
; HSAOPT: @high_alignment.stack = internal unnamed_addr addrspace(3) global [256 x [8 x i32]] poison, align 16


; FUNC-LABEL: {{^}}mova_same_clause:
; OPT-LABEL: @mova_same_clause(

; R600: LDS_WRITE
; R600: LDS_WRITE
; R600: LDS_READ
; R600: LDS_READ

; HSA-PROMOTE: .amd_kernel_code_t
; HSA-PROMOTE: workgroup_group_segment_byte_size = 5120
; HSA-PROMOTE: .end_amd_kernel_code_t

; HSA-PROMOTE: s_load_dwordx2 s[{{[0-9:]+}}], s[4:5], 0x1

; SI-PROMOTE: ds_write_b32
; SI-PROMOTE: ds_write_b32
; SI-PROMOTE: ds_read_b32
; SI-PROMOTE: ds_read_b32

; HSA-ALLOCA: .amd_kernel_code_t
; FIXME: Creating the emergency stack slots causes us to over-estimate scratch
; by 4 bytes.
; HSA-ALLOCA: workitem_private_segment_byte_size = 24
; HSA-ALLOCA: .end_amd_kernel_code_t

; HSA-ALLOCA: s_mov_b32 flat_scratch_lo, s7
; HSA-ALLOCA: s_add_i32 s6, s6, s9
; HSA-ALLOCA: s_lshr_b32 flat_scratch_hi, s6, 8

; SI-ALLOCA: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}], 0 offen ; encoding: [0x00,0x10,0x70,0xe0
; SI-ALLOCA: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}], 0 offen ; encoding: [0x00,0x10,0x70,0xe0


; HSAOPT: [[DISPATCH_PTR:%[0-9]+]] = call noalias nonnull dereferenceable(64) ptr addrspace(4) @llvm.amdgcn.dispatch.ptr()
; HSAOPT: [[GEP0:%[0-9]+]] = getelementptr inbounds i32, ptr addrspace(4) [[DISPATCH_PTR]], i64 1
; HSAOPT: [[LDXY:%[0-9]+]] = load i32, ptr addrspace(4) [[GEP0]], align 4, !invariant.load !1
; HSAOPT: [[GEP1:%[0-9]+]] = getelementptr inbounds i32, ptr addrspace(4) [[DISPATCH_PTR]], i64 2
; HSAOPT: [[LDZU:%[0-9]+]] = load i32, ptr addrspace(4) [[GEP1]], align 4, !range !2, !invariant.load !1
; HSAOPT: [[EXTRACTY:%[0-9]+]] = lshr i32 [[LDXY]], 16

; HSAOPT: [[WORKITEM_ID_X:%[0-9]+]] = call i32 @llvm.amdgcn.workitem.id.x(), !range !3
; HSAOPT: [[WORKITEM_ID_Y:%[0-9]+]] = call i32 @llvm.amdgcn.workitem.id.y(), !range !3
; HSAOPT: [[WORKITEM_ID_Z:%[0-9]+]] = call i32 @llvm.amdgcn.workitem.id.z(), !range !3

; HSAOPT: [[Y_SIZE_X_Z_SIZE:%[0-9]+]] = mul nuw nsw i32 [[EXTRACTY]], [[LDZU]]
; HSAOPT: [[YZ_X_XID:%[0-9]+]] = mul i32 [[Y_SIZE_X_Z_SIZE]], [[WORKITEM_ID_X]]
; HSAOPT: [[Y_X_Z_SIZE:%[0-9]+]] = mul nuw nsw i32 [[WORKITEM_ID_Y]], [[LDZU]]
; HSAOPT: [[ADD_YZ_X_X_YZ_SIZE:%[0-9]+]] = add i32 [[YZ_X_XID]], [[Y_X_Z_SIZE]]
; HSAOPT: [[ADD_ZID:%[0-9]+]] = add i32 [[ADD_YZ_X_X_YZ_SIZE]], [[WORKITEM_ID_Z]]

; HSAOPT: [[LOCAL_GEP:%[0-9]+]] = getelementptr inbounds [256 x [5 x i32]], ptr addrspace(3) @mova_same_clause.stack, i32 0, i32 [[ADD_ZID]]
; HSAOPT: %arrayidx1 = getelementptr inbounds [5 x i32], ptr addrspace(3) [[LOCAL_GEP]], i32 0, i32 {{%[0-9]+}}
; HSAOPT: %arrayidx3 = getelementptr inbounds [5 x i32], ptr addrspace(3) [[LOCAL_GEP]], i32 0, i32 {{%[0-9]+}}
; HSAOPT: %arrayidx12 = getelementptr inbounds [5 x i32], ptr addrspace(3) [[LOCAL_GEP]], i32 0, i32 1


; NOHSAOPT: call i32 @llvm.r600.read.local.size.y(), !range !1
; NOHSAOPT: call i32 @llvm.r600.read.local.size.z(), !range !1
; NOHSAOPT: call i32 @llvm.amdgcn.workitem.id.x(), !range !2
; NOHSAOPT: call i32 @llvm.amdgcn.workitem.id.y(), !range !2
; NOHSAOPT: call i32 @llvm.amdgcn.workitem.id.z(), !range !2
define amdgpu_kernel void @mova_same_clause(ptr addrspace(1) nocapture %out, ptr addrspace(1) nocapture %in) #0 {
entry:
  %stack = alloca [5 x i32], align 4, addrspace(5)
  %0 = load i32, ptr addrspace(1) %in, align 4
  %arrayidx1 = getelementptr inbounds [5 x i32], ptr addrspace(5) %stack, i32 0, i32 %0
  store i32 4, ptr addrspace(5) %arrayidx1, align 4
  %arrayidx2 = getelementptr inbounds i32, ptr addrspace(1) %in, i32 1
  %1 = load i32, ptr addrspace(1) %arrayidx2, align 4
  %arrayidx3 = getelementptr inbounds [5 x i32], ptr addrspace(5) %stack, i32 0, i32 %1
  store i32 5, ptr addrspace(5) %arrayidx3, align 4
  %2 = load i32, ptr addrspace(5) %stack, align 4
  store i32 %2, ptr addrspace(1) %out, align 4
  %arrayidx12 = getelementptr inbounds [5 x i32], ptr addrspace(5) %stack, i32 0, i32 1
  %3 = load i32, ptr addrspace(5) %arrayidx12
  %arrayidx13 = getelementptr inbounds i32, ptr addrspace(1) %out, i32 1
  store i32 %3, ptr addrspace(1) %arrayidx13
  ret void
}

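; The 16-byte alloca alignment should carry over to the promoted
; @high_alignment.stack LDS global (checked above).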
; OPT-LABEL: @high_alignment(
; OPT: getelementptr inbounds [256 x [8 x i32]], ptr addrspace(3) @high_alignment.stack, i32 0, i32 %{{[0-9]+}}
define amdgpu_kernel void @high_alignment(ptr addrspace(1) nocapture %out, ptr addrspace(1) nocapture %in) #0 {
entry:
  %stack = alloca [8 x i32], align 16, addrspace(5)
  %0 = load i32, ptr addrspace(1) %in, align 4
  %arrayidx1 = getelementptr inbounds [8 x i32], ptr addrspace(5) %stack, i32 0, i32 %0
  store i32 4, ptr addrspace(5) %arrayidx1, align 4
  %arrayidx2 = getelementptr inbounds i32, ptr addrspace(1) %in, i32 1
  %1 = load i32, ptr addrspace(1) %arrayidx2, align 4
  %arrayidx3 = getelementptr inbounds [8 x i32], ptr addrspace(5) %stack, i32 0, i32 %1
  store i32 5, ptr addrspace(5) %arrayidx3, align 4
  %2 = load i32, ptr addrspace(5) %stack, align 4
  store i32 %2, ptr addrspace(1) %out, align 4
  %arrayidx12 = getelementptr inbounds [8 x i32], ptr addrspace(5) %stack, i32 0, i32 1
  %3 = load i32, ptr addrspace(5) %arrayidx12
  %arrayidx13 = getelementptr inbounds i32, ptr addrspace(1) %out, i32 1
  store i32 %3, ptr addrspace(1) %arrayidx13
  ret void
}

; FUNC-LABEL: {{^}}no_replace_inbounds_gep:
; OPT-LABEL: @no_replace_inbounds_gep(
; OPT: alloca [5 x i32]

; SI-NOT: ds_write
define amdgpu_kernel void @no_replace_inbounds_gep(ptr addrspace(1) nocapture %out, ptr addrspace(1) nocapture %in) #0 {
entry:
  %stack = alloca [5 x i32], align 4, addrspace(5)
  %0 = load i32, ptr addrspace(1) %in, align 4
  %arrayidx1 = getelementptr [5 x i32], ptr addrspace(5) %stack, i32 0, i32 %0
  store i32 4, ptr addrspace(5) %arrayidx1, align 4
  %arrayidx2 = getelementptr inbounds i32, ptr addrspace(1) %in, i32 1
  %1 = load i32, ptr addrspace(1) %arrayidx2, align 4
  %arrayidx3 = getelementptr inbounds [5 x i32], ptr addrspace(5) %stack, i32 0, i32 %1
  store i32 5, ptr addrspace(5) %arrayidx3, align 4
  %2 = load i32, ptr addrspace(5) %stack, align 4
  store i32 %2, ptr addrspace(1) %out, align 4
  %arrayidx12 = getelementptr inbounds [5 x i32], ptr addrspace(5) %stack, i32 0, i32 1
  %3 = load i32, ptr addrspace(5) %arrayidx12
  %arrayidx13 = getelementptr inbounds i32, ptr addrspace(1) %out, i32 1
  store i32 %3, ptr addrspace(1) %arrayidx13
  ret void
}

; This test checks that the stack offset is calculated correctly for structs.
; All register loads/stores should be optimized away, so there shouldn't be
; any MOVA instructions.
;
; XXX: This generated code has unnecessary MOVs, we should be able to optimize
; this.

; FUNC-LABEL: {{^}}multiple_structs:
; OPT-LABEL: @multiple_structs(

; R600-NOT: MOVA_INT
; SI-NOT: v_movrel
; SI-NOT: v_movrel
%struct.point = type { i32, i32 }

define amdgpu_kernel void @multiple_structs(ptr addrspace(1) %out) #0 {
entry:
  %a = alloca %struct.point, addrspace(5)
  %b = alloca %struct.point, addrspace(5)
  %a.y.ptr = getelementptr %struct.point, ptr addrspace(5) %a, i32 0, i32 1
  %b.y.ptr = getelementptr %struct.point, ptr addrspace(5) %b, i32 0, i32 1
  store i32 0, ptr addrspace(5) %a
  store i32 1, ptr addrspace(5) %a.y.ptr
  store i32 2, ptr addrspace(5) %b
  store i32 3, ptr addrspace(5) %b.y.ptr
  %a.indirect = load i32, ptr addrspace(5) %a
  %b.indirect = load i32, ptr addrspace(5) %b
  %0 = add i32 %a.indirect, %b.indirect
  store i32 %0, ptr addrspace(1) %out
  ret void
}

; Test direct access of a private array inside a loop.  The private array
; loads and stores should be lowered to copies, so there shouldn't be any
; MOVA instructions.

; FUNC-LABEL: {{^}}direct_loop:
; R600-NOT: MOVA_INT
; SI-NOT: v_movrel

define amdgpu_kernel void @direct_loop(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
entry:
  %prv_array_const = alloca [2 x i32], addrspace(5)
  %prv_array = alloca [2 x i32], addrspace(5)
  %a = load i32, ptr addrspace(1) %in
  %b_src_ptr = getelementptr inbounds i32, ptr addrspace(1) %in, i32 1
  %b = load i32, ptr addrspace(1) %b_src_ptr
  store i32 %a, ptr addrspace(5) %prv_array_const
  %b_dst_ptr = getelementptr inbounds [2 x i32], ptr addrspace(5) %prv_array_const, i32 0, i32 1
  store i32 %b, ptr addrspace(5) %b_dst_ptr
  br label %for.body

for.body:
  %inc = phi i32 [0, %entry], [%count, %for.body]
  %x = load i32, ptr addrspace(5) %prv_array_const
  %y = load i32, ptr addrspace(5) %prv_array
  %xy = add i32 %x, %y
  store i32 %xy, ptr addrspace(5) %prv_array
  %count = add i32 %inc, 1
  %done = icmp eq i32 %count, 4095
  br i1 %done, label %for.end, label %for.body

for.end:
  %value = load i32, ptr addrspace(5) %prv_array
  store i32 %value, ptr addrspace(1) %out
  ret void
}

; FUNC-LABEL: {{^}}short_array:

; R600-VECT: MOVA_INT

; SI-ALLOCA-DAG: buffer_store_short v{{[0-9]+}}, off, s[{{[0-9]+:[0-9]+}}], 0 offset:6 ; encoding: [0x06,0x00,0x68,0xe0
; SI-ALLOCA-DAG: buffer_store_short v{{[0-9]+}}, off, s[{{[0-9]+:[0-9]+}}], 0 offset:4 ; encoding: [0x04,0x00,0x68,0xe0
; Loaded value is 0 or 1, so sext will become zext, so we get buffer_load_ushort instead of buffer_load_sshort.
; SI-ALLOCA: buffer_load_sshort v{{[0-9]+}}, v{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}], 0

; SI-PROMOTE-VECT: s_load_dword [[IDX:s[0-9]+]]
; SI-PROMOTE-VECT: s_lshl_b32 [[SCALED_IDX:s[0-9]+]], [[IDX]], 4
; SI-PROMOTE-VECT: s_lshr_b32 [[SREG:s[0-9]+]], 0x10000, [[SCALED_IDX]]
; SI-PROMOTE-VECT: s_and_b32 s{{[0-9]+}}, [[SREG]], 1
define amdgpu_kernel void @short_array(ptr addrspace(1) %out, i32 %index) #0 {
entry:
  %0 = alloca [2 x i16], addrspace(5)
  %1 = getelementptr inbounds [2 x i16], ptr addrspace(5) %0, i32 0, i32 1
  store i16 0, ptr addrspace(5) %0
  store i16 1, ptr addrspace(5) %1
  %2 = getelementptr inbounds [2 x i16], ptr addrspace(5) %0, i32 0, i32 %index
  %3 = load i16, ptr addrspace(5) %2
  %4 = sext i16 %3 to i32
  store i32 %4, ptr addrspace(1) %out
  ret void
}

; FUNC-LABEL: {{^}}char_array:

; R600-VECT: MOVA_INT

; SI-PROMOTE-VECT-DAG: s_lshl_b32
; SI-PROMOTE-VECT-DAG: v_lshrrev

; SI-ALLOCA-DAG: buffer_store_byte v{{[0-9]+}}, off, s[{{[0-9]+:[0-9]+}}], 0 offset:4 ; encoding: [0x04,0x00,0x60,0xe0
; SI-ALLOCA-DAG: buffer_store_byte v{{[0-9]+}}, off, s[{{[0-9]+:[0-9]+}}], 0 offset:5 ; encoding: [0x05,0x00,0x60,0xe0
define amdgpu_kernel void @char_array(ptr addrspace(1) %out, i32 %index) #0 {
entry:
  %0 = alloca [2 x i8], addrspace(5)
  %1 = getelementptr inbounds [2 x i8], ptr addrspace(5) %0, i32 0, i32 1
  store i8 0, ptr addrspace(5) %0
  store i8 1, ptr addrspace(5) %1
  %2 = getelementptr inbounds [2 x i8], ptr addrspace(5) %0, i32 0, i32 %index
  %3 = load i8, ptr addrspace(5) %2
  %4 = sext i8 %3 to i32
  store i32 %4, ptr addrspace(1) %out
  ret void
}

; Test that two stack objects are not stored in the same register.
; The second stack object should be in T3.X
; FUNC-LABEL: {{^}}no_overlap:
;
; A total of 5 bytes should be allocated and used.
; SI: buffer_store_byte v{{[0-9]+}}, off, s[{{[0-9]+:[0-9]+}}], 0 offset:4 ;
define amdgpu_kernel void @no_overlap(ptr addrspace(1) %out, i32 %in) #0 {
entry:
  %0 = alloca [3 x i8], align 1, addrspace(5)
  %1 = alloca [2 x i8], align 1, addrspace(5)
  %2 = getelementptr [3 x i8], ptr addrspace(5) %0, i32 0, i32 1
  %3 = getelementptr [3 x i8], ptr addrspace(5) %0, i32 0, i32 2
  %4 = getelementptr [2 x i8], ptr addrspace(5) %1, i32 0, i32 1
  store i8 0, ptr addrspace(5) %0
  store i8 1, ptr addrspace(5) %2
  store i8 2, ptr addrspace(5) %3
  store i8 1, ptr addrspace(5) %1
  store i8 0, ptr addrspace(5) %4
  %5 = getelementptr [3 x i8], ptr addrspace(5) %0, i32 0, i32 %in
  %6 = getelementptr [2 x i8], ptr addrspace(5) %1, i32 0, i32 %in
  %7 = load i8, ptr addrspace(5) %5
  %8 = load i8, ptr addrspace(5) %6
  %9 = add i8 %7, %8
  %10 = sext i8 %9 to i32
  store i32 %10, ptr addrspace(1) %out
  ret void
}

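; Variable index into a nested i8 array.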
define amdgpu_kernel void @char_array_array(ptr addrspace(1) %out, i32 %index) #0 {
entry:
  %alloca = alloca [2 x [2 x i8]], addrspace(5)
  %gep1 = getelementptr [2 x [2 x i8]], ptr addrspace(5) %alloca, i32 0, i32 0, i32 1
  store i8 0, ptr addrspace(5) %alloca
  store i8 1, ptr addrspace(5) %gep1
  %gep2 = getelementptr [2 x [2 x i8]], ptr addrspace(5) %alloca, i32 0, i32 0, i32 %index
  %load = load i8, ptr addrspace(5) %gep2
  %sext = sext i8 %load to i32
  store i32 %sext, ptr addrspace(1) %out
  ret void
}

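; Variable index into a nested i32 array.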
define amdgpu_kernel void @i32_array_array(ptr addrspace(1) %out, i32 %index) #0 {
entry:
  %alloca = alloca [2 x [2 x i32]], addrspace(5)
  %gep1 = getelementptr [2 x [2 x i32]], ptr addrspace(5) %alloca, i32 0, i32 0, i32 1
  store i32 0, ptr addrspace(5) %alloca
  store i32 1, ptr addrspace(5) %gep1
  %gep2 = getelementptr [2 x [2 x i32]], ptr addrspace(5) %alloca, i32 0, i32 0, i32 %index
  %load = load i32, ptr addrspace(5) %gep2
  store i32 %load, ptr addrspace(1) %out
  ret void
}

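; Variable index into a nested i64 array.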
define amdgpu_kernel void @i64_array_array(ptr addrspace(1) %out, i32 %index) #0 {
entry:
  %alloca = alloca [2 x [2 x i64]], addrspace(5)
  %gep1 = getelementptr [2 x [2 x i64]], ptr addrspace(5) %alloca, i32 0, i32 0, i32 1
  store i64 0, ptr addrspace(5) %alloca
  store i64 1, ptr addrspace(5) %gep1
  %gep2 = getelementptr [2 x [2 x i64]], ptr addrspace(5) %alloca, i32 0, i32 0, i32 %index
  %load = load i64, ptr addrspace(5) %gep2
  store i64 %load, ptr addrspace(1) %out
  ret void
}

%struct.pair32 = type { i32, i32 }

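; Variable index into a nested array of structs.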
define amdgpu_kernel void @struct_array_array(ptr addrspace(1) %out, i32 %index) #0 {
entry:
  %alloca = alloca [2 x [2 x %struct.pair32]], addrspace(5)
  %gep0 = getelementptr [2 x [2 x %struct.pair32]], ptr addrspace(5) %alloca, i32 0, i32 0, i32 0, i32 1
  %gep1 = getelementptr [2 x [2 x %struct.pair32]], ptr addrspace(5) %alloca, i32 0, i32 0, i32 1, i32 1
  store i32 0, ptr addrspace(5) %gep0
  store i32 1, ptr addrspace(5) %gep1
  %gep2 = getelementptr [2 x [2 x %struct.pair32]], ptr addrspace(5) %alloca, i32 0, i32 0, i32 %index, i32 0
  %load = load i32, ptr addrspace(5) %gep2
  store i32 %load, ptr addrspace(1) %out
  ret void
}

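; Variable index into an array of structs.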
define amdgpu_kernel void @struct_pair32_array(ptr addrspace(1) %out, i32 %index) #0 {
entry:
  %alloca = alloca [2 x %struct.pair32], addrspace(5)
  %gep0 = getelementptr [2 x %struct.pair32], ptr addrspace(5) %alloca, i32 0, i32 0, i32 1
  %gep1 = getelementptr [2 x %struct.pair32], ptr addrspace(5) %alloca, i32 0, i32 1, i32 0
  store i32 0, ptr addrspace(5) %gep0
  store i32 1, ptr addrspace(5) %gep1
  %gep2 = getelementptr [2 x %struct.pair32], ptr addrspace(5) %alloca, i32 0, i32 %index, i32 0
  %load = load i32, ptr addrspace(5) %gep2
  store i32 %load, ptr addrspace(1) %out
  ret void
}

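; Load through a select between two pointers into the same private array.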
define amdgpu_kernel void @select_private(ptr addrspace(1) %out, i32 %in) nounwind {
entry:
  %tmp = alloca [2 x i32], addrspace(5)
  %tmp2 = getelementptr [2 x i32], ptr addrspace(5) %tmp, i32 0, i32 1
  store i32 0, ptr addrspace(5) %tmp
  store i32 1, ptr addrspace(5) %tmp2
  %cmp = icmp eq i32 %in, 0
  %sel = select i1 %cmp, ptr addrspace(5) %tmp, ptr addrspace(5) %tmp2
  %load = load i32, ptr addrspace(5) %sel
  store i32 %load, ptr addrspace(1) %out
  ret void
}

; AMDGPUPromoteAlloca does not know how to handle ptrtoint.  When it
; finds one, it should stop trying to promote.

; FUNC-LABEL: ptrtoint:
; SI-NOT: ds_write
; SI: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}], 0 offen
; SI: v_add_{{[iu]}}32_e32 [[ADD_OFFSET:v[0-9]+]], vcc, 5,
; SI: buffer_load_dword v{{[0-9]+}}, [[ADD_OFFSET:v[0-9]+]], s[{{[0-9]+:[0-9]+}}], 0 offen ;
define amdgpu_kernel void @ptrtoint(ptr addrspace(1) %out, i32 %a, i32 %b) #0 {
  %alloca = alloca [16 x i32], addrspace(5)
  %tmp0 = getelementptr [16 x i32], ptr addrspace(5) %alloca, i32 0, i32 %a
  store i32 5, ptr addrspace(5) %tmp0
  %tmp1 = ptrtoint ptr addrspace(5) %alloca to i32
  %tmp2 = add i32 %tmp1, 5
  %tmp3 = inttoptr i32 %tmp2 to ptr addrspace(5)
  %tmp4 = getelementptr i32, ptr addrspace(5) %tmp3, i32 %b
  %tmp5 = load i32, ptr addrspace(5) %tmp4
  store i32 %tmp5, ptr addrspace(1) %out
  ret void
}

; OPT-LABEL: @pointer_typed_alloca(
; OPT:  getelementptr inbounds [256 x ptr addrspace(1)], ptr addrspace(3) @pointer_typed_alloca.A.addr, i32 0, i32 %{{[0-9]+}}
; OPT: load ptr addrspace(1), ptr addrspace(3) %{{[0-9]+}}, align 4
define amdgpu_kernel void @pointer_typed_alloca(ptr addrspace(1) %A) #1 {
entry:
  %A.addr = alloca ptr addrspace(1), align 4, addrspace(5)
  store ptr addrspace(1) %A, ptr addrspace(5) %A.addr, align 4
  %ld0 = load ptr addrspace(1), ptr addrspace(5) %A.addr, align 4
  store i32 1, ptr addrspace(1) %ld0, align 4
  %ld1 = load ptr addrspace(1), ptr addrspace(5) %A.addr, align 4
  %arrayidx1 = getelementptr inbounds i32, ptr addrspace(1) %ld1, i32 1
  store i32 2, ptr addrspace(1) %arrayidx1, align 4
  %ld2 = load ptr addrspace(1), ptr addrspace(5) %A.addr, align 4
  %arrayidx2 = getelementptr inbounds i32, ptr addrspace(1) %ld2, i32 2
  store i32 3, ptr addrspace(1) %arrayidx2, align 4
  ret void
}

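; Load a <16 x i32> from a dynamically indexed private array.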
; FUNC-LABEL: v16i32_stack:

; R600: MOVA_INT
; R600: MOVA_INT
; R600: MOVA_INT
; R600: MOVA_INT
; R600: MOVA_INT
; R600: MOVA_INT
; R600: MOVA_INT
; R600: MOVA_INT
; R600: MOVA_INT
; R600: MOVA_INT
; R600: MOVA_INT
; R600: MOVA_INT
; R600: MOVA_INT
; R600: MOVA_INT
; R600: MOVA_INT
; R600: MOVA_INT

; SI: buffer_load_dword
; SI: buffer_load_dword
; SI: buffer_load_dword
; SI: buffer_load_dword
; SI: buffer_load_dword
; SI: buffer_load_dword
; SI: buffer_load_dword
; SI: buffer_load_dword
; SI: buffer_load_dword
; SI: buffer_load_dword
; SI: buffer_load_dword
; SI: buffer_load_dword
; SI: buffer_load_dword
; SI: buffer_load_dword
; SI: buffer_load_dword
; SI: buffer_load_dword

define amdgpu_kernel void @v16i32_stack(ptr addrspace(1) %out, i32 %a) {
  %alloca = alloca [2 x <16 x i32>], addrspace(5)
  %tmp0 = getelementptr [2 x <16 x i32>], ptr addrspace(5) %alloca, i32 0, i32 %a
  %tmp5 = load <16 x i32>, ptr addrspace(5) %tmp0
  store <16 x i32> %tmp5, ptr addrspace(1) %out
  ret void
}

; FUNC-LABEL: v16float_stack:

; R600: MOVA_INT
; R600: MOVA_INT
; R600: MOVA_INT
; R600: MOVA_INT
; R600: MOVA_INT
; R600: MOVA_INT
; R600: MOVA_INT
; R600: MOVA_INT
; R600: MOVA_INT
; R600: MOVA_INT
; R600: MOVA_INT
; R600: MOVA_INT
; R600: MOVA_INT
; R600: MOVA_INT
; R600: MOVA_INT
; R600: MOVA_INT

; SI: buffer_load_dword
; SI: buffer_load_dword
; SI: buffer_load_dword
; SI: buffer_load_dword
; SI: buffer_load_dword
; SI: buffer_load_dword
; SI: buffer_load_dword
; SI: buffer_load_dword
; SI: buffer_load_dword
; SI: buffer_load_dword
; SI: buffer_load_dword
; SI: buffer_load_dword
; SI: buffer_load_dword
; SI: buffer_load_dword
; SI: buffer_load_dword
; SI: buffer_load_dword

define amdgpu_kernel void @v16float_stack(ptr addrspace(1) %out, i32 %a) {
  %alloca = alloca [2 x <16 x float>], addrspace(5)
  %tmp0 = getelementptr [2 x <16 x float>], ptr addrspace(5) %alloca, i32 0, i32 %a
  %tmp5 = load <16 x float>, ptr addrspace(5) %tmp0
  store <16 x float> %tmp5, ptr addrspace(1) %out
  ret void
}

; FUNC-LABEL: v2float_stack:

; R600: MOVA_INT
; R600: MOVA_INT

; SI: buffer_load_dword
; SI: buffer_load_dword

define amdgpu_kernel void @v2float_stack(ptr addrspace(1) %out, i32 %a) {
  %alloca = alloca [16 x <2 x float>], addrspace(5)
  %tmp0 = getelementptr [16 x <2 x float>], ptr addrspace(5) %alloca, i32 0, i32 %a
  %tmp5 = load <2 x float>, ptr addrspace(5) %tmp0
  store <2 x float> %tmp5, ptr addrspace(1) %out
  ret void
}

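; Whole-array store and load of a zero-length private array.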
; OPT-LABEL: @direct_alloca_read_0xi32(
; OPT: store [0 x i32] undef, ptr addrspace(3)
; OPT: load [0 x i32], ptr addrspace(3)
define amdgpu_kernel void @direct_alloca_read_0xi32(ptr addrspace(1) %out, i32 %index) {
entry:
  %tmp = alloca [0 x i32], addrspace(5)
  store [0 x i32] [], ptr addrspace(5) %tmp
  %load = load [0 x i32], ptr addrspace(5) %tmp
  store [0 x i32] %load, ptr addrspace(1) %out
  ret void
}

; OPT-LABEL: @direct_alloca_read_1xi32(
; OPT: store [1 x i32] zeroinitializer, ptr addrspace(3)
; OPT: load [1 x i32], ptr addrspace(3)
define amdgpu_kernel void @direct_alloca_read_1xi32(ptr addrspace(1) %out, i32 %index) {
entry:
  %tmp = alloca [1 x i32], addrspace(5)
  store [1 x i32] [i32 0], ptr addrspace(5) %tmp
  %load = load [1 x i32], ptr addrspace(5) %tmp
  store [1 x i32] %load, ptr addrspace(1) %out
  ret void
}

attributes #0 = { nounwind "amdgpu-waves-per-eu"="1,2" "amdgpu-flat-work-group-size"="1,256" }
attributes #1 = { nounwind "amdgpu-flat-work-group-size"="1,256" }

!llvm.module.flags = !{!99}
!99 = !{i32 1, !"amdgpu_code_object_version", i32 CODE_OBJECT_VERSION}

; HSAOPT: !1 = !{}
; HSAOPT: !2 = !{i32 0, i32 257}
; HSAOPT: !3 = !{i32 0, i32 256}

; NOHSAOPT: !1 = !{i32 0, i32 257}
; NOHSAOPT: !2 = !{i32 0, i32 256}