; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --check-attributes --check-globals
; RUN: sed 's/CODE_OBJECT_VERSION/400/g' %s | opt -S -mtriple=amdgcn-unknown-unknown -passes=amdgpu-attributor | FileCheck -check-prefixes=CHECK,V4 %s
; RUN: sed 's/CODE_OBJECT_VERSION/500/g' %s | opt -S -mtriple=amdgcn-unknown-unknown -passes=amdgpu-attributor | FileCheck -check-prefixes=CHECK,V5 %s
; RUN: sed 's/CODE_OBJECT_VERSION/600/g' %s | opt -S -mtriple=amdgcn-unknown-unknown -passes=amdgpu-attributor | FileCheck -check-prefixes=CHECK,V6 %s
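; Each RUN line uses sed to stamp a concrete version into the
; CODE_OBJECT_VERSION placeholder in the module flag at the bottom of this
; file, so a single input exercises the v4, v5, and v6 implicit argument
; layouts.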

declare ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr() #0

declare i32 @llvm.amdgcn.workgroup.id.x() #0
declare i32 @llvm.amdgcn.workgroup.id.y() #0
declare i32 @llvm.amdgcn.workgroup.id.z() #0

declare i32 @llvm.amdgcn.workitem.id.x() #0
declare i32 @llvm.amdgcn.workitem.id.y() #0
declare i32 @llvm.amdgcn.workitem.id.z() #0
declare i32 @llvm.amdgcn.lds.kernel.id() #0
declare i64 @llvm.amdgcn.dispatch.id() #0


declare ptr addrspace(4) @llvm.amdgcn.dispatch.ptr() #0
declare ptr addrspace(4) @llvm.amdgcn.queue.ptr() #0
declare ptr addrspace(4) @llvm.amdgcn.kernarg.segment.ptr() #0

; Use all of the other implicit inputs in this one helper so the attributor
; does not add their corresponding amdgpu-no-* attributes to every kernel
; below; the kernels' attribute sets should then differ only in the fields
; reached through llvm.amdgcn.implicitarg.ptr.
define void @use_everything_else() {
; CHECK-LABEL: define {{[^@]+}}@use_everything_else
; CHECK-SAME: () #[[ATTR1:[0-9]+]] {
; CHECK-NEXT:    [[VAL0:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
; CHECK-NEXT:    [[VAL1:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
; CHECK-NEXT:    [[VAL2:%.*]] = call i32 @llvm.amdgcn.workitem.id.z()
; CHECK-NEXT:    [[VAL3:%.*]] = call i32 @llvm.amdgcn.workgroup.id.x()
; CHECK-NEXT:    [[VAL4:%.*]] = call i32 @llvm.amdgcn.workgroup.id.y()
; CHECK-NEXT:    [[VAL5:%.*]] = call i32 @llvm.amdgcn.workgroup.id.z()
; CHECK-NEXT:    store volatile i32 [[VAL0]], ptr addrspace(1) null, align 4
; CHECK-NEXT:    store volatile i32 [[VAL1]], ptr addrspace(1) null, align 4
; CHECK-NEXT:    store volatile i32 [[VAL2]], ptr addrspace(1) null, align 4
; CHECK-NEXT:    store volatile i32 [[VAL3]], ptr addrspace(1) null, align 4
; CHECK-NEXT:    store volatile i32 [[VAL4]], ptr addrspace(1) null, align 4
; CHECK-NEXT:    store volatile i32 [[VAL5]], ptr addrspace(1) null, align 4
; CHECK-NEXT:    [[DISPATCH_PTR:%.*]] = call ptr addrspace(4) @llvm.amdgcn.dispatch.ptr()
; CHECK-NEXT:    [[QUEUE_PTR:%.*]] = call ptr addrspace(4) @llvm.amdgcn.queue.ptr()
; CHECK-NEXT:    [[VAL6:%.*]] = load volatile ptr, ptr addrspace(4) [[DISPATCH_PTR]], align 8
; CHECK-NEXT:    [[VAL7:%.*]] = load volatile ptr, ptr addrspace(4) [[QUEUE_PTR]], align 8
; CHECK-NEXT:    [[VAL8:%.*]] = call i32 @llvm.amdgcn.lds.kernel.id()
; CHECK-NEXT:    store volatile i32 [[VAL8]], ptr addrspace(1) null, align 4
; CHECK-NEXT:    [[VAL9:%.*]] = call i64 @llvm.amdgcn.dispatch.id()
; CHECK-NEXT:    store volatile i64 [[VAL9]], ptr addrspace(1) null, align 8
; CHECK-NEXT:    ret void
;
  %val0 = call i32 @llvm.amdgcn.workitem.id.x()
  %val1 = call i32 @llvm.amdgcn.workitem.id.y()
  %val2 = call i32 @llvm.amdgcn.workitem.id.z()
  %val3 = call i32 @llvm.amdgcn.workgroup.id.x()
  %val4 = call i32 @llvm.amdgcn.workgroup.id.y()
  %val5 = call i32 @llvm.amdgcn.workgroup.id.z()
  store volatile i32 %val0, ptr addrspace(1) null
  store volatile i32 %val1, ptr addrspace(1) null
  store volatile i32 %val2, ptr addrspace(1) null
  store volatile i32 %val3, ptr addrspace(1) null
  store volatile i32 %val4, ptr addrspace(1) null
  store volatile i32 %val5, ptr addrspace(1) null
  %dispatch.ptr = call ptr addrspace(4) @llvm.amdgcn.dispatch.ptr()
  %queue.ptr = call ptr addrspace(4) @llvm.amdgcn.queue.ptr()
  %val6 = load volatile ptr, ptr addrspace(4) %dispatch.ptr
  %val7 = load volatile ptr, ptr addrspace(4) %queue.ptr
  %val8 = call i32 @llvm.amdgcn.lds.kernel.id()
  store volatile i32 %val8, ptr addrspace(1) null
  %val9 = call i64 @llvm.amdgcn.dispatch.id()
  store volatile i64 %val9, ptr addrspace(1) null
  ret void
}

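; In the v4 implicit argument layout the default queue handle lives at byte
; offset 32 from the implicitarg base, so only V4 must omit
; "amdgpu-no-default-queue" here; under v5/v6 offset 32 is an unrelated field.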
define amdgpu_kernel void @test_default_queue_offset_v4_0(ptr addrspace(1) %kernarg) {
; V4-LABEL: define {{[^@]+}}@test_default_queue_offset_v4_0
; V4-SAME: (ptr addrspace(1) [[KERNARG:%.*]]) #[[ATTR2:[0-9]+]] {
; V4-NEXT:    call void @use_everything_else()
; V4-NEXT:    [[IMPLICITARG_PTR:%.*]] = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
; V4-NEXT:    [[GEP:%.*]] = getelementptr inbounds i8, ptr addrspace(4) [[IMPLICITARG_PTR]], i64 32
; V4-NEXT:    [[LOAD:%.*]] = load ptr, ptr addrspace(4) [[GEP]], align 8
; V4-NEXT:    store ptr [[LOAD]], ptr addrspace(1) [[KERNARG]], align 8
; V4-NEXT:    ret void
;
; V5-LABEL: define {{[^@]+}}@test_default_queue_offset_v4_0
; V5-SAME: (ptr addrspace(1) [[KERNARG:%.*]]) #[[ATTR1]] {
; V5-NEXT:    call void @use_everything_else()
; V5-NEXT:    [[IMPLICITARG_PTR:%.*]] = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
; V5-NEXT:    [[GEP:%.*]] = getelementptr inbounds i8, ptr addrspace(4) [[IMPLICITARG_PTR]], i64 32
; V5-NEXT:    [[LOAD:%.*]] = load ptr, ptr addrspace(4) [[GEP]], align 8
; V5-NEXT:    store ptr [[LOAD]], ptr addrspace(1) [[KERNARG]], align 8
; V5-NEXT:    ret void
;
; V6-LABEL: define {{[^@]+}}@test_default_queue_offset_v4_0
; V6-SAME: (ptr addrspace(1) [[KERNARG:%.*]]) #[[ATTR1]] {
; V6-NEXT:    call void @use_everything_else()
; V6-NEXT:    [[IMPLICITARG_PTR:%.*]] = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
; V6-NEXT:    [[GEP:%.*]] = getelementptr inbounds i8, ptr addrspace(4) [[IMPLICITARG_PTR]], i64 32
; V6-NEXT:    [[LOAD:%.*]] = load ptr, ptr addrspace(4) [[GEP]], align 8
; V6-NEXT:    store ptr [[LOAD]], ptr addrspace(1) [[KERNARG]], align 8
; V6-NEXT:    ret void
;
  call void @use_everything_else()
  %implicitarg.ptr = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
  %gep = getelementptr inbounds i8, ptr addrspace(4) %implicitarg.ptr, i64 32
  %load = load ptr, ptr addrspace(4) %gep
  store ptr %load, ptr addrspace(1) %kernarg
  ret void
}

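; In the v5 and v6 layouts the default queue handle moves to byte offset 104,
; so here only V5 and V6 must omit "amdgpu-no-default-queue".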
define amdgpu_kernel void @test_default_queue_offset_v5_0(ptr addrspace(1) %kernarg) {
; V4-LABEL: define {{[^@]+}}@test_default_queue_offset_v5_0
; V4-SAME: (ptr addrspace(1) [[KERNARG:%.*]]) #[[ATTR3:[0-9]+]] {
; V4-NEXT:    call void @use_everything_else()
; V4-NEXT:    [[IMPLICITARG_PTR:%.*]] = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
; V4-NEXT:    [[GEP:%.*]] = getelementptr inbounds i8, ptr addrspace(4) [[IMPLICITARG_PTR]], i64 104
; V4-NEXT:    [[LOAD:%.*]] = load ptr, ptr addrspace(4) [[GEP]], align 8
; V4-NEXT:    store ptr [[LOAD]], ptr addrspace(1) [[KERNARG]], align 8
; V4-NEXT:    ret void
;
; V5-LABEL: define {{[^@]+}}@test_default_queue_offset_v5_0
; V5-SAME: (ptr addrspace(1) [[KERNARG:%.*]]) #[[ATTR2:[0-9]+]] {
; V5-NEXT:    call void @use_everything_else()
; V5-NEXT:    [[IMPLICITARG_PTR:%.*]] = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
; V5-NEXT:    [[GEP:%.*]] = getelementptr inbounds i8, ptr addrspace(4) [[IMPLICITARG_PTR]], i64 104
; V5-NEXT:    [[LOAD:%.*]] = load ptr, ptr addrspace(4) [[GEP]], align 8
; V5-NEXT:    store ptr [[LOAD]], ptr addrspace(1) [[KERNARG]], align 8
; V5-NEXT:    ret void
;
; V6-LABEL: define {{[^@]+}}@test_default_queue_offset_v5_0
; V6-SAME: (ptr addrspace(1) [[KERNARG:%.*]]) #[[ATTR2:[0-9]+]] {
; V6-NEXT:    call void @use_everything_else()
; V6-NEXT:    [[IMPLICITARG_PTR:%.*]] = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
; V6-NEXT:    [[GEP:%.*]] = getelementptr inbounds i8, ptr addrspace(4) [[IMPLICITARG_PTR]], i64 104
; V6-NEXT:    [[LOAD:%.*]] = load ptr, ptr addrspace(4) [[GEP]], align 8
; V6-NEXT:    store ptr [[LOAD]], ptr addrspace(1) [[KERNARG]], align 8
; V6-NEXT:    ret void
;
  call void @use_everything_else()
  %implicitarg.ptr = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
  %gep = getelementptr inbounds i8, ptr addrspace(4) %implicitarg.ptr, i64 104
  %load = load ptr, ptr addrspace(4) %gep
  store ptr %load, ptr addrspace(1) %kernarg
  ret void
}

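; The completion action handle sits at byte offset 40 in the v4 layout, so
; only V4 must omit "amdgpu-no-completion-action" for this load.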
define amdgpu_kernel void @test_completion_action_offset_v4_0(ptr addrspace(1) %kernarg) {
; V4-LABEL: define {{[^@]+}}@test_completion_action_offset_v4_0
; V4-SAME: (ptr addrspace(1) [[KERNARG:%.*]]) #[[ATTR4:[0-9]+]] {
; V4-NEXT:    call void @use_everything_else()
; V4-NEXT:    [[IMPLICITARG_PTR:%.*]] = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
; V4-NEXT:    [[GEP:%.*]] = getelementptr inbounds i8, ptr addrspace(4) [[IMPLICITARG_PTR]], i64 40
; V4-NEXT:    [[LOAD:%.*]] = load ptr, ptr addrspace(4) [[GEP]], align 8
; V4-NEXT:    store ptr [[LOAD]], ptr addrspace(1) [[KERNARG]], align 8
; V4-NEXT:    ret void
;
; V5-LABEL: define {{[^@]+}}@test_completion_action_offset_v4_0
; V5-SAME: (ptr addrspace(1) [[KERNARG:%.*]]) #[[ATTR1]] {
; V5-NEXT:    call void @use_everything_else()
; V5-NEXT:    [[IMPLICITARG_PTR:%.*]] = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
; V5-NEXT:    [[GEP:%.*]] = getelementptr inbounds i8, ptr addrspace(4) [[IMPLICITARG_PTR]], i64 40
; V5-NEXT:    [[LOAD:%.*]] = load ptr, ptr addrspace(4) [[GEP]], align 8
; V5-NEXT:    store ptr [[LOAD]], ptr addrspace(1) [[KERNARG]], align 8
; V5-NEXT:    ret void
;
; V6-LABEL: define {{[^@]+}}@test_completion_action_offset_v4_0
; V6-SAME: (ptr addrspace(1) [[KERNARG:%.*]]) #[[ATTR1]] {
; V6-NEXT:    call void @use_everything_else()
; V6-NEXT:    [[IMPLICITARG_PTR:%.*]] = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
; V6-NEXT:    [[GEP:%.*]] = getelementptr inbounds i8, ptr addrspace(4) [[IMPLICITARG_PTR]], i64 40
; V6-NEXT:    [[LOAD:%.*]] = load ptr, ptr addrspace(4) [[GEP]], align 8
; V6-NEXT:    store ptr [[LOAD]], ptr addrspace(1) [[KERNARG]], align 8
; V6-NEXT:    ret void
;
  call void @use_everything_else()
  %implicitarg.ptr = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
  %gep = getelementptr inbounds i8, ptr addrspace(4) %implicitarg.ptr, i64 40
  %load = load ptr, ptr addrspace(4) %gep
  store ptr %load, ptr addrspace(1) %kernarg
  ret void
}

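; In the v5/v6 layout the completion action handle moves to byte offset 112,
; so V5 and V6 must omit "amdgpu-no-completion-action". All three prefixes
; share one CHECK block here; #[[ATTR3]] resolves to a different attribute
; set under each prefix.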
define amdgpu_kernel void @test_completion_action_offset_v5_0(ptr addrspace(1) %kernarg) {
; CHECK-LABEL: define {{[^@]+}}@test_completion_action_offset_v5_0
; CHECK-SAME: (ptr addrspace(1) [[KERNARG:%.*]]) #[[ATTR3:[0-9]+]] {
; CHECK-NEXT:    call void @use_everything_else()
; CHECK-NEXT:    [[IMPLICITARG_PTR:%.*]] = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds i8, ptr addrspace(4) [[IMPLICITARG_PTR]], i64 112
; CHECK-NEXT:    [[LOAD:%.*]] = load ptr, ptr addrspace(4) [[GEP]], align 8
; CHECK-NEXT:    store ptr [[LOAD]], ptr addrspace(1) [[KERNARG]], align 8
; CHECK-NEXT:    ret void
;
  call void @use_everything_else()
  %implicitarg.ptr = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
  %gep = getelementptr inbounds i8, ptr addrspace(4) %implicitarg.ptr, i64 112
  %load = load ptr, ptr addrspace(4) %gep
  store ptr %load, ptr addrspace(1) %kernarg
  ret void
}

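; A single <2 x ptr> load at offset 32 spans both the default queue (32) and
; completion action (40) slots of the v3/v4 layout, so V4 must omit both
; attributes at once. (v3 and v4 share this layout, hence the function name.)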
define amdgpu_kernel void @test_default_queue_completion_action_offset_v3_0(ptr addrspace(1) %kernarg) {
; V4-LABEL: define {{[^@]+}}@test_default_queue_completion_action_offset_v3_0
; V4-SAME: (ptr addrspace(1) [[KERNARG:%.*]]) #[[ATTR5:[0-9]+]] {
; V4-NEXT:    call void @use_everything_else()
; V4-NEXT:    [[IMPLICITARG_PTR:%.*]] = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
; V4-NEXT:    [[GEP:%.*]] = getelementptr inbounds i8, ptr addrspace(4) [[IMPLICITARG_PTR]], i64 32
; V4-NEXT:    [[LOAD:%.*]] = load <2 x ptr>, ptr addrspace(4) [[GEP]], align 16
; V4-NEXT:    store <2 x ptr> [[LOAD]], ptr addrspace(1) [[KERNARG]], align 16
; V4-NEXT:    ret void
;
; V5-LABEL: define {{[^@]+}}@test_default_queue_completion_action_offset_v3_0
; V5-SAME: (ptr addrspace(1) [[KERNARG:%.*]]) #[[ATTR1]] {
; V5-NEXT:    call void @use_everything_else()
; V5-NEXT:    [[IMPLICITARG_PTR:%.*]] = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
; V5-NEXT:    [[GEP:%.*]] = getelementptr inbounds i8, ptr addrspace(4) [[IMPLICITARG_PTR]], i64 32
; V5-NEXT:    [[LOAD:%.*]] = load <2 x ptr>, ptr addrspace(4) [[GEP]], align 16
; V5-NEXT:    store <2 x ptr> [[LOAD]], ptr addrspace(1) [[KERNARG]], align 16
; V5-NEXT:    ret void
;
; V6-LABEL: define {{[^@]+}}@test_default_queue_completion_action_offset_v3_0
; V6-SAME: (ptr addrspace(1) [[KERNARG:%.*]]) #[[ATTR1]] {
; V6-NEXT:    call void @use_everything_else()
; V6-NEXT:    [[IMPLICITARG_PTR:%.*]] = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
; V6-NEXT:    [[GEP:%.*]] = getelementptr inbounds i8, ptr addrspace(4) [[IMPLICITARG_PTR]], i64 32
; V6-NEXT:    [[LOAD:%.*]] = load <2 x ptr>, ptr addrspace(4) [[GEP]], align 16
; V6-NEXT:    store <2 x ptr> [[LOAD]], ptr addrspace(1) [[KERNARG]], align 16
; V6-NEXT:    ret void
;
  call void @use_everything_else()
  %implicitarg.ptr = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
  %gep = getelementptr inbounds i8, ptr addrspace(4) %implicitarg.ptr, i64 32
  %load = load <2 x ptr>, ptr addrspace(4) %gep
  store <2 x ptr> %load, ptr addrspace(1) %kernarg
  ret void
}

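; Likewise, a <2 x ptr> load at offset 104 spans the default queue (104) and
; completion action (112) slots of the v5/v6 layout, so V5 and V6 must omit
; both attributes.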
define amdgpu_kernel void @test_default_queue_completion_action_offset_v5_0(ptr addrspace(1) %kernarg) {
; V4-LABEL: define {{[^@]+}}@test_default_queue_completion_action_offset_v5_0
; V4-SAME: (ptr addrspace(1) [[KERNARG:%.*]]) #[[ATTR3]] {
; V4-NEXT:    call void @use_everything_else()
; V4-NEXT:    [[IMPLICITARG_PTR:%.*]] = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
; V4-NEXT:    [[GEP:%.*]] = getelementptr inbounds i8, ptr addrspace(4) [[IMPLICITARG_PTR]], i64 104
; V4-NEXT:    [[LOAD:%.*]] = load <2 x ptr>, ptr addrspace(4) [[GEP]], align 16
; V4-NEXT:    store <2 x ptr> [[LOAD]], ptr addrspace(1) [[KERNARG]], align 16
; V4-NEXT:    ret void
;
; V5-LABEL: define {{[^@]+}}@test_default_queue_completion_action_offset_v5_0
; V5-SAME: (ptr addrspace(1) [[KERNARG:%.*]]) #[[ATTR4:[0-9]+]] {
; V5-NEXT:    call void @use_everything_else()
; V5-NEXT:    [[IMPLICITARG_PTR:%.*]] = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
; V5-NEXT:    [[GEP:%.*]] = getelementptr inbounds i8, ptr addrspace(4) [[IMPLICITARG_PTR]], i64 104
; V5-NEXT:    [[LOAD:%.*]] = load <2 x ptr>, ptr addrspace(4) [[GEP]], align 16
; V5-NEXT:    store <2 x ptr> [[LOAD]], ptr addrspace(1) [[KERNARG]], align 16
; V5-NEXT:    ret void
;
; V6-LABEL: define {{[^@]+}}@test_default_queue_completion_action_offset_v5_0
; V6-SAME: (ptr addrspace(1) [[KERNARG:%.*]]) #[[ATTR4:[0-9]+]] {
; V6-NEXT:    call void @use_everything_else()
; V6-NEXT:    [[IMPLICITARG_PTR:%.*]] = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
; V6-NEXT:    [[GEP:%.*]] = getelementptr inbounds i8, ptr addrspace(4) [[IMPLICITARG_PTR]], i64 104
; V6-NEXT:    [[LOAD:%.*]] = load <2 x ptr>, ptr addrspace(4) [[GEP]], align 16
; V6-NEXT:    store <2 x ptr> [[LOAD]], ptr addrspace(1) [[KERNARG]], align 16
; V6-NEXT:    ret void
;

  call void @use_everything_else()
  %implicitarg.ptr = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
  %gep = getelementptr inbounds i8, ptr addrspace(4) %implicitarg.ptr, i64 104
  %load = load <2 x ptr>, ptr addrspace(4) %gep
  store <2 x ptr> %load, ptr addrspace(1) %kernarg
  ret void
}


attributes #0 = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }

!llvm.module.flags = !{!0}
!0 = !{i32 1, !"amdhsa_code_object_version", i32 CODE_OBJECT_VERSION}


;.
; V4: attributes #[[ATTR0:[0-9]+]] = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
; V4: attributes #[[ATTR1]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-flat-scratch-init" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-multigrid-sync-arg" "uniform-work-group-size"="false" }
; V4: attributes #[[ATTR2]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-flat-scratch-init" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-multigrid-sync-arg" "uniform-work-group-size"="false" }
; V4: attributes #[[ATTR3]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-flat-scratch-init" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-multigrid-sync-arg" "uniform-work-group-size"="false" }
; V4: attributes #[[ATTR4]] = { "amdgpu-no-agpr" "amdgpu-no-default-queue" "amdgpu-no-flat-scratch-init" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-multigrid-sync-arg" "uniform-work-group-size"="false" }
; V4: attributes #[[ATTR5]] = { "amdgpu-no-agpr" "amdgpu-no-flat-scratch-init" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-multigrid-sync-arg" "uniform-work-group-size"="false" }
;.
; V5: attributes #[[ATTR0:[0-9]+]] = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
; V5: attributes #[[ATTR1]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-flat-scratch-init" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-multigrid-sync-arg" "uniform-work-group-size"="false" }
; V5: attributes #[[ATTR2]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-flat-scratch-init" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-multigrid-sync-arg" "uniform-work-group-size"="false" }
; V5: attributes #[[ATTR3]] = { "amdgpu-no-agpr" "amdgpu-no-default-queue" "amdgpu-no-flat-scratch-init" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-multigrid-sync-arg" "uniform-work-group-size"="false" }
; V5: attributes #[[ATTR4]] = { "amdgpu-no-agpr" "amdgpu-no-flat-scratch-init" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-multigrid-sync-arg" "uniform-work-group-size"="false" }
;.
; V6: attributes #[[ATTR0:[0-9]+]] = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
; V6: attributes #[[ATTR1]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-flat-scratch-init" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-multigrid-sync-arg" "uniform-work-group-size"="false" }
; V6: attributes #[[ATTR2]] = { "amdgpu-no-agpr" "amdgpu-no-completion-action" "amdgpu-no-flat-scratch-init" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-multigrid-sync-arg" "uniform-work-group-size"="false" }
; V6: attributes #[[ATTR3]] = { "amdgpu-no-agpr" "amdgpu-no-default-queue" "amdgpu-no-flat-scratch-init" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-multigrid-sync-arg" "uniform-work-group-size"="false" }
; V6: attributes #[[ATTR4]] = { "amdgpu-no-agpr" "amdgpu-no-flat-scratch-init" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-multigrid-sync-arg" "uniform-work-group-size"="false" }
;.
; V4: [[META0:![0-9]+]] = !{i32 1, !"amdhsa_code_object_version", i32 400}
;.
; V5: [[META0:![0-9]+]] = !{i32 1, !"amdhsa_code_object_version", i32 500}
;.
; V6: [[META0:![0-9]+]] = !{i32 1, !"amdhsa_code_object_version", i32 600}
;.