# xref: /llvm-project/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect.mir (revision 87503fa51c8d726510d48e707a7d2885a5b5936c)
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=amdgcn -mcpu=hawaii -mattr=+flat-for-global -run-pass=regbankselect %s -verify-machineinstrs -o - | FileCheck %s
3
--- |
  ; Companion IR module for the MIR functions below. regbankselect runs on the
  ; MIR bodies only; these definitions exist so the MIR memory operands
  ; (":: (... from %ir.ptrN)") and the !amdgpu.noclobber metadata have IR
  ; values/instructions to refer to. Most IR bodies are placeholders.
  define amdgpu_kernel void @load_constant(ptr addrspace(4) %ptr0) {
    ret void
  }

  define amdgpu_kernel void @load_constant_volatile(ptr addrspace(4) %ptr0) {
    ret void
  }

  define amdgpu_kernel void @load_global_uniform_invariant(ptr addrspace(1) %ptr1) {
    %tmp0 = load i32, ptr addrspace(1) %ptr1
    ret void
  }

  define amdgpu_kernel void @load_global_uniform_noclobber(ptr addrspace(1) %ptr1) {
    ; !amdgpu.noclobber is attached at the IR level only; the MIR body's
    ; memory operand does not carry an invariant flag.
    %tmp0 = load i32, ptr addrspace(1) %ptr1, !amdgpu.noclobber !0
    ret void
  }

  define amdgpu_kernel void @load_global_uniform_variant(ptr addrspace(1) %ptr1) {
    %tmp0 = load i32, ptr addrspace(1) %ptr1
    ret void
  }

  define amdgpu_kernel void @load_global_uniform_volatile_invariant(ptr addrspace(1) %ptr1) {
    %tmp0 = load i32, ptr addrspace(1) %ptr1
    ret void
  }

  define amdgpu_kernel void @load_global_uniform_atomic_invariant(ptr addrspace(1) %ptr1) {
    %tmp0 = load i32, ptr addrspace(1) %ptr1
    ret void
  }

  define amdgpu_kernel void @load_global_non_uniform(ptr addrspace(1) %ptr2) {
    ; The address %tmp1 is derived from the workitem id, so loads from it are
    ; divergent; the MIR test below references %tmp1 in its memory operand.
    %tmp0 = call i32 @llvm.amdgcn.workitem.id.x() #0
    %tmp1 = getelementptr i32, ptr addrspace(1) %ptr2, i32 %tmp0
    %tmp2 = load i32, ptr addrspace(1) %tmp1
    ret void
  }

  define amdgpu_kernel void @load_constant_v4i16_from_8_align8(ptr addrspace(4) %ptr0) {
    ret void
  }

  declare i32 @llvm.amdgcn.workitem.id.x() #0
  attributes #0 = { nounwind readnone }
  ; Empty metadata node used as the !amdgpu.noclobber tag above.
  !0 = !{}

...
---
# Uniform load from the constant address space (p4): regbankselect keeps both
# the pointer and the loaded value on the SGPR bank (scalar load path), per
# the CHECK lines below.
name: load_constant
legalized: true

body: |
  bb.0:
    liveins: $sgpr0_sgpr1
    ; CHECK-LABEL: name: load_constant
    ; CHECK: liveins: $sgpr0_sgpr1
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr(p4) = COPY $sgpr0_sgpr1
    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:sgpr(s32) = G_LOAD [[COPY]](p4) :: (load (s32) from %ir.ptr0, addrspace 4)
    %0:_(p4) = COPY $sgpr0_sgpr1
    %1:_(s32) = G_LOAD %0 :: (load (s32) from %ir.ptr0)
...
69
---
# Same as load_constant but with a volatile memory operand: the CHECK lines
# show the load is still assigned the SGPR bank, i.e. volatility alone does
# not force a constant-address-space load off the scalar path here.
name: load_constant_volatile
legalized: true

body: |
  bb.0:
    liveins: $sgpr0_sgpr1
    ; CHECK-LABEL: name: load_constant_volatile
    ; CHECK: liveins: $sgpr0_sgpr1
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr(p4) = COPY $sgpr0_sgpr1
    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:sgpr(s32) = G_LOAD [[COPY]](p4) :: (volatile load (s32) from %ir.ptr0, addrspace 4)
    %0:_(p4) = COPY $sgpr0_sgpr1
    %1:_(s32) = G_LOAD %0 :: (volatile load (s32) from %ir.ptr0)
...
85
---
# Uniform, invariant load from the global address space (p1): the invariant
# flag on the memory operand lets regbankselect keep the result on the SGPR
# bank (compare with load_global_uniform_variant below, which goes to VGPRs).
name: load_global_uniform_invariant
legalized: true

body: |
  bb.0:
    liveins: $sgpr0_sgpr1
    ; CHECK-LABEL: name: load_global_uniform_invariant
    ; CHECK: liveins: $sgpr0_sgpr1
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr(p1) = COPY $sgpr0_sgpr1
    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:sgpr(s32) = G_LOAD [[COPY]](p1) :: (invariant load (s32) from %ir.ptr1, addrspace 1)
    %0:_(p1) = COPY $sgpr0_sgpr1
    %1:_(s32) = G_LOAD %0 :: (invariant load (s32) from %ir.ptr1)
...
101
---
# Uniform global load whose IR counterpart carries !amdgpu.noclobber, but
# whose MIR memory operand has no invariant flag. The CHECK lines show the
# pointer is copied to a VGPR and the load mapped to the VGPR bank — the
# IR-level metadata alone does not enable the scalar path at this point.
name: load_global_uniform_noclobber
legalized: true

body: |
  bb.0:
    liveins: $sgpr0_sgpr1
    ; CHECK-LABEL: name: load_global_uniform_noclobber
    ; CHECK: liveins: $sgpr0_sgpr1
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr(p1) = COPY $sgpr0_sgpr1
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr(p1) = COPY [[COPY]](p1)
    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:vgpr(s32) = G_LOAD [[COPY1]](p1) :: (load (s32) from %ir.ptr1, addrspace 1)
    %0:_(p1) = COPY $sgpr0_sgpr1
    %1:_(s32) = G_LOAD %0 :: (load (s32) from %ir.ptr1)
...
118
---
# Uniform but non-invariant global load: without the invariant flag the value
# may change, so the pointer is copied to a VGPR and the load is assigned the
# VGPR bank (contrast with load_global_uniform_invariant above).
name: load_global_uniform_variant
legalized: true

body: |
  bb.0:
    liveins: $sgpr0_sgpr1
    ; CHECK-LABEL: name: load_global_uniform_variant
    ; CHECK: liveins: $sgpr0_sgpr1
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr(p1) = COPY $sgpr0_sgpr1
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr(p1) = COPY [[COPY]](p1)
    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:vgpr(s32) = G_LOAD [[COPY1]](p1) :: (load (s32) from %ir.ptr1, addrspace 1)
    %0:_(p1) = COPY $sgpr0_sgpr1
    %1:_(s32) = G_LOAD %0 :: (load (s32) from %ir.ptr1)
...
135
---
# Volatile + invariant global load: although invariant alone keeps a uniform
# global load scalar (see load_global_uniform_invariant), adding volatile
# pushes it onto the VGPR bank per the CHECK lines.
name: load_global_uniform_volatile_invariant
legalized: true

body: |
  bb.0:
    liveins: $sgpr0_sgpr1
    ; CHECK-LABEL: name: load_global_uniform_volatile_invariant
    ; CHECK: liveins: $sgpr0_sgpr1
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr(p1) = COPY $sgpr0_sgpr1
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr(p1) = COPY [[COPY]](p1)
    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:vgpr(s32) = G_LOAD [[COPY1]](p1) :: (volatile invariant load (s32) from %ir.ptr1, addrspace 1)
    %0:_(p1) = COPY $sgpr0_sgpr1
    %1:_(s32) = G_LOAD %0 :: (volatile invariant load (s32) from %ir.ptr1)
...
152
---
# Atomic (acquire) invariant global load: the atomic ordering likewise forces
# the VGPR path even though the memory operand is invariant, per the CHECK
# lines below.
name: load_global_uniform_atomic_invariant
legalized: true

body: |
  bb.0:
    liveins: $sgpr0_sgpr1
    ; CHECK-LABEL: name: load_global_uniform_atomic_invariant
    ; CHECK: liveins: $sgpr0_sgpr1
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr(p1) = COPY $sgpr0_sgpr1
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr(p1) = COPY [[COPY]](p1)
    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:vgpr(s32) = G_LOAD [[COPY1]](p1) :: (invariant load acquire (s32) from %ir.ptr1, addrspace 1)
    %0:_(p1) = COPY $sgpr0_sgpr1
    %1:_(s32) = G_LOAD %0 :: (invariant load acquire (s32) from %ir.ptr1)
...
169
---
# Global load whose memory operand points at %ir.tmp1 — an address derived
# from the workitem id in the IR above, i.e. divergent. The CHECK lines show
# the pointer copied to a VGPR and the load assigned the VGPR bank.
name: load_global_non_uniform
legalized: true

body: |
  bb.0:
    liveins: $sgpr0_sgpr1
    ; CHECK-LABEL: name: load_global_non_uniform
    ; CHECK: liveins: $sgpr0_sgpr1
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr(p1) = COPY $sgpr0_sgpr1
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr(p1) = COPY [[COPY]](p1)
    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:vgpr(s32) = G_LOAD [[COPY1]](p1) :: (load (s32) from %ir.tmp1, addrspace 1)
    %0:_(p1) = COPY $sgpr0_sgpr1
    %1:_(s32) = G_LOAD %0 :: (load (s32) from %ir.tmp1)
...
186
---
# 64-bit vector (<4 x s16>) load from the constant address space with align 8:
# still assigned the SGPR bank per the CHECK lines. Note bb.0 declares no
# liveins here, so the autogenerated checks start directly at the COPY.
name: load_constant_v4i16_from_8_align8
legalized: true

body: |
  bb.0:
    ; CHECK-LABEL: name: load_constant_v4i16_from_8_align8
    ; CHECK: [[COPY:%[0-9]+]]:sgpr(p4) = COPY $sgpr0_sgpr1
    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:sgpr(<4 x s16>) = G_LOAD [[COPY]](p4) :: (load (<4 x s16>) from %ir.ptr0, addrspace 4)
    %0:_(p4) = COPY $sgpr0_sgpr1
    %1:_(<4 x s16>) = G_LOAD %0 :: (load (<4 x s16>) from %ir.ptr0, align 8, addrspace 4)

...
200