; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
; RUN: llc -global-isel -stop-after=irtranslator -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -verify-machineinstrs -o - %s | FileCheck -enable-var-scope %s

; FIXME: Also test with a pre-gfx8 target.

define i1 @i1_func_void() #0 {
  ; CHECK-LABEL: name: i1_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(s1) = G_LOAD [[DEF]](p1) :: (load (s1) from `ptr addrspace(1) undef`, addrspace 1)
  ; CHECK-NEXT:   [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LOAD]](s1)
  ; CHECK-NEXT:   $vgpr0 = COPY [[ANYEXT]](s32)
  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
  %val = load i1, ptr addrspace(1) undef
  ret i1 %val
}

define zeroext i1 @i1_zeroext_func_void() #0 {
  ; CHECK-LABEL: name: i1_zeroext_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(s1) = G_LOAD [[DEF]](p1) :: (load (s1) from `ptr addrspace(1) undef`, addrspace 1)
  ; CHECK-NEXT:   [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[LOAD]](s1)
  ; CHECK-NEXT:   $vgpr0 = COPY [[ZEXT]](s32)
  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
  %val = load i1, ptr addrspace(1) undef
  ret i1 %val
}

define signext i1 @i1_signext_func_void() #0 {
  ; CHECK-LABEL: name: i1_signext_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(s1) = G_LOAD [[DEF]](p1) :: (load (s1) from `ptr addrspace(1) undef`, addrspace 1)
  ; CHECK-NEXT:   [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[LOAD]](s1)
  ; CHECK-NEXT:   $vgpr0 = COPY [[SEXT]](s32)
  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
  %val = load i1, ptr addrspace(1) undef
  ret i1 %val
}

define i7 @i7_func_void() #0 {
  ; CHECK-LABEL: name: i7_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(s7) = G_LOAD [[DEF]](p1) :: (load (s7) from `ptr addrspace(1) undef`, addrspace 1)
  ; CHECK-NEXT:   [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LOAD]](s7)
  ; CHECK-NEXT:   $vgpr0 = COPY [[ANYEXT]](s32)
  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
  %val = load i7, ptr addrspace(1) undef
  ret i7 %val
}

define zeroext i7 @i7_zeroext_func_void() #0 {
  ; CHECK-LABEL: name: i7_zeroext_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(s7) = G_LOAD [[DEF]](p1) :: (load (s7) from `ptr addrspace(1) undef`, addrspace 1)
  ; CHECK-NEXT:   [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[LOAD]](s7)
  ; CHECK-NEXT:   $vgpr0 = COPY [[ZEXT]](s32)
  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
  %val = load i7, ptr addrspace(1) undef
  ret i7 %val
}

define signext i7 @i7_signext_func_void() #0 {
  ; CHECK-LABEL: name: i7_signext_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(s7) = G_LOAD [[DEF]](p1) :: (load (s7) from `ptr addrspace(1) undef`, addrspace 1)
  ; CHECK-NEXT:   [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[LOAD]](s7)
  ; CHECK-NEXT:   $vgpr0 = COPY [[SEXT]](s32)
  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
  %val = load i7, ptr addrspace(1) undef
  ret i7 %val
}

define i8 @i8_func_void() #0 {
  ; CHECK-LABEL: name: i8_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[DEF]](p1) :: (load (s8) from `ptr addrspace(1) undef`, addrspace 1)
  ; CHECK-NEXT:   [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LOAD]](s8)
  ; CHECK-NEXT:   $vgpr0 = COPY [[ANYEXT]](s32)
  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
  %val = load i8, ptr addrspace(1) undef
  ret i8 %val
}

define zeroext i8 @i8_zeroext_func_void() #0 {
  ; CHECK-LABEL: name: i8_zeroext_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[DEF]](p1) :: (load (s8) from `ptr addrspace(1) undef`, addrspace 1)
  ; CHECK-NEXT:   [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[LOAD]](s8)
  ; CHECK-NEXT:   $vgpr0 = COPY [[ZEXT]](s32)
  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
  %val = load i8, ptr addrspace(1) undef
  ret i8 %val
}

define signext i8 @i8_signext_func_void() #0 {
  ; CHECK-LABEL: name: i8_signext_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[DEF]](p1) :: (load (s8) from `ptr addrspace(1) undef`, addrspace 1)
  ; CHECK-NEXT:   [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[LOAD]](s8)
  ; CHECK-NEXT:   $vgpr0 = COPY [[SEXT]](s32)
  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
  %val = load i8, ptr addrspace(1) undef
  ret i8 %val
}

define i16 @i16_func_void() #0 {
  ; CHECK-LABEL: name: i16_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[DEF]](p1) :: (load (s16) from `ptr addrspace(1) undef`, addrspace 1)
  ; CHECK-NEXT:   [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LOAD]](s16)
  ; CHECK-NEXT:   $vgpr0 = COPY [[ANYEXT]](s32)
  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
  %val = load i16, ptr addrspace(1) undef
  ret i16 %val
}

define zeroext i16 @i16_zeroext_func_void() #0 {
  ; CHECK-LABEL: name: i16_zeroext_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[DEF]](p1) :: (load (s16) from `ptr addrspace(1) undef`, addrspace 1)
  ; CHECK-NEXT:   [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[LOAD]](s16)
  ; CHECK-NEXT:   $vgpr0 = COPY [[ZEXT]](s32)
  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
  %val = load i16, ptr addrspace(1) undef
  ret i16 %val
}

define signext i16 @i16_signext_func_void() #0 {
  ; CHECK-LABEL: name: i16_signext_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[DEF]](p1) :: (load (s16) from `ptr addrspace(1) undef`, addrspace 1)
  ; CHECK-NEXT:   [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[LOAD]](s16)
  ; CHECK-NEXT:   $vgpr0 = COPY [[SEXT]](s32)
  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
  %val = load i16, ptr addrspace(1) undef
  ret i16 %val
}

define half @f16_func_void() #0 {
  ; CHECK-LABEL: name: f16_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[DEF]](p1) :: (load (s16) from `ptr addrspace(1) undef`, addrspace 1)
  ; CHECK-NEXT:   [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LOAD]](s16)
  ; CHECK-NEXT:   $vgpr0 = COPY [[ANYEXT]](s32)
  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
  %val = load half, ptr addrspace(1) undef
  ret half %val
}

define i24 @i24_func_void() #0 {
  ; CHECK-LABEL: name: i24_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(s24) = G_LOAD [[DEF]](p1) :: (load (s24) from `ptr addrspace(1) undef`, align 4, addrspace 1)
  ; CHECK-NEXT:   [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LOAD]](s24)
  ; CHECK-NEXT:   $vgpr0 = COPY [[ANYEXT]](s32)
  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
  %val = load i24, ptr addrspace(1) undef
  ret i24 %val
}

define zeroext i24 @i24_zeroext_func_void() #0 {
  ; CHECK-LABEL: name: i24_zeroext_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(s24) = G_LOAD [[DEF]](p1) :: (load (s24) from `ptr addrspace(1) undef`, align 4, addrspace 1)
  ; CHECK-NEXT:   [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[LOAD]](s24)
  ; CHECK-NEXT:   $vgpr0 = COPY [[ZEXT]](s32)
  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
  %val = load i24, ptr addrspace(1) undef
  ret i24 %val
}

define signext i24 @i24_signext_func_void() #0 {
  ; CHECK-LABEL: name: i24_signext_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(s24) = G_LOAD [[DEF]](p1) :: (load (s24) from `ptr addrspace(1) undef`, align 4, addrspace 1)
  ; CHECK-NEXT:   [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[LOAD]](s24)
  ; CHECK-NEXT:   $vgpr0 = COPY [[SEXT]](s32)
  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
  %val = load i24, ptr addrspace(1) undef
  ret i24 %val
}

define <2 x i24> @v2i24_func_void() #0 {
  ; CHECK-LABEL: name: v2i24_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(<2 x s24>) = G_LOAD [[DEF]](p1) :: (load (<2 x s24>) from `ptr addrspace(1) undef`, align 8, addrspace 1)
  ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s24), [[UV1:%[0-9]+]]:_(s24) = G_UNMERGE_VALUES [[LOAD]](<2 x s24>)
  ; CHECK-NEXT:   [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[UV]](s24)
  ; CHECK-NEXT:   [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[UV1]](s24)
  ; CHECK-NEXT:   $vgpr0 = COPY [[ANYEXT]](s32)
  ; CHECK-NEXT:   $vgpr1 = COPY [[ANYEXT1]](s32)
  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1
  %val = load <2 x i24>, ptr addrspace(1) undef
  ret <2 x i24> %val
}

define <3 x i24> @v3i24_func_void() #0 {
  ; CHECK-LABEL: name: v3i24_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(<3 x s24>) = G_LOAD [[DEF]](p1) :: (load (<3 x s24>) from `ptr addrspace(1) undef`, align 16, addrspace 1)
  ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s24), [[UV1:%[0-9]+]]:_(s24), [[UV2:%[0-9]+]]:_(s24) = G_UNMERGE_VALUES [[LOAD]](<3 x s24>)
  ; CHECK-NEXT:   [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[UV]](s24)
  ; CHECK-NEXT:   [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[UV1]](s24)
  ; CHECK-NEXT:   [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[UV2]](s24)
  ; CHECK-NEXT:   $vgpr0 = COPY [[ANYEXT]](s32)
  ; CHECK-NEXT:   $vgpr1 = COPY [[ANYEXT1]](s32)
  ; CHECK-NEXT:   $vgpr2 = COPY [[ANYEXT2]](s32)
  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
  %val = load <3 x i24>, ptr addrspace(1) undef
  ret <3 x i24> %val
}

define i32 @i32_func_void() #0 {
  ; CHECK-LABEL: name: i32_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p1) :: (load (s32) from `ptr addrspace(1) undef`, addrspace 1)
  ; CHECK-NEXT:   $vgpr0 = COPY [[LOAD]](s32)
  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
  %val = load i32, ptr addrspace(1) undef
  ret i32 %val
}

define i48 @i48_func_void() #0 {
  ; CHECK-LABEL: name: i48_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(s48) = G_LOAD [[DEF]](p1) :: (load (s48) from `ptr addrspace(1) undef`, align 8, addrspace 1)
  ; CHECK-NEXT:   [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[LOAD]](s48)
  ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ANYEXT]](s64)
  ; CHECK-NEXT:   $vgpr0 = COPY [[UV]](s32)
  ; CHECK-NEXT:   $vgpr1 = COPY [[UV1]](s32)
  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1
  %val = load i48, ptr addrspace(1) undef, align 8
  ret i48 %val
}

define signext i48 @i48_signext_func_void() #0 {
  ; CHECK-LABEL: name: i48_signext_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(s48) = G_LOAD [[DEF]](p1) :: (load (s48) from `ptr addrspace(1) undef`, align 8, addrspace 1)
  ; CHECK-NEXT:   [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[LOAD]](s48)
  ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SEXT]](s64)
  ; CHECK-NEXT:   $vgpr0 = COPY [[UV]](s32)
  ; CHECK-NEXT:   $vgpr1 = COPY [[UV1]](s32)
  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1
  %val = load i48, ptr addrspace(1) undef, align 8
  ret i48 %val
}

define zeroext i48 @i48_zeroext_func_void() #0 {
  ; CHECK-LABEL: name: i48_zeroext_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(s48) = G_LOAD [[DEF]](p1) :: (load (s48) from `ptr addrspace(1) undef`, align 8, addrspace 1)
  ; CHECK-NEXT:   [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[LOAD]](s48)
  ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ZEXT]](s64)
  ; CHECK-NEXT:   $vgpr0 = COPY [[UV]](s32)
  ; CHECK-NEXT:   $vgpr1 = COPY [[UV1]](s32)
  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1
  %val = load i48, ptr addrspace(1) undef, align 8
  ret i48 %val
}

define i64 @i64_func_void() #0 {
  ; CHECK-LABEL: name: i64_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[DEF]](p1) :: (load (s64) from `ptr addrspace(1) undef`, addrspace 1)
  ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](s64)
  ; CHECK-NEXT:   $vgpr0 = COPY [[UV]](s32)
  ; CHECK-NEXT:   $vgpr1 = COPY [[UV1]](s32)
  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1
  %val = load i64, ptr addrspace(1) undef
  ret i64 %val
}

define i65 @i65_func_void() #0 {
  ; CHECK-LABEL: name: i65_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(s65) = G_LOAD [[DEF]](p1) :: (load (s65) from `ptr addrspace(1) undef`, align 8, addrspace 1)
  ; CHECK-NEXT:   [[ANYEXT:%[0-9]+]]:_(s96) = G_ANYEXT [[LOAD]](s65)
  ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ANYEXT]](s96)
  ; CHECK-NEXT:   $vgpr0 = COPY [[UV]](s32)
  ; CHECK-NEXT:   $vgpr1 = COPY [[UV1]](s32)
  ; CHECK-NEXT:   $vgpr2 = COPY [[UV2]](s32)
  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
  %val = load i65, ptr addrspace(1) undef
  ret i65 %val
}

define signext i65 @i65_signext_func_void() #0 {
  ; CHECK-LABEL: name: i65_signext_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(s65) = G_LOAD [[DEF]](p1) :: (load (s65) from `ptr addrspace(1) undef`, align 8, addrspace 1)
  ; CHECK-NEXT:   [[SEXT:%[0-9]+]]:_(s96) = G_SEXT [[LOAD]](s65)
  ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SEXT]](s96)
  ; CHECK-NEXT:   $vgpr0 = COPY [[UV]](s32)
  ; CHECK-NEXT:   $vgpr1 = COPY [[UV1]](s32)
  ; CHECK-NEXT:   $vgpr2 = COPY [[UV2]](s32)
  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
  %val = load i65, ptr addrspace(1) undef
  ret i65 %val
}

define zeroext i65 @i65_zeroext_func_void() #0 {
  ; CHECK-LABEL: name: i65_zeroext_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(s65) = G_LOAD [[DEF]](p1) :: (load (s65) from `ptr addrspace(1) undef`, align 8, addrspace 1)
  ; CHECK-NEXT:   [[ZEXT:%[0-9]+]]:_(s96) = G_ZEXT [[LOAD]](s65)
  ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ZEXT]](s96)
  ; CHECK-NEXT:   $vgpr0 = COPY [[UV]](s32)
  ; CHECK-NEXT:   $vgpr1 = COPY [[UV1]](s32)
  ; CHECK-NEXT:   $vgpr2 = COPY [[UV2]](s32)
  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
  %val = load i65, ptr addrspace(1) undef
  ret i65 %val
}

define float @f32_func_void() #0 {
  ; CHECK-LABEL: name: f32_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p1) :: (load (s32) from `ptr addrspace(1) undef`, addrspace 1)
  ; CHECK-NEXT:   $vgpr0 = COPY [[LOAD]](s32)
  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
  %val = load float, ptr addrspace(1) undef
  ret float %val
}

define double @f64_func_void() #0 {
  ; CHECK-LABEL: name: f64_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[DEF]](p1) :: (load (s64) from `ptr addrspace(1) undef`, addrspace 1)
  ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](s64)
  ; CHECK-NEXT:   $vgpr0 = COPY [[UV]](s32)
  ; CHECK-NEXT:   $vgpr1 = COPY [[UV1]](s32)
  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1
  %val = load double, ptr addrspace(1) undef
  ret double %val
}

define <2 x double> @v2f64_func_void() #0 {
  ; CHECK-LABEL: name: v2f64_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[DEF]](p1) :: (load (<2 x s64>) from `ptr addrspace(1) undef`, addrspace 1)
  ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<2 x s64>)
  ; CHECK-NEXT:   $vgpr0 = COPY [[UV]](s32)
  ; CHECK-NEXT:   $vgpr1 = COPY [[UV1]](s32)
  ; CHECK-NEXT:   $vgpr2 = COPY [[UV2]](s32)
  ; CHECK-NEXT:   $vgpr3 = COPY [[UV3]](s32)
  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
  %val = load <2 x double>, ptr addrspace(1) undef
  ret <2 x double> %val
}

define <2 x i32> @v2i32_func_void() #0 {
  ; CHECK-LABEL: name: v2i32_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[DEF]](p1) :: (load (<2 x s32>) from `ptr addrspace(1) undef`, addrspace 1)
  ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<2 x s32>)
  ; CHECK-NEXT:   $vgpr0 = COPY [[UV]](s32)
  ; CHECK-NEXT:   $vgpr1 = COPY [[UV1]](s32)
  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1
  %val = load <2 x i32>, ptr addrspace(1) undef
  ret <2 x i32> %val
}

define <3 x i32> @v3i32_func_void() #0 {
  ; CHECK-LABEL: name: v3i32_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[DEF]](p1) :: (load (<3 x s32>) from `ptr addrspace(1) undef`, align 16, addrspace 1)
  ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<3 x s32>)
  ; CHECK-NEXT:   $vgpr0 = COPY [[UV]](s32)
  ; CHECK-NEXT:   $vgpr1 = COPY [[UV1]](s32)
  ; CHECK-NEXT:   $vgpr2 = COPY [[UV2]](s32)
  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
  %val = load <3 x i32>, ptr addrspace(1) undef
  ret <3 x i32> %val
}

define <4 x i32> @v4i32_func_void() #0 {
  ; CHECK-LABEL: name: v4i32_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[DEF]](p1) :: (load (<4 x s32>) from `ptr addrspace(1) undef`, addrspace 1)
  ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<4 x s32>)
  ; CHECK-NEXT:   $vgpr0 = COPY [[UV]](s32)
  ; CHECK-NEXT:   $vgpr1 = COPY [[UV1]](s32)
  ; CHECK-NEXT:   $vgpr2 = COPY [[UV2]](s32)
  ; CHECK-NEXT:   $vgpr3 = COPY [[UV3]](s32)
  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
  %val = load <4 x i32>, ptr addrspace(1) undef
  ret <4 x i32> %val
}

define <5 x i32> @v5i32_func_void() #0 {
  ; CHECK-LABEL: name: v5i32_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(<5 x s32>) = G_LOAD [[DEF]](p1) :: (volatile load (<5 x s32>) from `ptr addrspace(1) undef`, align 32, addrspace 1)
  ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<5 x s32>)
  ; CHECK-NEXT:   $vgpr0 = COPY [[UV]](s32)
  ; CHECK-NEXT:   $vgpr1 = COPY [[UV1]](s32)
  ; CHECK-NEXT:   $vgpr2 = COPY [[UV2]](s32)
  ; CHECK-NEXT:   $vgpr3 = COPY [[UV3]](s32)
  ; CHECK-NEXT:   $vgpr4 = COPY [[UV4]](s32)
  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4
  %val = load volatile <5 x i32>, ptr addrspace(1) undef
  ret <5 x i32> %val
}

define <8 x i32> @v8i32_func_void() #0 {
  ; CHECK-LABEL: name: v8i32_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
  ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile invariant load (p1) from `ptr addrspace(4) undef`, addrspace 4)
  ; CHECK-NEXT:   [[LOAD1:%[0-9]+]]:_(<8 x s32>) = G_LOAD [[LOAD]](p1) :: (load (<8 x s32>) from %ir.ptr, addrspace 1)
  ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD1]](<8 x s32>)
  ; CHECK-NEXT:   $vgpr0 = COPY [[UV]](s32)
  ; CHECK-NEXT:   $vgpr1 = COPY [[UV1]](s32)
  ; CHECK-NEXT:   $vgpr2 = COPY [[UV2]](s32)
  ; CHECK-NEXT:   $vgpr3 = COPY [[UV3]](s32)
  ; CHECK-NEXT:   $vgpr4 = COPY [[UV4]](s32)
  ; CHECK-NEXT:   $vgpr5 = COPY [[UV5]](s32)
  ; CHECK-NEXT:   $vgpr6 = COPY [[UV6]](s32)
  ; CHECK-NEXT:   $vgpr7 = COPY [[UV7]](s32)
  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7
  %ptr = load volatile ptr addrspace(1), ptr addrspace(4) undef
  %val = load <8 x i32>, ptr addrspace(1) %ptr
  ret <8 x i32> %val
}

define <16 x i32> @v16i32_func_void() #0 {
  ; CHECK-LABEL: name: v16i32_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
  ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile invariant load (p1) from `ptr addrspace(4) undef`, addrspace 4)
  ; CHECK-NEXT:   [[LOAD1:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[LOAD]](p1) :: (load (<16 x s32>) from %ir.ptr, addrspace 1)
  ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32), [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32), [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32), [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD1]](<16 x s32>)
  ; CHECK-NEXT:   $vgpr0 = COPY [[UV]](s32)
  ; CHECK-NEXT:   $vgpr1 = COPY [[UV1]](s32)
  ; CHECK-NEXT:   $vgpr2 = COPY [[UV2]](s32)
  ; CHECK-NEXT:   $vgpr3 = COPY [[UV3]](s32)
  ; CHECK-NEXT:   $vgpr4 = COPY [[UV4]](s32)
  ; CHECK-NEXT:   $vgpr5 = COPY [[UV5]](s32)
  ; CHECK-NEXT:   $vgpr6 = COPY [[UV6]](s32)
  ; CHECK-NEXT:   $vgpr7 = COPY [[UV7]](s32)
  ; CHECK-NEXT:   $vgpr8 = COPY [[UV8]](s32)
  ; CHECK-NEXT:   $vgpr9 = COPY [[UV9]](s32)
  ; CHECK-NEXT:   $vgpr10 = COPY [[UV10]](s32)
  ; CHECK-NEXT:   $vgpr11 = COPY [[UV11]](s32)
  ; CHECK-NEXT:   $vgpr12 = COPY [[UV12]](s32)
  ; CHECK-NEXT:   $vgpr13 = COPY [[UV13]](s32)
  ; CHECK-NEXT:   $vgpr14 = COPY [[UV14]](s32)
  ; CHECK-NEXT:   $vgpr15 = COPY [[UV15]](s32)
  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15
  %ptr = load volatile ptr addrspace(1), ptr addrspace(4) undef
  %val = load <16 x i32>, ptr addrspace(1) %ptr
  ret <16 x i32> %val
}

define <32 x i32> @v32i32_func_void() #0 {
  ; CHECK-LABEL: name: v32i32_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
  ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile invariant load (p1) from `ptr addrspace(4) undef`, addrspace 4)
  ; CHECK-NEXT:   [[LOAD1:%[0-9]+]]:_(<32 x s32>) = G_LOAD [[LOAD]](p1) :: (load (<32 x s32>) from %ir.ptr, addrspace 1)
  ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32), [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32), [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32), [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32), [[UV16:%[0-9]+]]:_(s32), [[UV17:%[0-9]+]]:_(s32), [[UV18:%[0-9]+]]:_(s32), [[UV19:%[0-9]+]]:_(s32), [[UV20:%[0-9]+]]:_(s32), [[UV21:%[0-9]+]]:_(s32), [[UV22:%[0-9]+]]:_(s32), [[UV23:%[0-9]+]]:_(s32), [[UV24:%[0-9]+]]:_(s32), [[UV25:%[0-9]+]]:_(s32), [[UV26:%[0-9]+]]:_(s32), [[UV27:%[0-9]+]]:_(s32), [[UV28:%[0-9]+]]:_(s32), [[UV29:%[0-9]+]]:_(s32), [[UV30:%[0-9]+]]:_(s32), [[UV31:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD1]](<32 x s32>)
  ; CHECK-NEXT:   $vgpr0 = COPY [[UV]](s32)
  ; CHECK-NEXT:   $vgpr1 = COPY [[UV1]](s32)
  ; CHECK-NEXT:   $vgpr2 = COPY [[UV2]](s32)
  ; CHECK-NEXT:   $vgpr3 = COPY [[UV3]](s32)
  ; CHECK-NEXT:   $vgpr4 = COPY [[UV4]](s32)
  ; CHECK-NEXT:   $vgpr5 = COPY [[UV5]](s32)
  ; CHECK-NEXT:   $vgpr6 = COPY [[UV6]](s32)
  ; CHECK-NEXT:   $vgpr7 = COPY [[UV7]](s32)
  ; CHECK-NEXT:   $vgpr8 = COPY [[UV8]](s32)
  ; CHECK-NEXT:   $vgpr9 = COPY [[UV9]](s32)
  ; CHECK-NEXT:   $vgpr10 = COPY [[UV10]](s32)
  ; CHECK-NEXT:   $vgpr11 = COPY [[UV11]](s32)
  ; CHECK-NEXT:   $vgpr12 = COPY [[UV12]](s32)
  ; CHECK-NEXT:   $vgpr13 = COPY [[UV13]](s32)
  ; CHECK-NEXT:   $vgpr14 = COPY [[UV14]](s32)
  ; CHECK-NEXT:   $vgpr15 = COPY [[UV15]](s32)
  ; CHECK-NEXT:   $vgpr16 = COPY [[UV16]](s32)
  ; CHECK-NEXT:   $vgpr17 = COPY [[UV17]](s32)
  ; CHECK-NEXT:   $vgpr18 = COPY [[UV18]](s32)
  ; CHECK-NEXT:   $vgpr19 = COPY [[UV19]](s32)
  ; CHECK-NEXT:   $vgpr20 = COPY [[UV20]](s32)
  ; CHECK-NEXT:   $vgpr21 = COPY [[UV21]](s32)
  ; CHECK-NEXT:   $vgpr22 = COPY [[UV22]](s32)
  ; CHECK-NEXT:   $vgpr23 = COPY [[UV23]](s32)
  ; CHECK-NEXT:   $vgpr24 = COPY [[UV24]](s32)
  ; CHECK-NEXT:   $vgpr25 = COPY [[UV25]](s32)
  ; CHECK-NEXT:   $vgpr26 = COPY [[UV26]](s32)
  ; CHECK-NEXT:   $vgpr27 = COPY [[UV27]](s32)
  ; CHECK-NEXT:   $vgpr28 = COPY [[UV28]](s32)
  ; CHECK-NEXT:   $vgpr29 = COPY [[UV29]](s32)
  ; CHECK-NEXT:   $vgpr30 = COPY [[UV30]](s32)
  ; CHECK-NEXT:   $vgpr31 = COPY [[UV31]](s32)
  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15, implicit $vgpr16, implicit $vgpr17, implicit $vgpr18, implicit $vgpr19, implicit $vgpr20, implicit $vgpr21, implicit $vgpr22, implicit $vgpr23, implicit $vgpr24, implicit $vgpr25, implicit $vgpr26, implicit $vgpr27, implicit $vgpr28, implicit $vgpr29, implicit $vgpr30, implicit $vgpr31
  %ptr = load volatile ptr addrspace(1), ptr addrspace(4) undef
  %val = load <32 x i32>, ptr addrspace(1) %ptr
  ret <32 x i32> %val
}

define <2 x i64> @v2i64_func_void() #0 {
  ; CHECK-LABEL: name: v2i64_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[DEF]](p1) :: (load (<2 x s64>) from `ptr addrspace(1) undef`, addrspace 1)
  ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<2 x s64>)
  ; CHECK-NEXT:   $vgpr0 = COPY [[UV]](s32)
  ; CHECK-NEXT:   $vgpr1 = COPY [[UV1]](s32)
  ; CHECK-NEXT:   $vgpr2 = COPY [[UV2]](s32)
  ; CHECK-NEXT:   $vgpr3 = COPY [[UV3]](s32)
  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
  %val = load <2 x i64>, ptr addrspace(1) undef
  ret <2 x i64> %val
}

define <3 x i64> @v3i64_func_void() #0 {
  ; CHECK-LABEL: name: v3i64_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
  ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile invariant load (p1) from `ptr addrspace(4) undef`, addrspace 4)
  ; CHECK-NEXT:   [[LOAD1:%[0-9]+]]:_(<3 x s64>) = G_LOAD [[LOAD]](p1) :: (load (<3 x s64>) from %ir.ptr, align 32, addrspace 1)
  ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD1]](<3 x s64>)
  ; CHECK-NEXT:   $vgpr0 = COPY [[UV]](s32)
  ; CHECK-NEXT:   $vgpr1 = COPY [[UV1]](s32)
  ; CHECK-NEXT:   $vgpr2 = COPY [[UV2]](s32)
  ; CHECK-NEXT:   $vgpr3 = COPY [[UV3]](s32)
  ; CHECK-NEXT:   $vgpr4 = COPY [[UV4]](s32)
  ; CHECK-NEXT:   $vgpr5 = COPY [[UV5]](s32)
  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5
  %ptr = load volatile ptr addrspace(1), ptr addrspace(4) undef
  %val = load <3 x i64>, ptr addrspace(1) %ptr
  ret <3 x i64> %val
}

define <4 x i64> @v4i64_func_void() #0 {
  ; CHECK-LABEL: name: v4i64_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
  ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile invariant load (p1) from `ptr addrspace(4) undef`, addrspace 4)
  ; CHECK-NEXT:   [[LOAD1:%[0-9]+]]:_(<4 x s64>) = G_LOAD [[LOAD]](p1) :: (load (<4 x s64>) from %ir.ptr, addrspace 1)
  ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD1]](<4 x s64>)
  ; CHECK-NEXT:   $vgpr0 = COPY [[UV]](s32)
  ; CHECK-NEXT:   $vgpr1 = COPY [[UV1]](s32)
  ; CHECK-NEXT:   $vgpr2 = COPY [[UV2]](s32)
  ; CHECK-NEXT:   $vgpr3 = COPY [[UV3]](s32)
  ; CHECK-NEXT:   $vgpr4 = COPY [[UV4]](s32)
  ; CHECK-NEXT:   $vgpr5 = COPY [[UV5]](s32)
  ; CHECK-NEXT:   $vgpr6 = COPY [[UV6]](s32)
  ; CHECK-NEXT:   $vgpr7 = COPY [[UV7]](s32)
  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7
  %ptr = load volatile ptr addrspace(1), ptr addrspace(4) undef
  %val = load <4 x i64>, ptr addrspace(1) %ptr
  ret <4 x i64> %val
}

define <5 x i64> @v5i64_func_void() #0 {
  ; CHECK-LABEL: name: v5i64_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
  ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile invariant load (p1) from `ptr addrspace(4) undef`, addrspace 4)
  ; CHECK-NEXT:   [[LOAD1:%[0-9]+]]:_(<5 x s64>) = G_LOAD [[LOAD]](p1) :: (load (<5 x s64>) from %ir.ptr, align 64, addrspace 1)
  ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD1]](<5 x s64>)
  ; CHECK-NEXT:   $vgpr0 = COPY [[UV]](s32)
  ; CHECK-NEXT:   $vgpr1 = COPY [[UV1]](s32)
  ; CHECK-NEXT:   $vgpr2 = COPY [[UV2]](s32)
  ; CHECK-NEXT:   $vgpr3 = COPY [[UV3]](s32)
  ; CHECK-NEXT:   $vgpr4 = COPY [[UV4]](s32)
  ; CHECK-NEXT:   $vgpr5 = COPY [[UV5]](s32)
  ; CHECK-NEXT:   $vgpr6 = COPY [[UV6]](s32)
  ; CHECK-NEXT:   $vgpr7 = COPY [[UV7]](s32)
  ; CHECK-NEXT:   $vgpr8 = COPY [[UV8]](s32)
  ; CHECK-NEXT:   $vgpr9 = COPY [[UV9]](s32)
  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9
  %ptr = load volatile ptr addrspace(1), ptr addrspace(4) undef
  %val = load <5 x i64>, ptr addrspace(1) %ptr
  ret <5 x i64> %val
}

define <8 x i64> @v8i64_func_void() #0 {
  ; CHECK-LABEL: name: v8i64_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
  ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile invariant load (p1) from `ptr addrspace(4) undef`, addrspace 4)
  ; CHECK-NEXT:   [[LOAD1:%[0-9]+]]:_(<8 x s64>) = G_LOAD [[LOAD]](p1) :: (load (<8 x s64>) from %ir.ptr, addrspace 1)
  ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32), [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32), [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32), [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD1]](<8 x s64>)
  ; CHECK-NEXT:   $vgpr0 = COPY [[UV]](s32)
  ; CHECK-NEXT:   $vgpr1 = COPY [[UV1]](s32)
  ; CHECK-NEXT:   $vgpr2 = COPY [[UV2]](s32)
  ; CHECK-NEXT:   $vgpr3 = COPY [[UV3]](s32)
  ; CHECK-NEXT:   $vgpr4 = COPY [[UV4]](s32)
  ; CHECK-NEXT:   $vgpr5 = COPY [[UV5]](s32)
  ; CHECK-NEXT:   $vgpr6 = COPY [[UV6]](s32)
  ; CHECK-NEXT:   $vgpr7 = COPY [[UV7]](s32)
  ; CHECK-NEXT:   $vgpr8 = COPY [[UV8]](s32)
  ; CHECK-NEXT:   $vgpr9 = COPY [[UV9]](s32)
  ; CHECK-NEXT:   $vgpr10 = COPY [[UV10]](s32)
  ; CHECK-NEXT:   $vgpr11 = COPY [[UV11]](s32)
  ; CHECK-NEXT:   $vgpr12 = COPY [[UV12]](s32)
  ; CHECK-NEXT:   $vgpr13 = COPY [[UV13]](s32)
  ; CHECK-NEXT:   $vgpr14 = COPY [[UV14]](s32)
  ; CHECK-NEXT:   $vgpr15 = COPY [[UV15]](s32)
  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15
  %ptr = load volatile ptr addrspace(1), ptr addrspace(4) undef
  %val = load <8 x i64>, ptr addrspace(1) %ptr
  ret <8 x i64> %val
}

define <16 x i64> @v16i64_func_void() #0 {
  ; CHECK-LABEL: name: v16i64_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
  ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile invariant load (p1) from `ptr addrspace(4) undef`, addrspace 4)
  ; CHECK-NEXT:   [[LOAD1:%[0-9]+]]:_(<16 x s64>) = G_LOAD [[LOAD]](p1) :: (load (<16 x s64>) from %ir.ptr, addrspace 1)
  ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32), [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32), [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32), [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32), [[UV16:%[0-9]+]]:_(s32), [[UV17:%[0-9]+]]:_(s32), [[UV18:%[0-9]+]]:_(s32), [[UV19:%[0-9]+]]:_(s32), [[UV20:%[0-9]+]]:_(s32), [[UV21:%[0-9]+]]:_(s32), [[UV22:%[0-9]+]]:_(s32), [[UV23:%[0-9]+]]:_(s32), [[UV24:%[0-9]+]]:_(s32), [[UV25:%[0-9]+]]:_(s32), [[UV26:%[0-9]+]]:_(s32), [[UV27:%[0-9]+]]:_(s32), [[UV28:%[0-9]+]]:_(s32), [[UV29:%[0-9]+]]:_(s32), [[UV30:%[0-9]+]]:_(s32), [[UV31:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD1]](<16 x s64>)
  ; CHECK-NEXT:   $vgpr0 = COPY [[UV]](s32)
  ; CHECK-NEXT:   $vgpr1 = COPY [[UV1]](s32)
  ; CHECK-NEXT:   $vgpr2 = COPY [[UV2]](s32)
  ; CHECK-NEXT:   $vgpr3 = COPY [[UV3]](s32)
  ; CHECK-NEXT:   $vgpr4 = COPY [[UV4]](s32)
  ; CHECK-NEXT:   $vgpr5 = COPY [[UV5]](s32)
  ; CHECK-NEXT:   $vgpr6 = COPY [[UV6]](s32)
  ; CHECK-NEXT:   $vgpr7 = COPY [[UV7]](s32)
  ; CHECK-NEXT:   $vgpr8 = COPY [[UV8]](s32)
  ; CHECK-NEXT:   $vgpr9 = COPY [[UV9]](s32)
  ; CHECK-NEXT:   $vgpr10 = COPY [[UV10]](s32)
  ; CHECK-NEXT:   $vgpr11 = COPY [[UV11]](s32)
  ; CHECK-NEXT:   $vgpr12 = COPY [[UV12]](s32)
  ; CHECK-NEXT:   $vgpr13 = COPY [[UV13]](s32)
  ; CHECK-NEXT:   $vgpr14 = COPY [[UV14]](s32)
  ; CHECK-NEXT:   $vgpr15 = COPY [[UV15]](s32)
  ; CHECK-NEXT:   $vgpr16 = COPY [[UV16]](s32)
  ; CHECK-NEXT:   $vgpr17 = COPY [[UV17]](s32)
  ; CHECK-NEXT:   $vgpr18 = COPY [[UV18]](s32)
  ; CHECK-NEXT:   $vgpr19 = COPY [[UV19]](s32)
  ; CHECK-NEXT:   $vgpr20 = COPY [[UV20]](s32)
  ; CHECK-NEXT:   $vgpr21 = COPY [[UV21]](s32)
  ; CHECK-NEXT:   $vgpr22 = COPY [[UV22]](s32)
  ; CHECK-NEXT:   $vgpr23 = COPY [[UV23]](s32)
  ; CHECK-NEXT:   $vgpr24 = COPY [[UV24]](s32)
  ; CHECK-NEXT:   $vgpr25 = COPY [[UV25]](s32)
  ; CHECK-NEXT:   $vgpr26 = COPY [[UV26]](s32)
  ; CHECK-NEXT:   $vgpr27 = COPY [[UV27]](s32)
  ; CHECK-NEXT:   $vgpr28 = COPY [[UV28]](s32)
  ; CHECK-NEXT:   $vgpr29 = COPY [[UV29]](s32)
  ; CHECK-NEXT:   $vgpr30 = COPY [[UV30]](s32)
  ; CHECK-NEXT:   $vgpr31 = COPY [[UV31]](s32)
  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15, implicit $vgpr16, implicit $vgpr17, implicit $vgpr18, implicit $vgpr19, implicit $vgpr20, implicit $vgpr21, implicit $vgpr22, implicit $vgpr23, implicit $vgpr24, implicit $vgpr25, implicit $vgpr26, implicit $vgpr27, implicit $vgpr28, implicit $vgpr29, implicit $vgpr30, implicit $vgpr31
  %ptr = load volatile ptr addrspace(1), ptr addrspace(4) undef
  %val = load <16 x i64>, ptr addrspace(1) %ptr
  ret <16 x i64> %val
}

define <2 x i16> @v2i16_func_void() #0 {
  ; CHECK-LABEL: name: v2i16_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[DEF]](p1) :: (load (<2 x s16>) from `ptr addrspace(1) undef`, addrspace 1)
  ; CHECK-NEXT:   $vgpr0 = COPY [[LOAD]](<2 x s16>)
  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
  %val = load <2 x i16>, ptr addrspace(1) undef
  ret <2 x i16> %val
}

define <2 x half> @v2f16_func_void() #0 {
  ; CHECK-LABEL: name: v2f16_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[DEF]](p1) :: (load (<2 x s16>) from `ptr addrspace(1) undef`, addrspace 1)
  ; CHECK-NEXT:   $vgpr0 = COPY [[LOAD]](<2 x s16>)
  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
  %val = load <2 x half>, ptr addrspace(1) undef
  ret <2 x half> %val
}

define <3 x i16> @v3i16_func_void() #0 {
  ; CHECK-LABEL: name: v3i16_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(<3 x s16>) = G_LOAD [[DEF]](p1) :: (load (<3 x s16>) from `ptr addrspace(1) undef`, align 8, addrspace 1)
  ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[LOAD]](<3 x s16>)
  ; CHECK-NEXT:   [[DEF1:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
  ; CHECK-NEXT:   [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s16>) = G_BUILD_VECTOR [[UV]](s16), [[UV1]](s16), [[UV2]](s16), [[DEF1]](s16)
  ; CHECK-NEXT:   [[UV3:%[0-9]+]]:_(<2 x s16>), [[UV4:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[BUILD_VECTOR]](<4 x s16>)
  ; CHECK-NEXT:   $vgpr0 = COPY [[UV3]](<2 x s16>)
  ; CHECK-NEXT:   $vgpr1 = COPY [[UV4]](<2 x s16>)
  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1
  %val = load <3 x i16>, ptr addrspace(1) undef
  ret <3 x i16> %val
}

define <4 x i16> @v4i16_func_void() #0 {
  ; CHECK-LABEL: name: v4i16_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(<4 x s16>) = G_LOAD [[DEF]](p1) :: (load (<4 x s16>) from `ptr addrspace(1) undef`, addrspace 1)
  ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[LOAD]](<4 x s16>)
  ; CHECK-NEXT:   $vgpr0 = COPY [[UV]](<2 x s16>)
  ; CHECK-NEXT:   $vgpr1 = COPY [[UV1]](<2 x s16>)
  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1
  %val = load <4 x i16>, ptr addrspace(1) undef
  ret <4 x i16> %val
}

define <4 x half> @v4f16_func_void() #0 {
  ; CHECK-LABEL: name: v4f16_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(<4 x s16>) = G_LOAD [[DEF]](p1) :: (load (<4 x s16>) from `ptr addrspace(1) undef`, addrspace 1)
  ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[LOAD]](<4 x s16>)
  ; CHECK-NEXT:   $vgpr0 = COPY [[UV]](<2 x s16>)
  ; CHECK-NEXT:   $vgpr1 = COPY [[UV1]](<2 x s16>)
  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1
  %val = load <4 x half>, ptr addrspace(1) undef
  ret <4 x half> %val
}

define <5 x i16> @v5i16_func_void() #0 {
  ; CHECK-LABEL: name: v5i16_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
  ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile invariant load (p1) from `ptr addrspace(4) undef`, addrspace 4)
  ; CHECK-NEXT:   [[LOAD1:%[0-9]+]]:_(<5 x s16>) = G_LOAD [[LOAD]](p1) :: (load (<5 x s16>) from %ir.ptr, align 16, addrspace 1)
  ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16), [[UV4:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[LOAD1]](<5 x s16>)
  ; CHECK-NEXT:   [[DEF1:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
  ; CHECK-NEXT:   [[BUILD_VECTOR:%[0-9]+]]:_(<6 x s16>) = G_BUILD_VECTOR [[UV]](s16), [[UV1]](s16), [[UV2]](s16), [[UV3]](s16), [[UV4]](s16), [[DEF1]](s16)
  ; CHECK-NEXT:   [[UV5:%[0-9]+]]:_(<2 x s16>), [[UV6:%[0-9]+]]:_(<2 x s16>), [[UV7:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[BUILD_VECTOR]](<6 x s16>)
  ; CHECK-NEXT:   $vgpr0 = COPY [[UV5]](<2 x s16>)
  ; CHECK-NEXT:   $vgpr1 = COPY [[UV6]](<2 x s16>)
  ; CHECK-NEXT:   $vgpr2 = COPY [[UV7]](<2 x s16>)
  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
  %ptr = load volatile ptr addrspace(1), ptr addrspace(4) undef
  %val = load <5 x i16>, ptr addrspace(1) %ptr
  ret <5 x i16> %val
}

define <8 x i16> @v8i16_func_void() #0 {
  ; CHECK-LABEL: name: v8i16_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
  ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile invariant load (p1) from `ptr addrspace(4) undef`, addrspace 4)
  ; CHECK-NEXT:   [[LOAD1:%[0-9]+]]:_(<8 x s16>) = G_LOAD [[LOAD]](p1) :: (load (<8 x s16>) from %ir.ptr, addrspace 1)
  ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>), [[UV2:%[0-9]+]]:_(<2 x s16>), [[UV3:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[LOAD1]](<8 x s16>)
  ; CHECK-NEXT:   $vgpr0 = COPY [[UV]](<2 x s16>)
  ; CHECK-NEXT:   $vgpr1 = COPY [[UV1]](<2 x s16>)
  ; CHECK-NEXT:   $vgpr2 = COPY [[UV2]](<2 x s16>)
  ; CHECK-NEXT:   $vgpr3 = COPY [[UV3]](<2 x s16>)
  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
  %ptr = load volatile ptr addrspace(1), ptr addrspace(4) undef
  %val = load <8 x i16>, ptr addrspace(1) %ptr
  ret <8 x i16> %val
}

define <16 x i16> @v16i16_func_void() #0 {
  ; CHECK-LABEL: name: v16i16_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
  ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile invariant load (p1) from `ptr addrspace(4) undef`, addrspace 4)
  ; CHECK-NEXT:   [[LOAD1:%[0-9]+]]:_(<16 x s16>) = G_LOAD [[LOAD]](p1) :: (load (<16 x s16>) from %ir.ptr, addrspace 1)
  ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>), [[UV2:%[0-9]+]]:_(<2 x s16>), [[UV3:%[0-9]+]]:_(<2 x s16>), [[UV4:%[0-9]+]]:_(<2 x s16>), [[UV5:%[0-9]+]]:_(<2 x s16>), [[UV6:%[0-9]+]]:_(<2 x s16>), [[UV7:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[LOAD1]](<16 x s16>)
  ; CHECK-NEXT:   $vgpr0 = COPY [[UV]](<2 x s16>)
  ; CHECK-NEXT:   $vgpr1 = COPY [[UV1]](<2 x s16>)
  ; CHECK-NEXT:   $vgpr2 = COPY [[UV2]](<2 x s16>)
  ; CHECK-NEXT:   $vgpr3 = COPY [[UV3]](<2 x s16>)
  ; CHECK-NEXT:   $vgpr4 = COPY [[UV4]](<2 x s16>)
  ; CHECK-NEXT:   $vgpr5 = COPY [[UV5]](<2 x s16>)
  ; CHECK-NEXT:   $vgpr6 = COPY [[UV6]](<2 x s16>)
  ; CHECK-NEXT:   $vgpr7 = COPY [[UV7]](<2 x s16>)
  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7
  %ptr = load volatile ptr addrspace(1), ptr addrspace(4) undef
  %val = load <16 x i16>, ptr addrspace(1) %ptr
  ret <16 x i16> %val
}

define <16 x i8> @v16i8_func_void() #0 {
  ; CHECK-LABEL: name: v16i8_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
  ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile invariant load (p1) from `ptr addrspace(4) undef`, addrspace 4)
  ; CHECK-NEXT:   [[LOAD1:%[0-9]+]]:_(<16 x s8>) = G_LOAD [[LOAD]](p1) :: (load (<16 x s8>) from %ir.ptr, addrspace 1)
  ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s8), [[UV1:%[0-9]+]]:_(s8), [[UV2:%[0-9]+]]:_(s8), [[UV3:%[0-9]+]]:_(s8), [[UV4:%[0-9]+]]:_(s8), [[UV5:%[0-9]+]]:_(s8), [[UV6:%[0-9]+]]:_(s8), [[UV7:%[0-9]+]]:_(s8), [[UV8:%[0-9]+]]:_(s8), [[UV9:%[0-9]+]]:_(s8), [[UV10:%[0-9]+]]:_(s8), [[UV11:%[0-9]+]]:_(s8), [[UV12:%[0-9]+]]:_(s8), [[UV13:%[0-9]+]]:_(s8), [[UV14:%[0-9]+]]:_(s8), [[UV15:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[LOAD1]](<16 x s8>)
  ; CHECK-NEXT:   [[ANYEXT:%[0-9]+]]:_(s16) = G_ANYEXT [[UV]](s8)
  ; CHECK-NEXT:   [[ANYEXT1:%[0-9]+]]:_(s16) = G_ANYEXT [[UV1]](s8)
  ; CHECK-NEXT:   [[ANYEXT2:%[0-9]+]]:_(s16) = G_ANYEXT [[UV2]](s8)
  ; CHECK-NEXT:   [[ANYEXT3:%[0-9]+]]:_(s16) = G_ANYEXT [[UV3]](s8)
  ; CHECK-NEXT:   [[ANYEXT4:%[0-9]+]]:_(s16) = G_ANYEXT [[UV4]](s8)
  ; CHECK-NEXT:   [[ANYEXT5:%[0-9]+]]:_(s16) = G_ANYEXT [[UV5]](s8)
  ; CHECK-NEXT:   [[ANYEXT6:%[0-9]+]]:_(s16) = G_ANYEXT [[UV6]](s8)
  ; CHECK-NEXT:   [[ANYEXT7:%[0-9]+]]:_(s16) = G_ANYEXT [[UV7]](s8)
  ; CHECK-NEXT:   [[ANYEXT8:%[0-9]+]]:_(s16) = G_ANYEXT [[UV8]](s8)
  ; CHECK-NEXT:   [[ANYEXT9:%[0-9]+]]:_(s16) = G_ANYEXT [[UV9]](s8)
  ; CHECK-NEXT:   [[ANYEXT10:%[0-9]+]]:_(s16) = G_ANYEXT [[UV10]](s8)
  ; CHECK-NEXT:   [[ANYEXT11:%[0-9]+]]:_(s16) = G_ANYEXT [[UV11]](s8)
  ; CHECK-NEXT:   [[ANYEXT12:%[0-9]+]]:_(s16) = G_ANYEXT [[UV12]](s8)
  ; CHECK-NEXT:   [[ANYEXT13:%[0-9]+]]:_(s16) = G_ANYEXT [[UV13]](s8)
  ; CHECK-NEXT:   [[ANYEXT14:%[0-9]+]]:_(s16) = G_ANYEXT [[UV14]](s8)
  ; CHECK-NEXT:   [[ANYEXT15:%[0-9]+]]:_(s16) = G_ANYEXT [[UV15]](s8)
  ; CHECK-NEXT:   [[ANYEXT16:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT]](s16)
  ; CHECK-NEXT:   $vgpr0 = COPY [[ANYEXT16]](s32)
  ; CHECK-NEXT:   [[ANYEXT17:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT1]](s16)
  ; CHECK-NEXT:   $vgpr1 = COPY [[ANYEXT17]](s32)
  ; CHECK-NEXT:   [[ANYEXT18:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT2]](s16)
  ; CHECK-NEXT:   $vgpr2 = COPY [[ANYEXT18]](s32)
  ; CHECK-NEXT:   [[ANYEXT19:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT3]](s16)
  ; CHECK-NEXT:   $vgpr3 = COPY [[ANYEXT19]](s32)
  ; CHECK-NEXT:   [[ANYEXT20:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT4]](s16)
  ; CHECK-NEXT:   $vgpr4 = COPY [[ANYEXT20]](s32)
  ; CHECK-NEXT:   [[ANYEXT21:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT5]](s16)
  ; CHECK-NEXT:   $vgpr5 = COPY [[ANYEXT21]](s32)
  ; CHECK-NEXT:   [[ANYEXT22:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT6]](s16)
  ; CHECK-NEXT:   $vgpr6 = COPY [[ANYEXT22]](s32)
  ; CHECK-NEXT:   [[ANYEXT23:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT7]](s16)
  ; CHECK-NEXT:   $vgpr7 = COPY [[ANYEXT23]](s32)
  ; CHECK-NEXT:   [[ANYEXT24:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT8]](s16)
  ; CHECK-NEXT:   $vgpr8 = COPY [[ANYEXT24]](s32)
  ; CHECK-NEXT:   [[ANYEXT25:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT9]](s16)
  ; CHECK-NEXT:   $vgpr9 = COPY [[ANYEXT25]](s32)
  ; CHECK-NEXT:   [[ANYEXT26:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT10]](s16)
  ; CHECK-NEXT:   $vgpr10 = COPY [[ANYEXT26]](s32)
  ; CHECK-NEXT:   [[ANYEXT27:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT11]](s16)
  ; CHECK-NEXT:   $vgpr11 = COPY [[ANYEXT27]](s32)
  ; CHECK-NEXT:   [[ANYEXT28:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT12]](s16)
  ; CHECK-NEXT:   $vgpr12 = COPY [[ANYEXT28]](s32)
  ; CHECK-NEXT:   [[ANYEXT29:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT13]](s16)
  ; CHECK-NEXT:   $vgpr13 = COPY [[ANYEXT29]](s32)
  ; CHECK-NEXT:   [[ANYEXT30:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT14]](s16)
  ; CHECK-NEXT:   $vgpr14 = COPY [[ANYEXT30]](s32)
  ; CHECK-NEXT:   [[ANYEXT31:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT15]](s16)
  ; CHECK-NEXT:   $vgpr15 = COPY [[ANYEXT31]](s32)
  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15
  %ptr = load volatile ptr addrspace(1), ptr addrspace(4) undef
  %val = load <16 x i8>, ptr addrspace(1) %ptr
  ret <16 x i8> %val
}

define <2 x i8> @v2i8_func_void() #0 {
  ; CHECK-LABEL: name: v2i8_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(<2 x s8>) = G_LOAD [[DEF]](p1) :: (load (<2 x s8>) from `ptr addrspace(1) undef`, addrspace 1)
  ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s8), [[UV1:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[LOAD]](<2 x s8>)
  ; CHECK-NEXT:   [[ANYEXT:%[0-9]+]]:_(s16) = G_ANYEXT [[UV]](s8)
  ; CHECK-NEXT:   [[ANYEXT1:%[0-9]+]]:_(s16) = G_ANYEXT [[UV1]](s8)
  ; CHECK-NEXT:   [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT]](s16)
  ; CHECK-NEXT:   $vgpr0 = COPY [[ANYEXT2]](s32)
  ; CHECK-NEXT:   [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT1]](s16)
  ; CHECK-NEXT:   $vgpr1 = COPY [[ANYEXT3]](s32)
  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1
  %val = load <2 x i8>, ptr addrspace(1) undef
  ret <2 x i8> %val
}

define <3 x i8> @v3i8_func_void() #0 {
  ; CHECK-LABEL: name: v3i8_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(<3 x s8>) = G_LOAD [[DEF]](p1) :: (load (<3 x s8>) from `ptr addrspace(1) undef`, align 4, addrspace 1)
  ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s8), [[UV1:%[0-9]+]]:_(s8), [[UV2:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[LOAD]](<3 x s8>)
  ; CHECK-NEXT:   [[ANYEXT:%[0-9]+]]:_(s16) = G_ANYEXT [[UV]](s8)
  ; CHECK-NEXT:   [[ANYEXT1:%[0-9]+]]:_(s16) = G_ANYEXT [[UV1]](s8)
  ; CHECK-NEXT:   [[ANYEXT2:%[0-9]+]]:_(s16) = G_ANYEXT [[UV2]](s8)
  ; CHECK-NEXT:   [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT]](s16)
  ; CHECK-NEXT:   $vgpr0 = COPY [[ANYEXT3]](s32)
  ; CHECK-NEXT:   [[ANYEXT4:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT1]](s16)
  ; CHECK-NEXT:   $vgpr1 = COPY [[ANYEXT4]](s32)
  ; CHECK-NEXT:   [[ANYEXT5:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT2]](s16)
  ; CHECK-NEXT:   $vgpr2 = COPY [[ANYEXT5]](s32)
  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
  %val = load <3 x i8>, ptr addrspace(1) undef
  ret <3 x i8> %val
}

define <4 x i8> @v4i8_func_void() #0 {
  ; CHECK-LABEL: name: v4i8_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
  ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile invariant load (p1) from `ptr addrspace(4) undef`, addrspace 4)
  ; CHECK-NEXT:   [[LOAD1:%[0-9]+]]:_(<4 x s8>) = G_LOAD [[LOAD]](p1) :: (load (<4 x s8>) from %ir.ptr, addrspace 1)
  ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s8), [[UV1:%[0-9]+]]:_(s8), [[UV2:%[0-9]+]]:_(s8), [[UV3:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[LOAD1]](<4 x s8>)
  ; CHECK-NEXT:   [[ANYEXT:%[0-9]+]]:_(s16) = G_ANYEXT [[UV]](s8)
  ; CHECK-NEXT:   [[ANYEXT1:%[0-9]+]]:_(s16) = G_ANYEXT [[UV1]](s8)
  ; CHECK-NEXT:   [[ANYEXT2:%[0-9]+]]:_(s16) = G_ANYEXT [[UV2]](s8)
  ; CHECK-NEXT:   [[ANYEXT3:%[0-9]+]]:_(s16) = G_ANYEXT [[UV3]](s8)
  ; CHECK-NEXT:   [[ANYEXT4:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT]](s16)
  ; CHECK-NEXT:   $vgpr0 = COPY [[ANYEXT4]](s32)
  ; CHECK-NEXT:   [[ANYEXT5:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT1]](s16)
  ; CHECK-NEXT:   $vgpr1 = COPY [[ANYEXT5]](s32)
  ; CHECK-NEXT:   [[ANYEXT6:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT2]](s16)
  ; CHECK-NEXT:   $vgpr2 = COPY [[ANYEXT6]](s32)
  ; CHECK-NEXT:   [[ANYEXT7:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT3]](s16)
  ; CHECK-NEXT:   $vgpr3 = COPY [[ANYEXT7]](s32)
  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
  %ptr = load volatile ptr addrspace(1), ptr addrspace(4) undef
  %val = load <4 x i8>, ptr addrspace(1) %ptr
  ret <4 x i8> %val
}

define {i8, i32} @struct_i8_i32_func_void() #0 {
  ; CHECK-LABEL: name: struct_i8_i32_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[DEF]](p1) :: (load (s8) from `ptr addrspace(1) undef`, align 4, addrspace 1)
  ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
  ; CHECK-NEXT:   [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[DEF]], [[C]](s64)
  ; CHECK-NEXT:   [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s32) from `ptr addrspace(1) undef` + 4, addrspace 1)
  ; CHECK-NEXT:   [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LOAD]](s8)
  ; CHECK-NEXT:   $vgpr0 = COPY [[ANYEXT]](s32)
  ; CHECK-NEXT:   $vgpr1 = COPY [[LOAD1]](s32)
  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1
  %val = load { i8, i32 }, ptr addrspace(1) undef
  ret { i8, i32 } %val
}

define void @void_func_sret_struct_i8_i32(ptr addrspace(5) sret({ i8, i32 }) %arg0) #0 {
  ; CHECK-LABEL: name: void_func_sret_struct_i8_i32
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK-NEXT:   liveins: $vgpr0
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
  ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[DEF]](p1) :: (volatile load (s8) from `ptr addrspace(1) undef`, addrspace 1)
  ; CHECK-NEXT:   [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p1) :: (volatile load (s32) from `ptr addrspace(1) undef`, addrspace 1)
  ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
  ; CHECK-NEXT:   %13:_(p5) = nuw nusw G_PTR_ADD [[COPY]], [[C]](s32)
  ; CHECK-NEXT:   G_STORE [[LOAD]](s8), [[COPY]](p5) :: (store (s8) into %ir.arg0, addrspace 5)
  ; CHECK-NEXT:   G_STORE [[LOAD1]](s32), %13(p5) :: (store (s32) into %ir.gep1, addrspace 5)
  ; CHECK-NEXT:   SI_RETURN
  %val0 = load volatile i8, ptr addrspace(1) undef
  %val1 = load volatile i32, ptr addrspace(1) undef
  %gep0 = getelementptr inbounds { i8, i32 }, ptr addrspace(5) %arg0, i32 0, i32 0
  %gep1 = getelementptr inbounds { i8, i32 }, ptr addrspace(5) %arg0, i32 0, i32 1
  store i8 %val0, ptr addrspace(5) %gep0
  store i32 %val1, ptr addrspace(5) %gep1
  ret void
}

; FIXME: Should be able to fold offsets in all of these pre-gfx9. Call
; lowering introduces an extra CopyToReg/CopyFromReg that obscures the
; inserted AssertZext. Not using it introduces the spills.

define <33 x i32> @v33i32_func_void() #0 {
  ; CHECK-LABEL: name: v33i32_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK-NEXT:   liveins: $vgpr0
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
  ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
  ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile invariant load (p1) from `ptr addrspace(4) undef`, addrspace 4)
  ; CHECK-NEXT:   [[LOAD1:%[0-9]+]]:_(<33 x s32>) = G_LOAD [[LOAD]](p1) :: (load (<33 x s32>) from %ir.ptr, align 256, addrspace 1)
  ; CHECK-NEXT:   G_STORE [[LOAD1]](<33 x s32>), [[COPY]](p5) :: (store (<33 x s32>), align 256, addrspace 5)
  ; CHECK-NEXT:   SI_RETURN
  %ptr = load volatile ptr addrspace(1), ptr addrspace(4) undef
  %val = load <33 x i32>, ptr addrspace(1) %ptr
  ret <33 x i32> %val
}

define <33 x i32> @v33i32_func_v33i32_i32(ptr addrspace(1) %p, i32 %idx) #0 {
  ; CHECK-LABEL: name: v33i32_func_v33i32_i32
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
  ; CHECK-NEXT:   [[MV:%[0-9]+]]:_(p1) = G_MERGE_VALUES [[COPY1]](s32), [[COPY2]](s32)
  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
  ; CHECK-NEXT:   [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[COPY3]](s32)
  ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 256
  ; CHECK-NEXT:   [[MUL:%[0-9]+]]:_(s64) = G_MUL [[SEXT]], [[C]]
  ; CHECK-NEXT:   [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[MV]], [[MUL]](s64)
  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:_(p1) = COPY [[PTR_ADD]](p1)
  ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(<33 x s32>) = G_LOAD [[COPY4]](p1) :: (load (<33 x s32>) from %ir.gep, align 256, addrspace 1)
  ; CHECK-NEXT:   G_STORE [[LOAD]](<33 x s32>), [[COPY]](p5) :: (store (<33 x s32>), align 256, addrspace 5)
  ; CHECK-NEXT:   SI_RETURN
  %gep = getelementptr inbounds <33 x i32>, ptr addrspace(1) %p, i32 %idx
  %val = load <33 x i32>, ptr addrspace(1) %gep
  ret <33 x i32> %val
}

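; In the two struct cases below, the member at byte offset 128 (the i32 in
; the first, the <32 x i32> in the second) is loaded and stored into the
; sret buffer with a matching 128-byte offset.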
define { <32 x i32>, i32 } @struct_v32i32_i32_func_void() #0 {
  ; CHECK-LABEL: name: struct_v32i32_i32_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK-NEXT:   liveins: $vgpr0
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
  ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
  ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile invariant load (p1) from `ptr addrspace(4) undef`, addrspace 4)
  ; CHECK-NEXT:   [[LOAD1:%[0-9]+]]:_(<32 x s32>) = G_LOAD [[LOAD]](p1) :: (load (<32 x s32>) from %ir.ptr, addrspace 1)
  ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 128
  ; CHECK-NEXT:   [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[LOAD]], [[C]](s64)
  ; CHECK-NEXT:   [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s32) from %ir.ptr + 128, align 128, addrspace 1)
  ; CHECK-NEXT:   G_STORE [[LOAD1]](<32 x s32>), [[COPY]](p5) :: (store (<32 x s32>), addrspace 5)
  ; CHECK-NEXT:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 128
  ; CHECK-NEXT:   [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
  ; CHECK-NEXT:   G_STORE [[LOAD2]](s32), [[PTR_ADD1]](p5) :: (store (s32), align 128, addrspace 5)
  ; CHECK-NEXT:   SI_RETURN
  %ptr = load volatile ptr addrspace(1), ptr addrspace(4) undef
  %val = load { <32 x i32>, i32 }, ptr addrspace(1) %ptr
  ret { <32 x i32>, i32 } %val
}

define { i32, <32 x i32> } @struct_i32_v32i32_func_void() #0 {
  ; CHECK-LABEL: name: struct_i32_v32i32_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK-NEXT:   liveins: $vgpr0
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
  ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
  ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile invariant load (p1) from `ptr addrspace(4) undef`, addrspace 4)
  ; CHECK-NEXT:   [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[LOAD]](p1) :: (load (s32) from %ir.ptr, align 128, addrspace 1)
  ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 128
  ; CHECK-NEXT:   [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[LOAD]], [[C]](s64)
  ; CHECK-NEXT:   [[LOAD2:%[0-9]+]]:_(<32 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load (<32 x s32>) from %ir.ptr + 128, addrspace 1)
  ; CHECK-NEXT:   G_STORE [[LOAD1]](s32), [[COPY]](p5) :: (store (s32), align 128, addrspace 5)
  ; CHECK-NEXT:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 128
  ; CHECK-NEXT:   [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
  ; CHECK-NEXT:   G_STORE [[LOAD2]](<32 x s32>), [[PTR_ADD1]](p5) :: (store (<32 x s32>), addrspace 5)
  ; CHECK-NEXT:   SI_RETURN
  %ptr = load volatile ptr addrspace(1), ptr addrspace(4) undef
  %val = load { i32, <32 x i32> }, ptr addrspace(1) %ptr
  ret { i32, <32 x i32> } %val
}

; Make sure the last struct component is returned in v3, not v4.
define { <3 x i32>, i32 } @v3i32_struct_func_void_wasted_reg() #0 {
  ; CHECK-LABEL: name: v3i32_struct_func_void_wasted_reg
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p3) = G_IMPLICIT_DEF
  ; CHECK-NEXT:   [[DEF1:%[0-9]+]]:_(<3 x s32>) = G_IMPLICIT_DEF
  ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
  ; CHECK-NEXT:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
  ; CHECK-NEXT:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
  ; CHECK-NEXT:   [[DEF2:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
  ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p3) :: (volatile load (s32) from `ptr addrspace(3) undef`, addrspace 3)
  ; CHECK-NEXT:   [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p3) :: (volatile load (s32) from `ptr addrspace(3) undef`, addrspace 3)
  ; CHECK-NEXT:   [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p3) :: (volatile load (s32) from `ptr addrspace(3) undef`, addrspace 3)
  ; CHECK-NEXT:   [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p3) :: (volatile load (s32) from `ptr addrspace(3) undef`, addrspace 3)
  ; CHECK-NEXT:   [[IVEC:%[0-9]+]]:_(<3 x s32>) = G_INSERT_VECTOR_ELT [[DEF1]], [[LOAD]](s32), [[C]](s32)
  ; CHECK-NEXT:   [[IVEC1:%[0-9]+]]:_(<3 x s32>) = G_INSERT_VECTOR_ELT [[IVEC]], [[LOAD1]](s32), [[C1]](s32)
  ; CHECK-NEXT:   [[IVEC2:%[0-9]+]]:_(<3 x s32>) = G_INSERT_VECTOR_ELT [[IVEC1]], [[LOAD2]](s32), [[C2]](s32)
  ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[IVEC2]](<3 x s32>)
  ; CHECK-NEXT:   $vgpr0 = COPY [[UV]](s32)
  ; CHECK-NEXT:   $vgpr1 = COPY [[UV1]](s32)
  ; CHECK-NEXT:   $vgpr2 = COPY [[UV2]](s32)
  ; CHECK-NEXT:   $vgpr3 = COPY [[LOAD3]](s32)
  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
  %load0 = load volatile i32, ptr addrspace(3) undef
  %load1 = load volatile i32, ptr addrspace(3) undef
  %load2 = load volatile i32, ptr addrspace(3) undef
  %load3 = load volatile i32, ptr addrspace(3) undef

  %insert.0 = insertelement <3 x i32> undef, i32 %load0, i32 0
  %insert.1 = insertelement <3 x i32> %insert.0, i32 %load1, i32 1
  %insert.2 = insertelement <3 x i32> %insert.1, i32 %load2, i32 2
  %insert.3 = insertvalue { <3 x i32>, i32 } undef, <3 x i32> %insert.2, 0
  %insert.4 = insertvalue { <3 x i32>, i32 } %insert.3, i32 %load3, 1
  ret { <3 x i32>, i32 } %insert.4
}

define { <3 x float>, i32 } @v3f32_struct_func_void_wasted_reg() #0 {
  ; CHECK-LABEL: name: v3f32_struct_func_void_wasted_reg
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p3) = G_IMPLICIT_DEF
  ; CHECK-NEXT:   [[DEF1:%[0-9]+]]:_(<3 x s32>) = G_IMPLICIT_DEF
  ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
  ; CHECK-NEXT:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
  ; CHECK-NEXT:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
  ; CHECK-NEXT:   [[DEF2:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
  ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p3) :: (volatile load (s32) from `ptr addrspace(3) undef`, addrspace 3)
  ; CHECK-NEXT:   [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p3) :: (volatile load (s32) from `ptr addrspace(3) undef`, addrspace 3)
  ; CHECK-NEXT:   [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p3) :: (volatile load (s32) from `ptr addrspace(3) undef`, addrspace 3)
  ; CHECK-NEXT:   [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p3) :: (volatile load (s32) from `ptr addrspace(3) undef`, addrspace 3)
  ; CHECK-NEXT:   [[IVEC:%[0-9]+]]:_(<3 x s32>) = G_INSERT_VECTOR_ELT [[DEF1]], [[LOAD]](s32), [[C]](s32)
  ; CHECK-NEXT:   [[IVEC1:%[0-9]+]]:_(<3 x s32>) = G_INSERT_VECTOR_ELT [[IVEC]], [[LOAD1]](s32), [[C1]](s32)
  ; CHECK-NEXT:   [[IVEC2:%[0-9]+]]:_(<3 x s32>) = G_INSERT_VECTOR_ELT [[IVEC1]], [[LOAD2]](s32), [[C2]](s32)
  ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[IVEC2]](<3 x s32>)
  ; CHECK-NEXT:   $vgpr0 = COPY [[UV]](s32)
  ; CHECK-NEXT:   $vgpr1 = COPY [[UV1]](s32)
  ; CHECK-NEXT:   $vgpr2 = COPY [[UV2]](s32)
  ; CHECK-NEXT:   $vgpr3 = COPY [[LOAD3]](s32)
  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
  %load0 = load volatile float, ptr addrspace(3) undef
  %load1 = load volatile float, ptr addrspace(3) undef
  %load2 = load volatile float, ptr addrspace(3) undef
  %load3 = load volatile i32, ptr addrspace(3) undef

  %insert.0 = insertelement <3 x float> undef, float %load0, i32 0
  %insert.1 = insertelement <3 x float> %insert.0, float %load1, i32 1
  %insert.2 = insertelement <3 x float> %insert.1, float %load2, i32 2
  %insert.3 = insertvalue { <3 x float>, i32 } undef, <3 x float> %insert.2, 0
  %insert.4 = insertvalue { <3 x float>, i32 } %insert.3, i32 %load3, 1
  ret { <3 x float>, i32 } %insert.4
}

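; As the name suggests, this relies on the high bits of the incoming sret
; stack pointer being known zero, so each lshr result below should carry
; known zero bits as well.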
define void @void_func_sret_max_known_zero_bits(ptr addrspace(5) sret(i8) %arg0) #0 {
  ; CHECK-LABEL: name: void_func_sret_max_known_zero_bits
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK-NEXT:   liveins: $vgpr0
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
  ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
  ; CHECK-NEXT:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 17
  ; CHECK-NEXT:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 18
  ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p3) = G_IMPLICIT_DEF
  ; CHECK-NEXT:   [[PTRTOINT:%[0-9]+]]:_(s32) = G_PTRTOINT [[COPY]](p5)
  ; CHECK-NEXT:   [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[PTRTOINT]], [[C]](s32)
  ; CHECK-NEXT:   [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[PTRTOINT]], [[C1]](s32)
  ; CHECK-NEXT:   [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[PTRTOINT]], [[C2]](s32)
  ; CHECK-NEXT:   G_STORE [[LSHR]](s32), [[DEF]](p3) :: (volatile store (s32) into `ptr addrspace(3) undef`, addrspace 3)
  ; CHECK-NEXT:   G_STORE [[LSHR1]](s32), [[DEF]](p3) :: (volatile store (s32) into `ptr addrspace(3) undef`, addrspace 3)
  ; CHECK-NEXT:   G_STORE [[LSHR2]](s32), [[DEF]](p3) :: (volatile store (s32) into `ptr addrspace(3) undef`, addrspace 3)
  ; CHECK-NEXT:   SI_RETURN
  %arg0.int = ptrtoint ptr addrspace(5) %arg0 to i32

  %lshr0 = lshr i32 %arg0.int, 16
  %lshr1 = lshr i32 %arg0.int, 17
  %lshr2 = lshr i32 %arg0.int, 18

  store volatile i32 %lshr0, ptr addrspace(3) undef
  store volatile i32 %lshr1, ptr addrspace(3) undef
  store volatile i32 %lshr2, ptr addrspace(3) undef
  ret void
}

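; i1022 is widened to s1024 and split across all 32 return VGPRs; the
; three variants below differ only in the extension used (anyext, sext,
; zext).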
define i1022 @i1022_func_void() #0 {
  ; CHECK-LABEL: name: i1022_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(s1022) = G_LOAD [[DEF]](p1) :: (load (s1022) from `ptr addrspace(1) undef`, align 8, addrspace 1)
  ; CHECK-NEXT:   [[ANYEXT:%[0-9]+]]:_(s1024) = G_ANYEXT [[LOAD]](s1022)
  ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32), [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32), [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32), [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32), [[UV16:%[0-9]+]]:_(s32), [[UV17:%[0-9]+]]:_(s32), [[UV18:%[0-9]+]]:_(s32), [[UV19:%[0-9]+]]:_(s32), [[UV20:%[0-9]+]]:_(s32), [[UV21:%[0-9]+]]:_(s32), [[UV22:%[0-9]+]]:_(s32), [[UV23:%[0-9]+]]:_(s32), [[UV24:%[0-9]+]]:_(s32), [[UV25:%[0-9]+]]:_(s32), [[UV26:%[0-9]+]]:_(s32), [[UV27:%[0-9]+]]:_(s32), [[UV28:%[0-9]+]]:_(s32), [[UV29:%[0-9]+]]:_(s32), [[UV30:%[0-9]+]]:_(s32), [[UV31:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ANYEXT]](s1024)
  ; CHECK-NEXT:   $vgpr0 = COPY [[UV]](s32)
  ; CHECK-NEXT:   $vgpr1 = COPY [[UV1]](s32)
  ; CHECK-NEXT:   $vgpr2 = COPY [[UV2]](s32)
  ; CHECK-NEXT:   $vgpr3 = COPY [[UV3]](s32)
  ; CHECK-NEXT:   $vgpr4 = COPY [[UV4]](s32)
  ; CHECK-NEXT:   $vgpr5 = COPY [[UV5]](s32)
  ; CHECK-NEXT:   $vgpr6 = COPY [[UV6]](s32)
  ; CHECK-NEXT:   $vgpr7 = COPY [[UV7]](s32)
  ; CHECK-NEXT:   $vgpr8 = COPY [[UV8]](s32)
  ; CHECK-NEXT:   $vgpr9 = COPY [[UV9]](s32)
  ; CHECK-NEXT:   $vgpr10 = COPY [[UV10]](s32)
  ; CHECK-NEXT:   $vgpr11 = COPY [[UV11]](s32)
  ; CHECK-NEXT:   $vgpr12 = COPY [[UV12]](s32)
  ; CHECK-NEXT:   $vgpr13 = COPY [[UV13]](s32)
  ; CHECK-NEXT:   $vgpr14 = COPY [[UV14]](s32)
  ; CHECK-NEXT:   $vgpr15 = COPY [[UV15]](s32)
  ; CHECK-NEXT:   $vgpr16 = COPY [[UV16]](s32)
  ; CHECK-NEXT:   $vgpr17 = COPY [[UV17]](s32)
  ; CHECK-NEXT:   $vgpr18 = COPY [[UV18]](s32)
  ; CHECK-NEXT:   $vgpr19 = COPY [[UV19]](s32)
  ; CHECK-NEXT:   $vgpr20 = COPY [[UV20]](s32)
  ; CHECK-NEXT:   $vgpr21 = COPY [[UV21]](s32)
  ; CHECK-NEXT:   $vgpr22 = COPY [[UV22]](s32)
  ; CHECK-NEXT:   $vgpr23 = COPY [[UV23]](s32)
  ; CHECK-NEXT:   $vgpr24 = COPY [[UV24]](s32)
  ; CHECK-NEXT:   $vgpr25 = COPY [[UV25]](s32)
  ; CHECK-NEXT:   $vgpr26 = COPY [[UV26]](s32)
  ; CHECK-NEXT:   $vgpr27 = COPY [[UV27]](s32)
  ; CHECK-NEXT:   $vgpr28 = COPY [[UV28]](s32)
  ; CHECK-NEXT:   $vgpr29 = COPY [[UV29]](s32)
  ; CHECK-NEXT:   $vgpr30 = COPY [[UV30]](s32)
  ; CHECK-NEXT:   $vgpr31 = COPY [[UV31]](s32)
  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15, implicit $vgpr16, implicit $vgpr17, implicit $vgpr18, implicit $vgpr19, implicit $vgpr20, implicit $vgpr21, implicit $vgpr22, implicit $vgpr23, implicit $vgpr24, implicit $vgpr25, implicit $vgpr26, implicit $vgpr27, implicit $vgpr28, implicit $vgpr29, implicit $vgpr30, implicit $vgpr31
  %val = load i1022, ptr addrspace(1) undef
  ret i1022 %val
}

define signext i1022 @i1022_signext_func_void() #0 {
  ; CHECK-LABEL: name: i1022_signext_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(s1022) = G_LOAD [[DEF]](p1) :: (load (s1022) from `ptr addrspace(1) undef`, align 8, addrspace 1)
  ; CHECK-NEXT:   [[SEXT:%[0-9]+]]:_(s1024) = G_SEXT [[LOAD]](s1022)
  ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32), [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32), [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32), [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32), [[UV16:%[0-9]+]]:_(s32), [[UV17:%[0-9]+]]:_(s32), [[UV18:%[0-9]+]]:_(s32), [[UV19:%[0-9]+]]:_(s32), [[UV20:%[0-9]+]]:_(s32), [[UV21:%[0-9]+]]:_(s32), [[UV22:%[0-9]+]]:_(s32), [[UV23:%[0-9]+]]:_(s32), [[UV24:%[0-9]+]]:_(s32), [[UV25:%[0-9]+]]:_(s32), [[UV26:%[0-9]+]]:_(s32), [[UV27:%[0-9]+]]:_(s32), [[UV28:%[0-9]+]]:_(s32), [[UV29:%[0-9]+]]:_(s32), [[UV30:%[0-9]+]]:_(s32), [[UV31:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SEXT]](s1024)
  ; CHECK-NEXT:   $vgpr0 = COPY [[UV]](s32)
  ; CHECK-NEXT:   $vgpr1 = COPY [[UV1]](s32)
  ; CHECK-NEXT:   $vgpr2 = COPY [[UV2]](s32)
  ; CHECK-NEXT:   $vgpr3 = COPY [[UV3]](s32)
  ; CHECK-NEXT:   $vgpr4 = COPY [[UV4]](s32)
  ; CHECK-NEXT:   $vgpr5 = COPY [[UV5]](s32)
  ; CHECK-NEXT:   $vgpr6 = COPY [[UV6]](s32)
  ; CHECK-NEXT:   $vgpr7 = COPY [[UV7]](s32)
  ; CHECK-NEXT:   $vgpr8 = COPY [[UV8]](s32)
  ; CHECK-NEXT:   $vgpr9 = COPY [[UV9]](s32)
  ; CHECK-NEXT:   $vgpr10 = COPY [[UV10]](s32)
  ; CHECK-NEXT:   $vgpr11 = COPY [[UV11]](s32)
  ; CHECK-NEXT:   $vgpr12 = COPY [[UV12]](s32)
  ; CHECK-NEXT:   $vgpr13 = COPY [[UV13]](s32)
  ; CHECK-NEXT:   $vgpr14 = COPY [[UV14]](s32)
  ; CHECK-NEXT:   $vgpr15 = COPY [[UV15]](s32)
  ; CHECK-NEXT:   $vgpr16 = COPY [[UV16]](s32)
  ; CHECK-NEXT:   $vgpr17 = COPY [[UV17]](s32)
  ; CHECK-NEXT:   $vgpr18 = COPY [[UV18]](s32)
  ; CHECK-NEXT:   $vgpr19 = COPY [[UV19]](s32)
  ; CHECK-NEXT:   $vgpr20 = COPY [[UV20]](s32)
  ; CHECK-NEXT:   $vgpr21 = COPY [[UV21]](s32)
  ; CHECK-NEXT:   $vgpr22 = COPY [[UV22]](s32)
  ; CHECK-NEXT:   $vgpr23 = COPY [[UV23]](s32)
  ; CHECK-NEXT:   $vgpr24 = COPY [[UV24]](s32)
  ; CHECK-NEXT:   $vgpr25 = COPY [[UV25]](s32)
  ; CHECK-NEXT:   $vgpr26 = COPY [[UV26]](s32)
  ; CHECK-NEXT:   $vgpr27 = COPY [[UV27]](s32)
  ; CHECK-NEXT:   $vgpr28 = COPY [[UV28]](s32)
  ; CHECK-NEXT:   $vgpr29 = COPY [[UV29]](s32)
  ; CHECK-NEXT:   $vgpr30 = COPY [[UV30]](s32)
  ; CHECK-NEXT:   $vgpr31 = COPY [[UV31]](s32)
  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15, implicit $vgpr16, implicit $vgpr17, implicit $vgpr18, implicit $vgpr19, implicit $vgpr20, implicit $vgpr21, implicit $vgpr22, implicit $vgpr23, implicit $vgpr24, implicit $vgpr25, implicit $vgpr26, implicit $vgpr27, implicit $vgpr28, implicit $vgpr29, implicit $vgpr30, implicit $vgpr31
  %val = load i1022, ptr addrspace(1) undef
  ret i1022 %val
}

define zeroext i1022 @i1022_zeroext_func_void() #0 {
  ; CHECK-LABEL: name: i1022_zeroext_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(s1022) = G_LOAD [[DEF]](p1) :: (load (s1022) from `ptr addrspace(1) undef`, align 8, addrspace 1)
  ; CHECK-NEXT:   [[ZEXT:%[0-9]+]]:_(s1024) = G_ZEXT [[LOAD]](s1022)
  ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32), [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32), [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32), [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32), [[UV16:%[0-9]+]]:_(s32), [[UV17:%[0-9]+]]:_(s32), [[UV18:%[0-9]+]]:_(s32), [[UV19:%[0-9]+]]:_(s32), [[UV20:%[0-9]+]]:_(s32), [[UV21:%[0-9]+]]:_(s32), [[UV22:%[0-9]+]]:_(s32), [[UV23:%[0-9]+]]:_(s32), [[UV24:%[0-9]+]]:_(s32), [[UV25:%[0-9]+]]:_(s32), [[UV26:%[0-9]+]]:_(s32), [[UV27:%[0-9]+]]:_(s32), [[UV28:%[0-9]+]]:_(s32), [[UV29:%[0-9]+]]:_(s32), [[UV30:%[0-9]+]]:_(s32), [[UV31:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ZEXT]](s1024)
  ; CHECK-NEXT:   $vgpr0 = COPY [[UV]](s32)
  ; CHECK-NEXT:   $vgpr1 = COPY [[UV1]](s32)
  ; CHECK-NEXT:   $vgpr2 = COPY [[UV2]](s32)
  ; CHECK-NEXT:   $vgpr3 = COPY [[UV3]](s32)
  ; CHECK-NEXT:   $vgpr4 = COPY [[UV4]](s32)
  ; CHECK-NEXT:   $vgpr5 = COPY [[UV5]](s32)
  ; CHECK-NEXT:   $vgpr6 = COPY [[UV6]](s32)
  ; CHECK-NEXT:   $vgpr7 = COPY [[UV7]](s32)
  ; CHECK-NEXT:   $vgpr8 = COPY [[UV8]](s32)
  ; CHECK-NEXT:   $vgpr9 = COPY [[UV9]](s32)
  ; CHECK-NEXT:   $vgpr10 = COPY [[UV10]](s32)
  ; CHECK-NEXT:   $vgpr11 = COPY [[UV11]](s32)
  ; CHECK-NEXT:   $vgpr12 = COPY [[UV12]](s32)
  ; CHECK-NEXT:   $vgpr13 = COPY [[UV13]](s32)
  ; CHECK-NEXT:   $vgpr14 = COPY [[UV14]](s32)
  ; CHECK-NEXT:   $vgpr15 = COPY [[UV15]](s32)
  ; CHECK-NEXT:   $vgpr16 = COPY [[UV16]](s32)
  ; CHECK-NEXT:   $vgpr17 = COPY [[UV17]](s32)
  ; CHECK-NEXT:   $vgpr18 = COPY [[UV18]](s32)
  ; CHECK-NEXT:   $vgpr19 = COPY [[UV19]](s32)
  ; CHECK-NEXT:   $vgpr20 = COPY [[UV20]](s32)
  ; CHECK-NEXT:   $vgpr21 = COPY [[UV21]](s32)
  ; CHECK-NEXT:   $vgpr22 = COPY [[UV22]](s32)
  ; CHECK-NEXT:   $vgpr23 = COPY [[UV23]](s32)
  ; CHECK-NEXT:   $vgpr24 = COPY [[UV24]](s32)
  ; CHECK-NEXT:   $vgpr25 = COPY [[UV25]](s32)
  ; CHECK-NEXT:   $vgpr26 = COPY [[UV26]](s32)
  ; CHECK-NEXT:   $vgpr27 = COPY [[UV27]](s32)
  ; CHECK-NEXT:   $vgpr28 = COPY [[UV28]](s32)
  ; CHECK-NEXT:   $vgpr29 = COPY [[UV29]](s32)
  ; CHECK-NEXT:   $vgpr30 = COPY [[UV30]](s32)
  ; CHECK-NEXT:   $vgpr31 = COPY [[UV31]](s32)
  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15, implicit $vgpr16, implicit $vgpr17, implicit $vgpr18, implicit $vgpr19, implicit $vgpr20, implicit $vgpr21, implicit $vgpr22, implicit $vgpr23, implicit $vgpr24, implicit $vgpr25, implicit $vgpr26, implicit $vgpr27, implicit $vgpr28, implicit $vgpr29, implicit $vgpr30, implicit $vgpr31
  %val = load i1022, ptr addrspace(1) undef
  ret i1022 %val
}

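; This struct mixes a <32 x i32> with pointers of different sizes, so it is
; far too large for registers and is returned through the hidden sret
; pointer in $vgpr0.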
%struct.with.ptrs = type { <32 x i32>, ptr addrspace(3), ptr addrspace(1), <2 x ptr addrspace(1)> }

define %struct.with.ptrs @ptr_in_struct_func_void() #0 {
  ; CHECK-LABEL: name: ptr_in_struct_func_void
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK-NEXT:   liveins: $vgpr0
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
  ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(<32 x s32>) = G_LOAD [[DEF]](p1) :: (volatile load (<32 x s32>) from `ptr addrspace(1) undef`, addrspace 1)
  ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 128
  ; CHECK-NEXT:   [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[DEF]], [[C]](s64)
  ; CHECK-NEXT:   [[LOAD1:%[0-9]+]]:_(p3) = G_LOAD [[PTR_ADD]](p1) :: (volatile load (p3) from `ptr addrspace(1) undef` + 128, align 128, addrspace 1)
  ; CHECK-NEXT:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 136
  ; CHECK-NEXT:   [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[DEF]], [[C1]](s64)
  ; CHECK-NEXT:   [[LOAD2:%[0-9]+]]:_(p1) = G_LOAD [[PTR_ADD1]](p1) :: (volatile load (p1) from `ptr addrspace(1) undef` + 136, addrspace 1)
  ; CHECK-NEXT:   [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 144
  ; CHECK-NEXT:   [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[DEF]], [[C2]](s64)
  ; CHECK-NEXT:   [[LOAD3:%[0-9]+]]:_(<2 x p1>) = G_LOAD [[PTR_ADD2]](p1) :: (volatile load (<2 x p1>) from `ptr addrspace(1) undef` + 144, addrspace 1)
  ; CHECK-NEXT:   G_STORE [[LOAD]](<32 x s32>), [[COPY]](p5) :: (store (<32 x s32>), addrspace 5)
  ; CHECK-NEXT:   [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 128
  ; CHECK-NEXT:   [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C3]](s32)
  ; CHECK-NEXT:   G_STORE [[LOAD1]](p3), [[PTR_ADD3]](p5) :: (store (p3), align 128, addrspace 5)
  ; CHECK-NEXT:   [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 136
  ; CHECK-NEXT:   [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C4]](s32)
  ; CHECK-NEXT:   G_STORE [[LOAD2]](p1), [[PTR_ADD4]](p5) :: (store (p1), addrspace 5)
  ; CHECK-NEXT:   [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 144
  ; CHECK-NEXT:   [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32)
  ; CHECK-NEXT:   G_STORE [[LOAD3]](<2 x p1>), [[PTR_ADD5]](p5) :: (store (<2 x p1>), addrspace 5)
  ; CHECK-NEXT:   SI_RETURN
  %val = load volatile %struct.with.ptrs, ptr addrspace(1) undef
  ret %struct.with.ptrs %val
}

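; A possible extra case, sketched on the assumption that first-class array
; returns are not already covered elsewhere in this file. Assertions are
; intentionally omitted; they would be regenerated with
; utils/update_mir_test_checks.py. Each element should get its own return
; VGPR, as in the small struct cases above.
define [2 x i32] @a2i32_func_void() #0 {
  %val = load [2 x i32], ptr addrspace(1) undef
  ret [2 x i32] %val
}
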
attributes #0 = { nounwind }