; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
; RUN: opt -mtriple=amdgcn-amd-amdhsa -passes=amdgpu-attributor -S %s -o - | FileCheck %s

@g1 = protected addrspace(1) externally_initialized global i32 0, align 4
@g2 = protected addrspace(1) externally_initialized global i32 0, align 4

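; Called only with plain flat pointers (see @call_volatile_load_store_as_0), so
; the checks expect the accesses through %p to stay in the flat address space.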
define internal void @volatile_load_store_as_0(ptr %p) {
; CHECK-LABEL: define internal void @volatile_load_store_as_0(
; CHECK-SAME: ptr [[P:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT:    [[VAL_0:%.*]] = load i32, ptr addrspace(1) @g1, align 4
; CHECK-NEXT:    [[VAL_1:%.*]] = load volatile i32, ptr [[P]], align 4
; CHECK-NEXT:    store i32 [[VAL_1]], ptr addrspace(1) @g1, align 4
; CHECK-NEXT:    store volatile i32 [[VAL_0]], ptr [[P]], align 4
; CHECK-NEXT:    ret void
;
  %val.0 = load i32, ptr addrspace(1) @g1, align 4
  %val.1 = load volatile i32, ptr %p, align 4
  store i32 %val.1, ptr addrspace(1) @g1, align 4
  store volatile i32 %val.0, ptr %p, align 4
  ret void
}

define void @call_volatile_load_store_as_0(ptr %p1, ptr %p2) {
; CHECK-LABEL: define void @call_volatile_load_store_as_0(
; CHECK-SAME: ptr [[P1:%.*]], ptr [[P2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT:    call void @volatile_load_store_as_0(ptr [[P1]])
; CHECK-NEXT:    call void @volatile_load_store_as_0(ptr [[P2]])
; CHECK-NEXT:    ret void
;
  call void @volatile_load_store_as_0(ptr %p1)
  call void @volatile_load_store_as_0(ptr %p2)
  ret void
}

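; Called with pointers addrspacecast from addrspace(1) and, via
; @call_volatile_load_store_as_4, from addrspace(4); the checks still expect the
; volatile accesses through %p to stay in the flat address space.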
define internal void @volatile_load_store_as_1(ptr %p) {
; CHECK-LABEL: define internal void @volatile_load_store_as_1(
; CHECK-SAME: ptr [[P:%.*]]) #[[ATTR0]] {
; CHECK-NEXT:    [[VAL_0:%.*]] = load i32, ptr addrspace(1) @g1, align 4
; CHECK-NEXT:    [[VAL_1:%.*]] = load volatile i32, ptr [[P]], align 4
; CHECK-NEXT:    store i32 [[VAL_1]], ptr addrspace(1) @g1, align 4
; CHECK-NEXT:    store volatile i32 [[VAL_0]], ptr [[P]], align 4
; CHECK-NEXT:    ret void
;
  %val.0 = load i32, ptr addrspace(1) @g1, align 4
  %val.1 = load volatile i32, ptr %p, align 4
  store i32 %val.1, ptr addrspace(1) @g1, align 4
  store volatile i32 %val.0, ptr %p, align 4
  ret void
}

define void @call_volatile_load_store_as_1(ptr addrspace(1) %p1, ptr addrspace(1) %p2) {
; CHECK-LABEL: define void @call_volatile_load_store_as_1(
; CHECK-SAME: ptr addrspace(1) [[P1:%.*]], ptr addrspace(1) [[P2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT:    [[P1_CAST:%.*]] = addrspacecast ptr addrspace(1) [[P1]] to ptr
; CHECK-NEXT:    [[P2_CAST:%.*]] = addrspacecast ptr addrspace(1) [[P2]] to ptr
; CHECK-NEXT:    call void @volatile_load_store_as_1(ptr [[P1_CAST]])
; CHECK-NEXT:    call void @volatile_load_store_as_1(ptr [[P2_CAST]])
; CHECK-NEXT:    ret void
;
  %p1.cast = addrspacecast ptr addrspace(1) %p1 to ptr
  %p2.cast = addrspacecast ptr addrspace(1) %p2 to ptr
  call void @volatile_load_store_as_1(ptr %p1.cast)
  call void @volatile_load_store_as_1(ptr %p2.cast)
  ret void
}

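; Internal and never referenced (its would-be caller below calls
; @volatile_load_store_as_1 instead), so the attributor is free to delete it;
; presumably that is why no CHECK lines were generated for this function.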
define internal void @volatile_load_store_as_4(ptr %p) {
  %val.0 = load i32, ptr addrspace(1) @g1, align 4
  %val.1 = load volatile i32, ptr %p, align 4
  store i32 %val.1, ptr addrspace(1) @g1, align 4
  store volatile i32 %val.0, ptr %p, align 4
  ret void
}

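; Note that this caller casts addrspace(4) pointers but calls
; @volatile_load_store_as_1, not @volatile_load_store_as_4, so that callee sees
; both addrspace(1) and addrspace(4) call sites.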
define void @call_volatile_load_store_as_4(ptr addrspace(4) %p1, ptr addrspace(4) %p2) {
; CHECK-LABEL: define void @call_volatile_load_store_as_4(
; CHECK-SAME: ptr addrspace(4) [[P1:%.*]], ptr addrspace(4) [[P2:%.*]]) #[[ATTR0]] {
; CHECK-NEXT:    [[P1_CAST:%.*]] = addrspacecast ptr addrspace(4) [[P1]] to ptr
; CHECK-NEXT:    [[P2_CAST:%.*]] = addrspacecast ptr addrspace(4) [[P2]] to ptr
; CHECK-NEXT:    call void @volatile_load_store_as_1(ptr [[P1_CAST]])
; CHECK-NEXT:    call void @volatile_load_store_as_1(ptr [[P2_CAST]])
; CHECK-NEXT:    ret void
;
  %p1.cast = addrspacecast ptr addrspace(4) %p1 to ptr
  %p2.cast = addrspacecast ptr addrspace(4) %p2 to ptr
  call void @volatile_load_store_as_1(ptr %p1.cast)
  call void @volatile_load_store_as_1(ptr %p2.cast)
  ret void
}

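; Every call site in @foo passes a pointer addrspacecast from one of the
; addrspace(1) globals; the checks expect the non-volatile cmpxchg operations to
; be rewritten to addrspace(1) while the volatile ones keep the flat pointer.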
define internal void @can_infer_cmpxchg(ptr %word) {
; CHECK-LABEL: define internal void @can_infer_cmpxchg(
; CHECK-SAME: ptr [[WORD:%.*]]) #[[ATTR0]] {
; CHECK-NEXT:    [[TMP1:%.*]] = addrspacecast ptr [[WORD]] to ptr addrspace(1)
; CHECK-NEXT:    [[CMPXCHG_0:%.*]] = cmpxchg ptr addrspace(1) [[TMP1]], i32 0, i32 4 monotonic monotonic, align 4
; CHECK-NEXT:    [[TMP2:%.*]] = addrspacecast ptr [[WORD]] to ptr addrspace(1)
; CHECK-NEXT:    [[CMPXCHG_1:%.*]] = cmpxchg ptr addrspace(1) [[TMP2]], i32 0, i32 5 acq_rel monotonic, align 4
; CHECK-NEXT:    [[TMP3:%.*]] = addrspacecast ptr [[WORD]] to ptr addrspace(1)
; CHECK-NEXT:    [[CMPXCHG_2:%.*]] = cmpxchg ptr addrspace(1) [[TMP3]], i32 0, i32 6 acquire monotonic, align 4
; CHECK-NEXT:    [[TMP4:%.*]] = addrspacecast ptr [[WORD]] to ptr addrspace(1)
; CHECK-NEXT:    [[CMPXCHG_3:%.*]] = cmpxchg ptr addrspace(1) [[TMP4]], i32 0, i32 7 release monotonic, align 4
; CHECK-NEXT:    [[TMP5:%.*]] = addrspacecast ptr [[WORD]] to ptr addrspace(1)
; CHECK-NEXT:    [[CMPXCHG_4:%.*]] = cmpxchg ptr addrspace(1) [[TMP5]], i32 0, i32 8 seq_cst monotonic, align 4
; CHECK-NEXT:    [[TMP6:%.*]] = addrspacecast ptr [[WORD]] to ptr addrspace(1)
; CHECK-NEXT:    [[CMPXCHG_5:%.*]] = cmpxchg weak ptr addrspace(1) [[TMP6]], i32 0, i32 9 seq_cst monotonic, align 4
; CHECK-NEXT:    [[CMPXCHG_6:%.*]] = cmpxchg volatile ptr [[WORD]], i32 0, i32 10 seq_cst monotonic, align 4
; CHECK-NEXT:    [[CMPXCHG_7:%.*]] = cmpxchg weak volatile ptr [[WORD]], i32 0, i32 11 syncscope("singlethread") seq_cst monotonic, align 4
; CHECK-NEXT:    ret void
;
  %cmpxchg.0 = cmpxchg ptr %word, i32 0, i32 4 monotonic monotonic, align 4
  %cmpxchg.1 = cmpxchg ptr %word, i32 0, i32 5 acq_rel monotonic, align 4
  %cmpxchg.2 = cmpxchg ptr %word, i32 0, i32 6 acquire monotonic, align 4
  %cmpxchg.3 = cmpxchg ptr %word, i32 0, i32 7 release monotonic, align 4
  %cmpxchg.4 = cmpxchg ptr %word, i32 0, i32 8 seq_cst monotonic, align 4
  %cmpxchg.5 = cmpxchg weak ptr %word, i32 0, i32 9 seq_cst monotonic, align 4
  %cmpxchg.6 = cmpxchg volatile ptr %word, i32 0, i32 10 seq_cst monotonic, align 4
  %cmpxchg.7 = cmpxchg weak volatile ptr %word, i32 0, i32 11 syncscope("singlethread") seq_cst monotonic, align 4
  ret void
}

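; Also called with a pointer derived from the addrspace(3) argument of @foo, so
; the checks expect every cmpxchg to keep the flat pointer.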
define internal void @can_not_infer_cmpxchg(ptr %word) {
; CHECK-LABEL: define internal void @can_not_infer_cmpxchg(
; CHECK-SAME: ptr [[WORD:%.*]]) #[[ATTR0]] {
; CHECK-NEXT:    [[CMPXCHG_0:%.*]] = cmpxchg ptr [[WORD]], i32 0, i32 4 monotonic monotonic, align 4
; CHECK-NEXT:    [[CMPXCHG_1:%.*]] = cmpxchg ptr [[WORD]], i32 0, i32 5 acq_rel monotonic, align 4
; CHECK-NEXT:    [[CMPXCHG_2:%.*]] = cmpxchg ptr [[WORD]], i32 0, i32 6 acquire monotonic, align 4
; CHECK-NEXT:    [[CMPXCHG_3:%.*]] = cmpxchg ptr [[WORD]], i32 0, i32 7 release monotonic, align 4
; CHECK-NEXT:    [[CMPXCHG_4:%.*]] = cmpxchg ptr [[WORD]], i32 0, i32 8 seq_cst monotonic, align 4
; CHECK-NEXT:    [[CMPXCHG_5:%.*]] = cmpxchg weak ptr [[WORD]], i32 0, i32 9 seq_cst monotonic, align 4
; CHECK-NEXT:    [[CMPXCHG_6:%.*]] = cmpxchg volatile ptr [[WORD]], i32 0, i32 10 seq_cst monotonic, align 4
; CHECK-NEXT:    [[CMPXCHG_7:%.*]] = cmpxchg weak volatile ptr [[WORD]], i32 0, i32 11 syncscope("singlethread") seq_cst monotonic, align 4
; CHECK-NEXT:    ret void
;
  %cmpxchg.0 = cmpxchg ptr %word, i32 0, i32 4 monotonic monotonic, align 4
  %cmpxchg.1 = cmpxchg ptr %word, i32 0, i32 5 acq_rel monotonic, align 4
  %cmpxchg.2 = cmpxchg ptr %word, i32 0, i32 6 acquire monotonic, align 4
  %cmpxchg.3 = cmpxchg ptr %word, i32 0, i32 7 release monotonic, align 4
  %cmpxchg.4 = cmpxchg ptr %word, i32 0, i32 8 seq_cst monotonic, align 4
  %cmpxchg.5 = cmpxchg weak ptr %word, i32 0, i32 9 seq_cst monotonic, align 4
  %cmpxchg.6 = cmpxchg volatile ptr %word, i32 0, i32 10 seq_cst monotonic, align 4
  %cmpxchg.7 = cmpxchg weak volatile ptr %word, i32 0, i32 11 syncscope("singlethread") seq_cst monotonic, align 4
  ret void
}

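; atomicrmw analogue of @can_infer_cmpxchg: the checks expect the non-volatile
; operations to be rewritten to addrspace(1) and the volatile ones to stay flat.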
define internal void @can_infer_atomicrmw(ptr %word) {
; CHECK-LABEL: define internal void @can_infer_atomicrmw(
; CHECK-SAME: ptr [[WORD:%.*]]) #[[ATTR0]] {
; CHECK-NEXT:    [[TMP1:%.*]] = addrspacecast ptr [[WORD]] to ptr addrspace(1)
; CHECK-NEXT:    [[ATOMICRMW_XCHG:%.*]] = atomicrmw xchg ptr addrspace(1) [[TMP1]], i32 12 monotonic, align 4
; CHECK-NEXT:    [[TMP2:%.*]] = addrspacecast ptr [[WORD]] to ptr addrspace(1)
; CHECK-NEXT:    [[ATOMICRMW_ADD:%.*]] = atomicrmw add ptr addrspace(1) [[TMP2]], i32 13 monotonic, align 4
; CHECK-NEXT:    [[TMP3:%.*]] = addrspacecast ptr [[WORD]] to ptr addrspace(1)
; CHECK-NEXT:    [[ATOMICRMW_SUB:%.*]] = atomicrmw sub ptr addrspace(1) [[TMP3]], i32 14 monotonic, align 4
; CHECK-NEXT:    [[TMP4:%.*]] = addrspacecast ptr [[WORD]] to ptr addrspace(1)
; CHECK-NEXT:    [[ATOMICRMW_AND:%.*]] = atomicrmw and ptr addrspace(1) [[TMP4]], i32 15 monotonic, align 4
; CHECK-NEXT:    [[TMP5:%.*]] = addrspacecast ptr [[WORD]] to ptr addrspace(1)
; CHECK-NEXT:    [[ATOMICRMW_NAND:%.*]] = atomicrmw nand ptr addrspace(1) [[TMP5]], i32 16 monotonic, align 4
; CHECK-NEXT:    [[TMP6:%.*]] = addrspacecast ptr [[WORD]] to ptr addrspace(1)
; CHECK-NEXT:    [[ATOMICRMW_OR:%.*]] = atomicrmw or ptr addrspace(1) [[TMP6]], i32 17 monotonic, align 4
; CHECK-NEXT:    [[TMP7:%.*]] = addrspacecast ptr [[WORD]] to ptr addrspace(1)
; CHECK-NEXT:    [[ATOMICRMW_XOR:%.*]] = atomicrmw xor ptr addrspace(1) [[TMP7]], i32 18 monotonic, align 4
; CHECK-NEXT:    [[TMP8:%.*]] = addrspacecast ptr [[WORD]] to ptr addrspace(1)
; CHECK-NEXT:    [[ATOMICRMW_MAX:%.*]] = atomicrmw max ptr addrspace(1) [[TMP8]], i32 19 monotonic, align 4
; CHECK-NEXT:    [[ATOMICRMW_MIN:%.*]] = atomicrmw volatile min ptr [[WORD]], i32 20 monotonic, align 4
; CHECK-NEXT:    [[TMP10:%.*]] = addrspacecast ptr [[WORD]] to ptr addrspace(1)
; CHECK-NEXT:    [[ATOMICRMW_UMAX:%.*]] = atomicrmw umax ptr addrspace(1) [[TMP10]], i32 21 syncscope("singlethread") monotonic, align 4
; CHECK-NEXT:    [[ATOMICRMW_UMIN:%.*]] = atomicrmw volatile umin ptr [[WORD]], i32 22 syncscope("singlethread") monotonic, align 4
; CHECK-NEXT:    ret void
;
  %atomicrmw.xchg = atomicrmw xchg ptr %word, i32 12 monotonic, align 4
  %atomicrmw.add = atomicrmw add ptr %word, i32 13 monotonic, align 4
  %atomicrmw.sub = atomicrmw sub ptr %word, i32 14 monotonic, align 4
  %atomicrmw.and = atomicrmw and ptr %word, i32 15 monotonic, align 4
  %atomicrmw.nand = atomicrmw nand ptr %word, i32 16 monotonic, align 4
  %atomicrmw.or = atomicrmw or ptr %word, i32 17 monotonic, align 4
  %atomicrmw.xor = atomicrmw xor ptr %word, i32 18 monotonic, align 4
  %atomicrmw.max = atomicrmw max ptr %word, i32 19 monotonic, align 4
  %atomicrmw.min = atomicrmw volatile min ptr %word, i32 20 monotonic, align 4
  %atomicrmw.umax = atomicrmw umax ptr %word, i32 21 syncscope("singlethread") monotonic, align 4
  %atomicrmw.umin = atomicrmw volatile umin ptr %word, i32 22 syncscope("singlethread") monotonic, align 4
  ret void
}

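; atomicrmw analogue of @can_not_infer_cmpxchg: mixed callers, so the checks
; expect every operation to keep the flat pointer.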
define internal void @can_not_infer_atomicrmw(ptr %word) {
; CHECK-LABEL: define internal void @can_not_infer_atomicrmw(
; CHECK-SAME: ptr [[WORD:%.*]]) #[[ATTR0]] {
; CHECK-NEXT:    [[ATOMICRMW_XCHG:%.*]] = atomicrmw xchg ptr [[WORD]], i32 12 monotonic, align 4
; CHECK-NEXT:    [[ATOMICRMW_ADD:%.*]] = atomicrmw add ptr [[WORD]], i32 13 monotonic, align 4
; CHECK-NEXT:    [[ATOMICRMW_SUB:%.*]] = atomicrmw sub ptr [[WORD]], i32 14 monotonic, align 4
; CHECK-NEXT:    [[ATOMICRMW_AND:%.*]] = atomicrmw and ptr [[WORD]], i32 15 monotonic, align 4
; CHECK-NEXT:    [[ATOMICRMW_NAND:%.*]] = atomicrmw nand ptr [[WORD]], i32 16 monotonic, align 4
; CHECK-NEXT:    [[ATOMICRMW_OR:%.*]] = atomicrmw or ptr [[WORD]], i32 17 monotonic, align 4
; CHECK-NEXT:    [[ATOMICRMW_XOR:%.*]] = atomicrmw xor ptr [[WORD]], i32 18 monotonic, align 4
; CHECK-NEXT:    [[ATOMICRMW_MAX:%.*]] = atomicrmw max ptr [[WORD]], i32 19 monotonic, align 4
; CHECK-NEXT:    [[ATOMICRMW_MIN:%.*]] = atomicrmw volatile min ptr [[WORD]], i32 20 monotonic, align 4
; CHECK-NEXT:    [[ATOMICRMW_UMAX:%.*]] = atomicrmw umax ptr [[WORD]], i32 21 syncscope("singlethread") monotonic, align 4
; CHECK-NEXT:    [[ATOMICRMW_UMIN:%.*]] = atomicrmw volatile umin ptr [[WORD]], i32 22 syncscope("singlethread") monotonic, align 4
; CHECK-NEXT:    ret void
;
  %atomicrmw.xchg = atomicrmw xchg ptr %word, i32 12 monotonic, align 4
  %atomicrmw.add = atomicrmw add ptr %word, i32 13 monotonic, align 4
  %atomicrmw.sub = atomicrmw sub ptr %word, i32 14 monotonic, align 4
  %atomicrmw.and = atomicrmw and ptr %word, i32 15 monotonic, align 4
  %atomicrmw.nand = atomicrmw nand ptr %word, i32 16 monotonic, align 4
  %atomicrmw.or = atomicrmw or ptr %word, i32 17 monotonic, align 4
  %atomicrmw.xor = atomicrmw xor ptr %word, i32 18 monotonic, align 4
  %atomicrmw.max = atomicrmw max ptr %word, i32 19 monotonic, align 4
  %atomicrmw.min = atomicrmw volatile min ptr %word, i32 20 monotonic, align 4
  %atomicrmw.umax = atomicrmw umax ptr %word, i32 21 syncscope("singlethread") monotonic, align 4
  %atomicrmw.umin = atomicrmw volatile umin ptr %word, i32 22 syncscope("singlethread") monotonic, align 4
  ret void
}

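; Driver that hands the helpers above pointers derived from the addrspace(1)
; globals and, for the can_not_infer variants, a pointer derived from the
; addrspace(3) argument.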
define void @foo(ptr addrspace(3) %val) {
; CHECK-LABEL: define void @foo(
; CHECK-SAME: ptr addrspace(3) [[VAL:%.*]]) #[[ATTR1:[0-9]+]] {
; CHECK-NEXT:    [[VAL_CAST:%.*]] = addrspacecast ptr addrspace(3) [[VAL]] to ptr
; CHECK-NEXT:    call void @can_infer_cmpxchg(ptr addrspacecast (ptr addrspace(1) @g1 to ptr))
; CHECK-NEXT:    call void @can_infer_cmpxchg(ptr addrspacecast (ptr addrspace(1) @g2 to ptr))
; CHECK-NEXT:    call void @can_not_infer_cmpxchg(ptr addrspacecast (ptr addrspace(1) @g1 to ptr))
; CHECK-NEXT:    call void @can_not_infer_cmpxchg(ptr addrspacecast (ptr addrspace(1) @g2 to ptr))
; CHECK-NEXT:    call void @can_not_infer_cmpxchg(ptr [[VAL_CAST]])
; CHECK-NEXT:    call void @can_infer_atomicrmw(ptr addrspacecast (ptr addrspace(1) @g1 to ptr))
; CHECK-NEXT:    call void @can_infer_atomicrmw(ptr addrspacecast (ptr addrspace(1) @g2 to ptr))
; CHECK-NEXT:    call void @can_not_infer_atomicrmw(ptr addrspacecast (ptr addrspace(1) @g1 to ptr))
; CHECK-NEXT:    call void @can_not_infer_atomicrmw(ptr addrspacecast (ptr addrspace(1) @g2 to ptr))
; CHECK-NEXT:    call void @can_not_infer_atomicrmw(ptr [[VAL_CAST]])
; CHECK-NEXT:    ret void
;
  %g1.cast = addrspacecast ptr addrspace(1) @g1 to ptr
  %g2.cast = addrspacecast ptr addrspace(1) @g2 to ptr
  %val.cast = addrspacecast ptr addrspace(3) %val to ptr
  call void @can_infer_cmpxchg(ptr %g1.cast)
  call void @can_infer_cmpxchg(ptr %g2.cast)
  call void @can_not_infer_cmpxchg(ptr %g1.cast)
  call void @can_not_infer_cmpxchg(ptr %g2.cast)
  call void @can_not_infer_cmpxchg(ptr %val.cast)
  call void @can_infer_atomicrmw(ptr %g1.cast)
  call void @can_infer_atomicrmw(ptr %g2.cast)
  call void @can_not_infer_atomicrmw(ptr %g1.cast)
  call void @can_not_infer_atomicrmw(ptr %g2.cast)
  call void @can_not_infer_atomicrmw(ptr %val.cast)
  ret void
}

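; Round-trip addrspacecast (flat -> global -> flat) within a single function;
; the checks expect the store to use the addrspace(1) pointer directly.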
define void @kernel_argument_promotion_pattern_intra_procedure(ptr %p, i32 %val) {
; CHECK-LABEL: define void @kernel_argument_promotion_pattern_intra_procedure(
; CHECK-SAME: ptr [[P:%.*]], i32 [[VAL:%.*]]) #[[ATTR0]] {
; CHECK-NEXT:    [[P_CAST_0:%.*]] = addrspacecast ptr [[P]] to ptr addrspace(1)
; CHECK-NEXT:    store i32 [[VAL]], ptr addrspace(1) [[P_CAST_0]], align 4
; CHECK-NEXT:    ret void
;
  %p.cast.0 = addrspacecast ptr %p to ptr addrspace(1)
  %p.cast.1 = addrspacecast ptr addrspace(1) %p.cast.0 to ptr
  store i32 %val, ptr %p.cast.1
  ret void
}

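; The only caller passes a pointer that round-trips through addrspace(1), so the
; checks expect the store in this callee to be rewritten to addrspace(1).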
define internal void @use_argument_after_promotion(ptr %p, i32 %val) {
; CHECK-LABEL: define internal void @use_argument_after_promotion(
; CHECK-SAME: ptr [[P:%.*]], i32 [[VAL:%.*]]) #[[ATTR0]] {
; CHECK-NEXT:    [[TMP1:%.*]] = addrspacecast ptr [[P]] to ptr addrspace(1)
; CHECK-NEXT:    store i32 [[VAL]], ptr addrspace(1) [[TMP1]], align 4
; CHECK-NEXT:    ret void
;
  store i32 %val, ptr %p
  ret void
}

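; Inter-procedural version of the promotion pattern: the checks expect the
; redundant casts in the caller to be folded away and the original pointer to be
; passed directly.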
define void @kernel_argument_promotion_pattern_inter_procedure(ptr %p, i32 %val) {
; CHECK-LABEL: define void @kernel_argument_promotion_pattern_inter_procedure(
; CHECK-SAME: ptr [[P:%.*]], i32 [[VAL:%.*]]) #[[ATTR0]] {
; CHECK-NEXT:    call void @use_argument_after_promotion(ptr [[P]], i32 [[VAL]])
; CHECK-NEXT:    ret void
;
  %p.cast.0 = addrspacecast ptr %p to ptr addrspace(1)
  %p.cast.1 = addrspacecast ptr addrspace(1) %p.cast.0 to ptr
  call void @use_argument_after_promotion(ptr %p.cast.1, i32 %val)
  ret void
}