xref: /llvm-project/llvm/test/CodeGen/AArch64/arm64-addrmode.ll (revision 048fc2bc102cff806613592829ff275c0f2b826f)
1; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2; RUN: llc -aarch64-enable-subreg-liveness-tracking -mtriple=arm64-eabi < %s | FileCheck %s
3; rdar://10232252
4
5@object = external hidden global i64, section "__DATA, __objc_ivar", align 8
6
7; base + offset (imm9)
8define void @t1(ptr %object) {
9; CHECK-LABEL: t1:
10; CHECK:       // %bb.0:
11; CHECK-NEXT:    ldr xzr, [x0, #8]
12; CHECK-NEXT:    ret
; Offset 8 (= 1 * sizeof(i64)) fits the LDR scaled unsigned-imm12 field,
; so it folds directly into the load's addressing mode.
13  %incdec.ptr = getelementptr inbounds i64, ptr %object, i64 1
14  %tmp = load volatile i64, ptr %incdec.ptr, align 8
15  ret void
16}
17
18; base + offset (> imm9)
19define void @t2(ptr %object) {
20; CHECK-LABEL: t2:
21; CHECK:       // %bb.0:
22; CHECK-NEXT:    sub x8, x0, #264
23; CHECK-NEXT:    ldr xzr, [x8]
24; CHECK-NEXT:    ret
; Offset -264 (= -33 * 8) is below the signed imm9 range [-256, 255] and
; negative (so the scaled-imm12 form is unusable); expect a separate SUB.
25  %incdec.ptr = getelementptr inbounds i64, ptr %object, i64 -33
26  %tmp = load volatile i64, ptr %incdec.ptr, align 8
27  ret void
28}
29
30; base + unsigned offset (> imm9 and <= imm12 * size of type in bytes)
31define void @t3(ptr %object) {
32; CHECK-LABEL: t3:
33; CHECK:       // %bb.0:
34; CHECK-NEXT:    ldr xzr, [x0, #32760]
35; CHECK-NEXT:    ret
; Offset 32760 (= 4095 * 8) is the maximum scaled imm12 offset for an
; 8-byte LDR, so it still folds into the load.
36  %incdec.ptr = getelementptr inbounds i64, ptr %object, i64 4095
37  %tmp = load volatile i64, ptr %incdec.ptr, align 8
38  ret void
39}
40
41; base + unsigned offset (> imm12 * size of type in bytes)
42define void @t4(ptr %object) {
43; CHECK-LABEL: t4:
44; CHECK:       // %bb.0:
45; CHECK-NEXT:    mov w8, #32768 // =0x8000
46; CHECK-NEXT:    ldr xzr, [x0, x8]
47; CHECK-NEXT:    ret
; Offset 32768 (= 4096 * 8) is one past the scaled imm12 limit; the
; constant is materialized and used as a register offset instead.
48  %incdec.ptr = getelementptr inbounds i64, ptr %object, i64 4096
49  %tmp = load volatile i64, ptr %incdec.ptr, align 8
50  ret void
51}
52
53; base + reg
54define void @t5(i64 %a) {
55; CHECK-LABEL: t5:
56; CHECK:       // %bb.0:
57; CHECK-NEXT:    adrp x8, object
58; CHECK-NEXT:    add x8, x8, :lo12:object
59; CHECK-NEXT:    ldr xzr, [x8, x0, lsl #3]
60; CHECK-NEXT:    ret
; Global base is formed with adrp + :lo12:, and the variable i64 index
; folds as a register offset with lsl #3 (scale by element size).
61  %incdec.ptr = getelementptr inbounds i64, ptr @object, i64 %a
62  %tmp = load volatile i64, ptr %incdec.ptr, align 8
63  ret void
64}
65
66; base + reg + imm
67define void @t6(i64 %a, ptr %object) {
68; CHECK-LABEL: t6:
69; CHECK:       // %bb.0:
70; CHECK-NEXT:    add x8, x1, x0, lsl #3
71; CHECK-NEXT:    mov w9, #32768 // =0x8000
72; CHECK-NEXT:    ldr xzr, [x8, x9]
73; CHECK-NEXT:    ret
; base + reg + imm cannot all fold into one addressing mode: the shifted
; register index becomes an ADD, and the large immediate a register offset.
74  %tmp1 = getelementptr inbounds i64, ptr %object, i64 %a
75  %incdec.ptr = getelementptr inbounds i64, ptr %tmp1, i64 4096
76  %tmp = load volatile i64, ptr %incdec.ptr, align 8
77  ret void
78}
79
80; Test base + wide immediate
81define void @t7(i64 %a) {
82; CHECK-LABEL: t7:
83; CHECK:       // %bb.0:
84; CHECK-NEXT:    mov w8, #65535 // =0xffff
85; CHECK-NEXT:    ldr xzr, [x0, x8]
86; CHECK-NEXT:    ret
; 0xffff is a single-MOV immediate; it is kept in a register and folded
; as a register offset rather than split into ADDs.
87  %1 = add i64 %a, 65535   ;0xffff
88  %2 = inttoptr i64 %1 to ptr
89  %3 = load volatile i64, ptr %2, align 8
90  ret void
91}
92
93define void @t8(i64 %a) {
94; CHECK-LABEL: t8:
95; CHECK:       // %bb.0:
96; CHECK-NEXT:    mov x8, #-4662 // =0xffffffffffffedca
97; CHECK-NEXT:    ldr xzr, [x0, x8]
98; CHECK-NEXT:    ret
; A negative wide immediate that MOVN can encode in one instruction is
; materialized and used as a register offset.
99  %1 = sub i64 %a, 4662   ;-4662 is 0xffffffffffffedca
100  %2 = inttoptr i64 %1 to ptr
101  %3 = load volatile i64, ptr %2, align 8
102  ret void
103}
104
105define void @t9(i64 %a) {
106; CHECK-LABEL: t9:
107; CHECK:       // %bb.0:
108; CHECK-NEXT:    mov x8, #-305463297 // =0xffffffffedcaffff
109; CHECK-NEXT:    ldr xzr, [x0, x8]
110; CHECK-NEXT:    ret
; Same as t8 but with the constant as the first add operand; still a
; single-MOV wide immediate used as a register offset.
111  %1 = add i64 -305463297, %a   ;-305463297 is 0xffffffffedcaffff
112  %2 = inttoptr i64 %1 to ptr
113  %3 = load volatile i64, ptr %2, align 8
114  ret void
115}
116
117define void @t10(i64 %a) {
118; CHECK-LABEL: t10:
119; CHECK:       // %bb.0:
120; CHECK-NEXT:    mov x8, #81909218222800896 // =0x123000000000000
121; CHECK-NEXT:    ldr xzr, [x0, x8]
122; CHECK-NEXT:    ret
; 0x123000000000000 is a single 16-bit chunk shifted high, so one MOV
; materializes it; folded as register offset.
123  %1 = add i64 %a, 81909218222800896   ;0x123000000000000
124  %2 = inttoptr i64 %1 to ptr
125  %3 = load volatile i64, ptr %2, align 8
126  ret void
127}
128
129define void @t11(i64 %a) {
130; CHECK-LABEL: t11:
131; CHECK:       // %bb.0:
132; CHECK-NEXT:    mov w8, #17767 // =0x4567
133; CHECK-NEXT:    movk w8, #291, lsl #16
134; CHECK-NEXT:    ldr xzr, [x0, x8]
135; CHECK-NEXT:    ret
; 0x1234567 spans two 16-bit chunks, so it needs MOV+MOVK; still cheaper
; as a register offset than as ADDs.
136  %1 = add i64 %a, 19088743   ;0x1234567
137  %2 = inttoptr i64 %1 to ptr
138  %3 = load volatile i64, ptr %2, align 8
139  ret void
140}
141
142; Test some boundaries that should not use movz/movn/orr
143define void @t12(i64 %a) {
144; CHECK-LABEL: t12:
145; CHECK:       // %bb.0:
146; CHECK-NEXT:    add x8, x0, #4095
147; CHECK-NEXT:    ldr xzr, [x8]
148; CHECK-NEXT:    ret
; 4095 is the maximum unshifted ADD imm12; expect ADD + plain LDR rather
; than a movz/movn/orr materialization.
149  %1 = add i64 %a, 4095   ;0xfff
150  %2 = inttoptr i64 %1 to ptr
151  %3 = load volatile i64, ptr %2, align 8
152  ret void
153}
154
155define void @t13(i64 %a) {
156; CHECK-LABEL: t13:
157; CHECK:       // %bb.0:
158; CHECK-NEXT:    sub x8, x0, #4095
159; CHECK-NEXT:    ldr xzr, [x8]
160; CHECK-NEXT:    ret
; -4095 is encoded as SUB with the maximum unshifted imm12.
161  %1 = add i64 %a, -4095   ;-0xfff
162  %2 = inttoptr i64 %1 to ptr
163  %3 = load volatile i64, ptr %2, align 8
164  ret void
165}
166
167define void @t14(i64 %a) {
168; CHECK-LABEL: t14:
169; CHECK:       // %bb.0:
170; CHECK-NEXT:    add x8, x0, #291, lsl #12 // =1191936
171; CHECK-NEXT:    ldr xzr, [x8]
172; CHECK-NEXT:    ret
; 0x123000 fits the shifted form of ADD (imm12 << 12), so a single ADD
; suffices.
173  %1 = add i64 %a, 1191936   ;0x123000
174  %2 = inttoptr i64 %1 to ptr
175  %3 = load volatile i64, ptr %2, align 8
176  ret void
177}
178
179define void @t15(i64 %a) {
180; CHECK-LABEL: t15:
181; CHECK:       // %bb.0:
182; CHECK-NEXT:    sub x8, x0, #291, lsl #12 // =1191936
183; CHECK-NEXT:    ldr xzr, [x8]
184; CHECK-NEXT:    ret
; The negated form of t14: SUB with shifted imm12 (imm12 << 12).
185  %1 = add i64 %a, -1191936   ;0xFFFFFFFFFFEDD000
186  %2 = inttoptr i64 %1 to ptr
187  %3 = load volatile i64, ptr %2, align 8
188  ret void
189}
190
191define void @t16(i64 %a) {
192; CHECK-LABEL: t16:
193; CHECK:       // %bb.0:
194; CHECK-NEXT:    ldr xzr, [x0, #28672]
195; CHECK-NEXT:    ret
; 28672 / 8 = 3584 fits the scaled imm12 field, so the offset folds
; straight into the load with no extra instruction.
196  %1 = add i64 %a, 28672   ;0x7000
197  %2 = inttoptr i64 %1 to ptr
198  %3 = load volatile i64, ptr %2, align 8
199  ret void
200}
201
202define void @t17(i64 %a) {
203; CHECK-LABEL: t17:
204; CHECK:       // %bb.0:
205; CHECK-NEXT:    ldur xzr, [x0, #-256]
206; CHECK-NEXT:    ret
; -256 is the lower bound of the signed imm9 unscaled range, so the
; unscaled LDUR form absorbs it with no extra instruction.
207  %1 = add i64 %a, -256   ;-0x100
208  %2 = inttoptr i64 %1 to ptr
209  %3 = load volatile i64, ptr %2, align 8
210  ret void
211}
212
213; LDRBBroX
214define i8 @LdOffset_i8(ptr %a)  {
215; CHECK-LABEL: LdOffset_i8:
216; CHECK:       // %bb.0:
217; CHECK-NEXT:    add x8, x0, #253, lsl #12 // =1036288
218; CHECK-NEXT:    ldrb w0, [x8, #3704]
219; CHECK-NEXT:    ret
; 1039992 = 1036288 (#253 << 12, one shifted ADD) + 3704 (fits the byte
; load's imm12), avoiding a MOV+MOVK register offset.
220  %arrayidx = getelementptr inbounds i8, ptr %a, i64 1039992
221  %val = load i8, ptr %arrayidx, align 1
222  ret i8 %val
223}
224
225; LDRBBroX
226define i32 @LdOffset_i8_zext32(ptr %a)  {
227; CHECK-LABEL: LdOffset_i8_zext32:
228; CHECK:       // %bb.0:
229; CHECK-NEXT:    add x8, x0, #253, lsl #12 // =1036288
230; CHECK-NEXT:    ldrb w0, [x8, #3704]
231; CHECK-NEXT:    ret
; Same ADD + imm12 split as LdOffset_i8; the zext to i32 is free because
; LDRB already zero-extends into the W register.
232  %arrayidx = getelementptr inbounds i8, ptr %a, i64 1039992
233  %val = load i8, ptr %arrayidx, align 1
234  %conv = zext i8 %val to i32
235  ret i32 %conv
236}
237
238; LDRSBWroX
239define i32 @LdOffset_i8_sext32(ptr %a)  {
240; CHECK-LABEL: LdOffset_i8_sext32:
241; CHECK:       // %bb.0:
242; CHECK-NEXT:    add x8, x0, #253, lsl #12 // =1036288
243; CHECK-NEXT:    ldrsb w0, [x8, #3704]
244; CHECK-NEXT:    ret
; The sext folds into the load: LDRSB (sign-extending byte load) with
; the same ADD + imm12 offset split.
245  %arrayidx = getelementptr inbounds i8, ptr %a, i64 1039992
246  %val = load i8, ptr %arrayidx, align 1
247  %conv = sext i8 %val to i32
248  ret i32 %conv
249}
250
251; LDRBBroX
252define i64 @LdOffset_i8_zext64(ptr %a)  {
253; CHECK-LABEL: LdOffset_i8_zext64:
254; CHECK:       // %bb.0:
255; CHECK-NEXT:    add x8, x0, #253, lsl #12 // =1036288
256; CHECK-NEXT:    ldrb w0, [x8, #3704]
257; CHECK-NEXT:    ret
; zext to i64 is also free: LDRB into w0 zeroes the upper 32 bits of x0.
258  %arrayidx = getelementptr inbounds i8, ptr %a, i64 1039992
259  %val = load i8, ptr %arrayidx, align 1
260  %conv = zext i8 %val to i64
261  ret i64 %conv
262}
263
264; LDRSBXroX
265define i64 @LdOffset_i8_sext64(ptr %a)  {
266; CHECK-LABEL: LdOffset_i8_sext64:
267; CHECK:       // %bb.0:
268; CHECK-NEXT:    add x8, x0, #253, lsl #12 // =1036288
269; CHECK-NEXT:    ldrsb x0, [x8, #3704]
270; CHECK-NEXT:    ret
; sext to i64 folds as LDRSB into the X register form.
271  %arrayidx = getelementptr inbounds i8, ptr %a, i64 1039992
272  %val = load i8, ptr %arrayidx, align 1
273  %conv = sext i8 %val to i64
274  ret i64 %conv
275}
276
277; LDRHHroX
278define i16 @LdOffset_i16(ptr %a)  {
279; CHECK-LABEL: LdOffset_i16:
280; CHECK:       // %bb.0:
281; CHECK-NEXT:    add x8, x0, #506, lsl #12 // =2072576
282; CHECK-NEXT:    ldrh w0, [x8, #7408]
283; CHECK-NEXT:    ret
; Byte offset 2079984 (= 1039992 * 2) splits as 2072576 (#506 << 12)
; + 7408, which fits LDRH's halfword-scaled imm12.
284  %arrayidx = getelementptr inbounds i16, ptr %a, i64 1039992
285  %val = load i16, ptr %arrayidx, align 2
286  ret i16 %val
287}
288
289; LDRHHroX
290define i32 @LdOffset_i16_zext32(ptr %a)  {
291; CHECK-LABEL: LdOffset_i16_zext32:
292; CHECK:       // %bb.0:
293; CHECK-NEXT:    add x8, x0, #506, lsl #12 // =2072576
294; CHECK-NEXT:    ldrh w0, [x8, #7408]
295; CHECK-NEXT:    ret
; zext to i32 is free: LDRH already zero-extends into the W register.
296  %arrayidx = getelementptr inbounds i16, ptr %a, i64 1039992
297  %val = load i16, ptr %arrayidx, align 2
298  %conv = zext i16 %val to i32
299  ret i32 %conv
300}
301
302; LDRSHWroX
303define i32 @LdOffset_i16_sext32(ptr %a)  {
304; CHECK-LABEL: LdOffset_i16_sext32:
305; CHECK:       // %bb.0:
306; CHECK-NEXT:    add x8, x0, #506, lsl #12 // =2072576
307; CHECK-NEXT:    ldrsh w0, [x8, #7408]
308; CHECK-NEXT:    ret
; sext folds into the load as LDRSH (W-register form).
309  %arrayidx = getelementptr inbounds i16, ptr %a, i64 1039992
310  %val = load i16, ptr %arrayidx, align 2
311  %conv = sext i16 %val to i32
312  ret i32 %conv
313}
314
315; LDRHHroX
316define i64 @LdOffset_i16_zext64(ptr %a)  {
317; CHECK-LABEL: LdOffset_i16_zext64:
318; CHECK:       // %bb.0:
319; CHECK-NEXT:    add x8, x0, #506, lsl #12 // =2072576
320; CHECK-NEXT:    ldrh w0, [x8, #7408]
321; CHECK-NEXT:    ret
; zext to i64 is free: writing w0 zeroes the upper 32 bits of x0.
322  %arrayidx = getelementptr inbounds i16, ptr %a, i64 1039992
323  %val = load i16, ptr %arrayidx, align 2
324  %conv = zext i16 %val to i64
325  ret i64 %conv
326}
327
328; LDRSHXroX
329define i64 @LdOffset_i16_sext64(ptr %a)  {
330; CHECK-LABEL: LdOffset_i16_sext64:
331; CHECK:       // %bb.0:
332; CHECK-NEXT:    add x8, x0, #506, lsl #12 // =2072576
333; CHECK-NEXT:    ldrsh x0, [x8, #7408]
334; CHECK-NEXT:    ret
; sext to i64 folds as LDRSH into the X register form.
335  %arrayidx = getelementptr inbounds i16, ptr %a, i64 1039992
336  %val = load i16, ptr %arrayidx, align 2
337  %conv = sext i16 %val to i64
338  ret i64 %conv
339}
340
341; LDRWroX
342define i32 @LdOffset_i32(ptr %a)  {
343; CHECK-LABEL: LdOffset_i32:
344; CHECK:       // %bb.0:
345; CHECK-NEXT:    add x8, x0, #1012, lsl #12 // =4145152
346; CHECK-NEXT:    ldr w0, [x8, #14816]
347; CHECK-NEXT:    ret
; Byte offset 4159968 (= 1039992 * 4) splits as 4145152 (#1012 << 12)
; + 14816, which fits the word-scaled imm12.
348  %arrayidx = getelementptr inbounds i32, ptr %a, i64 1039992
349  %val = load i32, ptr %arrayidx, align 4
350  ret i32 %val
351}
352
353; LDRWroX
354define i64 @LdOffset_i32_zext64(ptr %a)  {
355; CHECK-LABEL: LdOffset_i32_zext64:
356; CHECK:       // %bb.0:
357; CHECK-NEXT:    add x8, x0, #1012, lsl #12 // =4145152
358; CHECK-NEXT:    ldr w0, [x8, #14816]
359; CHECK-NEXT:    ret
; zext to i64 is free: a 32-bit LDR into w0 zeroes the upper bits of x0.
; (The align 2 marks the load underaligned; codegen is unaffected here.)
360  %arrayidx = getelementptr inbounds i32, ptr %a, i64 1039992
361  %val = load i32, ptr %arrayidx, align 2
362  %conv = zext i32 %val to i64
363  ret i64 %conv
364}
365
366; LDRSWroX
367define i64 @LdOffset_i32_sext64(ptr %a)  {
368; CHECK-LABEL: LdOffset_i32_sext64:
369; CHECK:       // %bb.0:
370; CHECK-NEXT:    add x8, x0, #1012, lsl #12 // =4145152
371; CHECK-NEXT:    ldrsw x0, [x8, #14816]
372; CHECK-NEXT:    ret
; sext to i64 folds into the load as LDRSW.
373  %arrayidx = getelementptr inbounds i32, ptr %a, i64 1039992
374  %val = load i32, ptr %arrayidx, align 2
375  %conv = sext i32 %val to i64
376  ret i64 %conv
377}
378
379; LDRXroX
380define i64 @LdOffset_i64(ptr %a)  {
381; CHECK-LABEL: LdOffset_i64:
382; CHECK:       // %bb.0:
383; CHECK-NEXT:    add x8, x0, #2024, lsl #12 // =8290304
384; CHECK-NEXT:    ldr x0, [x8, #29632]
385; CHECK-NEXT:    ret
; Byte offset 8319936 (= 1039992 * 8) splits as 8290304 (#2024 << 12)
; + 29632, which fits the doubleword-scaled imm12.
386  %arrayidx = getelementptr inbounds i64, ptr %a, i64 1039992
387  %val = load i64, ptr %arrayidx, align 4
388  ret i64 %val
389}
390
391; LDRDroX
392define <2 x i32> @LdOffset_v2i32(ptr %a)  {
393; CHECK-LABEL: LdOffset_v2i32:
394; CHECK:       // %bb.0:
395; CHECK-NEXT:    add x8, x0, #2024, lsl #12 // =8290304
396; CHECK-NEXT:    ldr d0, [x8, #29632]
397; CHECK-NEXT:    ret
; 8-byte vector: same offset split as LdOffset_i64, loaded into a D reg.
398  %arrayidx = getelementptr inbounds <2 x i32>, ptr %a, i64 1039992
399  %val = load <2 x i32>, ptr %arrayidx, align 4
400  ret <2 x i32> %val
401}
402
403; LDRQroX
404define <2 x i64> @LdOffset_v2i64(ptr %a)  {
405; CHECK-LABEL: LdOffset_v2i64:
406; CHECK:       // %bb.0:
407; CHECK-NEXT:    add x8, x0, #4048, lsl #12 // =16580608
408; CHECK-NEXT:    ldr q0, [x8, #59264]
409; CHECK-NEXT:    ret
; 16-byte vector: byte offset 16639872 splits as 16580608 (#4048 << 12)
; + 59264, which fits the quadword-scaled imm12 for the Q-register load.
410  %arrayidx = getelementptr inbounds <2 x i64>, ptr %a, i64 1039992
411  %val = load <2 x i64>, ptr %arrayidx, align 4
412  ret <2 x i64> %val
413}
414
415; LDRSBWroX
416define double @LdOffset_i8_f64(ptr %a)  {
417; CHECK-LABEL: LdOffset_i8_f64:
418; CHECK:       // %bb.0:
419; CHECK-NEXT:    add x8, x0, #253, lsl #12 // =1036288
420; CHECK-NEXT:    ldrsb w8, [x8, #3704]
421; CHECK-NEXT:    scvtf d0, w8
422; CHECK-NEXT:    ret
; Signed i8 -> double: the offset split still applies with LDRSB feeding
; SCVTF; sign extension is part of the load, not a separate instruction.
423  %arrayidx = getelementptr inbounds i8, ptr %a, i64 1039992
424  %val = load i8, ptr %arrayidx, align 1
425  %conv = sitofp i8 %val to double
426  ret double %conv
427}
428
429; LDRSHWroX
430define double @LdOffset_i16_f64(ptr %a)  {
431; CHECK-LABEL: LdOffset_i16_f64:
432; CHECK:       // %bb.0:
433; CHECK-NEXT:    add x8, x0, #506, lsl #12 // =2072576
434; CHECK-NEXT:    ldrsh w8, [x8, #7408]
435; CHECK-NEXT:    scvtf d0, w8
436; CHECK-NEXT:    ret
; Signed i16 -> double: offset split with LDRSH feeding SCVTF.
437  %arrayidx = getelementptr inbounds i16, ptr %a, i64 1039992
438  %val = load i16, ptr %arrayidx, align 2
439  %conv = sitofp i16 %val to double
440  ret double %conv
441}
442
443; LDRSroX
444define double @LdOffset_i32_f64(ptr %a)  {
445; CHECK-LABEL: LdOffset_i32_f64:
446; CHECK:       // %bb.0:
447; CHECK-NEXT:    add x8, x0, #1012, lsl #12 // =4145152
448; CHECK-NEXT:    ldr s0, [x8, #14816]
449; CHECK-NEXT:    ucvtf d0, d0
450; CHECK-NEXT:    ret
; Unsigned i32 -> double: loaded straight into an S register (offset
; split as in LdOffset_i32) then converted with UCVTF.
451  %arrayidx = getelementptr inbounds i32, ptr %a, i64 1039992
452  %val = load i32, ptr %arrayidx, align 4
453  %conv = uitofp i32 %val to double
454  ret double %conv
455}
456
457; LDRDroX
458define double @LdOffset_i64_f64(ptr %a)  {
459; CHECK-LABEL: LdOffset_i64_f64:
460; CHECK:       // %bb.0:
461; CHECK-NEXT:    add x8, x0, #2024, lsl #12 // =8290304
462; CHECK-NEXT:    ldr d0, [x8, #29632]
463; CHECK-NEXT:    scvtf d0, d0
464; CHECK-NEXT:    ret
; Signed i64 -> double: loaded into a D register with the LdOffset_i64
; split, then converted in place with SCVTF.
465  %arrayidx = getelementptr inbounds i64, ptr %a, i64 1039992
466  %val = load i64, ptr %arrayidx, align 8
467  %conv = sitofp i64 %val to double
468  ret double %conv
469}
470
471define i64 @LdOffset_i64_multi_offset(ptr %a) {
472; CHECK-LABEL: LdOffset_i64_multi_offset:
473; CHECK:       // %bb.0:
474; CHECK-NEXT:    add x8, x0, #2031, lsl #12 // =8318976
475; CHECK-NEXT:    ldr x9, [x8, #960]
476; CHECK-NEXT:    ldr x8, [x8, #3016]
477; CHECK-NEXT:    add x0, x8, x9
478; CHECK-NEXT:    ret
; Two nearby large offsets share one ADD-materialized base (#2031 << 12);
; each load then uses only a small imm12 offset from it.
479  %arrayidx = getelementptr inbounds i64, ptr %a, i64 1039992
480  %val0 = load i64, ptr %arrayidx, align 8
481  %arrayidx1 = getelementptr inbounds i64, ptr %a, i64 1040249
482  %val1 = load i64, ptr %arrayidx1, align 8
483  %add = add nsw i64 %val1, %val0
484  ret i64 %add
485}
486
487define i64 @LdOffset_i64_multi_offset_with_commmon_base(ptr %a) {
488; CHECK-LABEL: LdOffset_i64_multi_offset_with_commmon_base:
489; CHECK:       // %bb.0:
490; CHECK-NEXT:    add x8, x0, #507, lsl #12 // =2076672
491; CHECK-NEXT:    ldr x9, [x8, #26464]
492; CHECK-NEXT:    ldr x8, [x8, #26496]
493; CHECK-NEXT:    add x0, x8, x9
494; CHECK-NEXT:    ret
; Both loads hang off a common GEP base %b; a single ADD (#507 << 12)
; covers the shared large part and the loads use small imm12 offsets.
; NOTE(review): "commmon" typo in the name is mirrored by CHECK-LABEL;
; renaming would require regenerating the checks.
495  %b = getelementptr inbounds i16, ptr %a, i64 1038336
496  %arrayidx = getelementptr inbounds i64, ptr %b, i64 3308
497  %val0 = load i64, ptr %arrayidx, align 8
498  %arrayidx1 = getelementptr inbounds i64, ptr %b, i64 3312
499  %val1 = load i64, ptr %arrayidx1, align 8
500  %add = add nsw i64 %val1, %val0
501  ret i64 %add
502}
503
504; Negative test: the offset is odd
505define i32 @LdOffset_i16_odd_offset(ptr nocapture noundef readonly %a)  {
506; CHECK-LABEL: LdOffset_i16_odd_offset:
507; CHECK:       // %bb.0:
508; CHECK-NEXT:    mov w8, #56953 // =0xde79
509; CHECK-NEXT:    movk w8, #15, lsl #16
510; CHECK-NEXT:    ldrsh w0, [x0, x8]
511; CHECK-NEXT:    ret
; The odd byte offset 1039993 cannot use LDRSH's halfword-scaled imm12,
; so the ADD-split optimization does not fire; MOV+MOVK register offset.
512  %arrayidx = getelementptr inbounds i8, ptr %a, i64 1039993
513  %val = load i16, ptr %arrayidx, align 2
514  %conv = sext i16 %val to i32
515  ret i32 %conv
516}
517
518; Already encoded with a single mov MOVNWi
519define i8 @LdOffset_i8_movnwi(ptr %a)  {
520; CHECK-LABEL: LdOffset_i8_movnwi:
521; CHECK:       // %bb.0:
522; CHECK-NEXT:    mov w8, #16777215 // =0xffffff
523; CHECK-NEXT:    ldrb w0, [x0, x8]
524; CHECK-NEXT:    ret
; 0xffffff is already a single-MOV (MOVN-encodable) immediate, so the
; ADD + imm12 split would not help; keep the register-offset form.
525  %arrayidx = getelementptr inbounds i8, ptr %a, i64 16777215
526  %val = load i8, ptr %arrayidx, align 1
527  ret i8 %val
528}
529
530; Negative test: the offset is too large to be encoded with an add
531define i8 @LdOffset_i8_too_large(ptr %a)  {
532; CHECK-LABEL: LdOffset_i8_too_large:
533; CHECK:       // %bb.0:
534; CHECK-NEXT:    mov w8, #1 // =0x1
535; CHECK-NEXT:    movk w8, #256, lsl #16
536; CHECK-NEXT:    ldrb w0, [x0, x8]
537; CHECK-NEXT:    ret
; 16777217 exceeds what one ADD (imm12, optionally lsl #12) plus an
; imm12 load offset can cover, so MOV+MOVK register offset is used.
538  %arrayidx = getelementptr inbounds i8, ptr %a, i64 16777217
539  %val = load i8, ptr %arrayidx, align 1
540  ret i8 %val
541}
542