; xref: /llvm-project/llvm/test/Instrumentation/BoundsChecking/simple.ll (revision 5bb650345d83669434713146aaa431c1f7ad43d6)
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes='bounds-checking<merge>' -S | FileCheck %s
target datalayout = "e-p:64:64:64-p1:16:16:16-p2:64:64:64:48-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"

@.str = private constant [8 x i8] c"abcdefg\00"

@.str_as1 = private addrspace(1) constant [8 x i8] c"abcdefg\00"

@.str_as2 = private addrspace(2) constant [8 x i8] c"abcdefg\00"


; Allocation functions recognized via allocsize so the pass can compute object sizes.
declare noalias ptr @malloc(i64) nounwind allocsize(0)
declare noalias ptr @calloc(i64, i64) nounwind allocsize(0,1)
declare noalias ptr @realloc(ptr nocapture allocptr, i64) nounwind allocsize(1)
; Statically in-bounds store (index 2 of a 32-byte malloc): no guard emitted.
define void @f1() nounwind {
; CHECK-LABEL: @f1(
; CHECK-NEXT:    [[TMP1:%.*]] = tail call ptr @malloc(i64 32)
; CHECK-NEXT:    [[IDX:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i64 2
; CHECK-NEXT:    store i32 3, ptr [[IDX]], align 4
; CHECK-NEXT:    ret void
;
  %1 = tail call ptr @malloc(i64 32)
  %idx = getelementptr inbounds i32, ptr %1, i64 2
  store i32 3, ptr %idx, align 4
  ret void
}

; Statically out-of-bounds store (index 8 past a 32-byte malloc): folded to an
; unconditional branch to the trap block.
define void @f2() nounwind {
; CHECK-LABEL: @f2(
; CHECK-NEXT:    [[TMP1:%.*]] = tail call ptr @malloc(i64 32)
; CHECK-NEXT:    [[IDX:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i64 8
; CHECK-NEXT:    br label [[TRAP:%.*]]
; CHECK:       2:
; CHECK-NEXT:    store i32 3, ptr [[IDX]], align 4
; CHECK-NEXT:    ret void
; CHECK:       trap:
; CHECK-NEXT:    call void @llvm.trap() #[[ATTR6:[0-9]+]]
; CHECK-NEXT:    unreachable
;
  %1 = tail call ptr @malloc(i64 32)
  %idx = getelementptr inbounds i32, ptr %1, i64 8
  store i32 3, ptr %idx, align 4
  ret void
}

; Dynamically sized calloc: a runtime size/offset check guards the store.
define void @f3(i64 %x) nounwind {
; CHECK-LABEL: @f3(
; CHECK-NEXT:    [[TMP1:%.*]] = mul i64 4, [[X:%.*]]
; CHECK-NEXT:    [[TMP2:%.*]] = tail call ptr @calloc(i64 4, i64 [[X]])
; CHECK-NEXT:    [[IDX:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i64 8
; CHECK-NEXT:    [[TMP3:%.*]] = sub i64 [[TMP1]], 32
; CHECK-NEXT:    [[TMP4:%.*]] = icmp ult i64 [[TMP1]], 32
; CHECK-NEXT:    [[TMP5:%.*]] = icmp ult i64 [[TMP3]], 4
; CHECK-NEXT:    [[TMP6:%.*]] = or i1 [[TMP4]], [[TMP5]]
; CHECK-NEXT:    [[TMP7:%.*]] = or i1 false, [[TMP6]]
; CHECK-NEXT:    br i1 [[TMP7]], label [[TRAP:%.*]], label [[TMP8:%.*]]
; CHECK:       8:
; CHECK-NEXT:    store i32 3, ptr [[IDX]], align 4
; CHECK-NEXT:    ret void
; CHECK:       trap:
; CHECK-NEXT:    call void @llvm.trap() #[[ATTR6]]
; CHECK-NEXT:    unreachable
;
  %1 = tail call ptr @calloc(i64 4, i64 %x)
  %idx = getelementptr inbounds i32, ptr %1, i64 8
  store i32 3, ptr %idx, align 4
  ret void
}

; Volatile stores are left uninstrumented.
define void @store_volatile(i64 %x) nounwind {
; CHECK-LABEL: @store_volatile(
; CHECK-NEXT:    [[TMP1:%.*]] = tail call ptr @calloc(i64 4, i64 [[X:%.*]])
; CHECK-NEXT:    [[IDX:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i64 8
; CHECK-NEXT:    store volatile i32 3, ptr [[IDX]], align 4
; CHECK-NEXT:    ret void
;
  %1 = tail call ptr @calloc(i64 4, i64 %x)
  %idx = getelementptr inbounds i32, ptr %1, i64 8
  store volatile i32 3, ptr %idx, align 4
  ret void
}

; realloc with allocsize(1): the second argument is the object size for the check.
define void @f4(i64 %x) nounwind {
; CHECK-LABEL: @f4(
; CHECK-NEXT:    [[TMP1:%.*]] = tail call ptr @realloc(ptr null, i64 [[X:%.*]]) #[[ATTR3:[0-9]+]]
; CHECK-NEXT:    [[IDX:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i64 8
; CHECK-NEXT:    [[TMP2:%.*]] = sub i64 [[X]], 32
; CHECK-NEXT:    [[TMP3:%.*]] = icmp ult i64 [[X]], 32
; CHECK-NEXT:    [[TMP4:%.*]] = icmp ult i64 [[TMP2]], 4
; CHECK-NEXT:    [[TMP5:%.*]] = or i1 [[TMP3]], [[TMP4]]
; CHECK-NEXT:    [[TMP6:%.*]] = or i1 false, [[TMP5]]
; CHECK-NEXT:    br i1 [[TMP6]], label [[TRAP:%.*]], label [[TMP7:%.*]]
; CHECK:       7:
; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr [[IDX]], align 4
; CHECK-NEXT:    ret void
; CHECK:       trap:
; CHECK-NEXT:    call void @llvm.trap() #[[ATTR6]]
; CHECK-NEXT:    unreachable
;
  %1 = tail call ptr @realloc(ptr null, i64 %x) nounwind
  %idx = getelementptr inbounds i32, ptr %1, i64 8
  %2 = load i32, ptr %idx, align 4
  ret void
}

; Variable index into a global constant array: runtime check against size 8.
define void @f5(i64 %x) nounwind {
; CHECK-LABEL: @f5(
; CHECK-NEXT:    [[TMP1:%.*]] = add i64 0, [[X:%.*]]
; CHECK-NEXT:    [[IDX:%.*]] = getelementptr inbounds [8 x i8], ptr @.str, i64 0, i64 [[X]]
; CHECK-NEXT:    [[TMP2:%.*]] = sub i64 8, [[TMP1]]
; CHECK-NEXT:    [[TMP3:%.*]] = icmp ult i64 8, [[TMP1]]
; CHECK-NEXT:    [[TMP4:%.*]] = icmp ult i64 [[TMP2]], 1
; CHECK-NEXT:    [[TMP5:%.*]] = or i1 [[TMP3]], [[TMP4]]
; CHECK-NEXT:    br i1 [[TMP5]], label [[TRAP:%.*]], label [[TMP6:%.*]]
; CHECK:       6:
; CHECK-NEXT:    [[TMP7:%.*]] = load i8, ptr [[IDX]], align 4
; CHECK-NEXT:    ret void
; CHECK:       trap:
; CHECK-NEXT:    call void @llvm.trap() #[[ATTR6]]
; CHECK-NEXT:    unreachable
;
  %idx = getelementptr inbounds [8 x i8], ptr @.str, i64 0, i64 %x
  %1 = load i8, ptr %idx, align 4
  ret void
}

; Same as @f5 but in addrspace(1), whose pointers are 16-bit per the datalayout:
; the offset is truncated to i16 before the check.
define void @f5_as1(i64 %x) nounwind {
; CHECK-LABEL: @f5_as1(
; CHECK-NEXT:    [[X_C:%.*]] = trunc i64 [[X:%.*]] to i16
; CHECK-NEXT:    [[TMP1:%.*]] = add i16 0, [[X_C]]
; CHECK-NEXT:    [[IDX:%.*]] = getelementptr inbounds [8 x i8], ptr addrspace(1) @.str_as1, i64 0, i64 [[X]]
; CHECK-NEXT:    [[TMP2:%.*]] = sub i16 8, [[TMP1]]
; CHECK-NEXT:    [[TMP3:%.*]] = icmp ult i16 8, [[TMP1]]
; CHECK-NEXT:    [[TMP4:%.*]] = icmp ult i16 [[TMP2]], 1
; CHECK-NEXT:    [[TMP5:%.*]] = or i1 [[TMP3]], [[TMP4]]
; CHECK-NEXT:    br i1 [[TMP5]], label [[TRAP:%.*]], label [[TMP6:%.*]]
; CHECK:       6:
; CHECK-NEXT:    [[TMP7:%.*]] = load i8, ptr addrspace(1) [[IDX]], align 4
; CHECK-NEXT:    ret void
; CHECK:       trap:
; CHECK-NEXT:    call void @llvm.trap() #[[ATTR6]]
; CHECK-NEXT:    unreachable
;
  %idx = getelementptr inbounds [8 x i8], ptr addrspace(1) @.str_as1, i64 0, i64 %x
  %1 = load i8, ptr addrspace(1) %idx, align 4
  ret void
}

; addrspace(2) uses a 48-bit index width (p2:64:64:64:48), so the i32 index is
; sign-extended to i48 for the check arithmetic.
define void @f5_as2(i32 %x) nounwind {;
; CHECK-LABEL: @f5_as2(
; CHECK-NEXT:    [[X_C:%.*]] = sext i32 [[X:%.*]] to i48
; CHECK-NEXT:    [[TMP1:%.*]] = add i48 0, [[X_C]]
; CHECK-NEXT:    [[IDX:%.*]] = getelementptr inbounds [8 x i8], ptr addrspace(2) @.str_as2, i32 0, i32 [[X]]
; CHECK-NEXT:    [[TMP2:%.*]] = sub i48 8, [[TMP1]]
; CHECK-NEXT:    [[TMP3:%.*]] = icmp ult i48 8, [[TMP1]]
; CHECK-NEXT:    [[TMP4:%.*]] = icmp ult i48 [[TMP2]], 1
; CHECK-NEXT:    [[TMP5:%.*]] = or i1 [[TMP3]], [[TMP4]]
; CHECK-NEXT:    br i1 [[TMP5]], label [[TRAP:%.*]], label [[TMP6:%.*]]
; CHECK:       6:
; CHECK-NEXT:    [[TMP7:%.*]] = load i8, ptr addrspace(2) [[IDX]], align 4
; CHECK-NEXT:    ret void
; CHECK:       trap:
; CHECK-NEXT:    call void @llvm.trap() #[[ATTR6]]
; CHECK-NEXT:    unreachable
;
  %idx = getelementptr inbounds [8 x i8], ptr addrspace(2) @.str_as2, i32 0, i32 %x
  %1 = load i8, ptr addrspace(2) %idx, align 4
  ret void
}

; Fixed-size alloca fully covered by the access: no instrumentation emitted.
define void @f6(i64 %x) nounwind {
; CHECK-LABEL: @f6(
; CHECK-NEXT:    [[TMP1:%.*]] = alloca i128, align 8
; CHECK-NEXT:    [[TMP2:%.*]] = load i128, ptr [[TMP1]], align 4
; CHECK-NEXT:    ret void
;
  %1 = alloca i128
  %2 = load i128, ptr %1, align 4
  ret void
}

; Dynamically sized alloca: size = 16 * %x must cover the 16-byte load.
define void @f7(i64 %x) nounwind {
; CHECK-LABEL: @f7(
; CHECK-NEXT:    [[TMP1:%.*]] = mul i64 16, [[X:%.*]]
; CHECK-NEXT:    [[TMP2:%.*]] = alloca i128, i64 [[X]], align 8
; CHECK-NEXT:    [[TMP3:%.*]] = sub i64 [[TMP1]], 0
; CHECK-NEXT:    [[TMP4:%.*]] = icmp ult i64 [[TMP3]], 16
; CHECK-NEXT:    [[TMP5:%.*]] = or i1 false, [[TMP4]]
; CHECK-NEXT:    [[TMP6:%.*]] = or i1 false, [[TMP5]]
; CHECK-NEXT:    br i1 [[TMP6]], label [[TRAP:%.*]], label [[TMP7:%.*]]
; CHECK:       7:
; CHECK-NEXT:    [[TMP8:%.*]] = load i128, ptr [[TMP2]], align 4
; CHECK-NEXT:    ret void
; CHECK:       trap:
; CHECK-NEXT:    call void @llvm.trap() #[[ATTR6]]
; CHECK-NEXT:    unreachable
;
  %1 = alloca i128, i64 %x
  %2 = load i128, ptr %1, align 4
  ret void
}

; Select of two equally-sized allocas: both arms are safe, no check needed.
define void @f8() nounwind {
; CHECK-LABEL: @f8(
; CHECK-NEXT:    [[TMP1:%.*]] = alloca i128, align 8
; CHECK-NEXT:    [[TMP2:%.*]] = alloca i128, align 8
; CHECK-NEXT:    [[TMP3:%.*]] = select i1 undef, ptr [[TMP1]], ptr [[TMP2]]
; CHECK-NEXT:    [[TMP4:%.*]] = load i128, ptr [[TMP3]], align 4
; CHECK-NEXT:    ret void
;
  %1 = alloca i128
  %2 = alloca i128
  %3 = select i1 undef, ptr %1, ptr %2
  %4 = load i128, ptr %3, align 4
  ret void
}

; Select between an unknown incoming pointer and an alloca: size is unknown,
; so no check is emitted.
define void @f9(ptr %arg) nounwind {
; CHECK-LABEL: @f9(
; CHECK-NEXT:    [[TMP1:%.*]] = alloca i128, align 8
; CHECK-NEXT:    [[TMP2:%.*]] = select i1 undef, ptr [[ARG:%.*]], ptr [[TMP1]]
; CHECK-NEXT:    [[TMP3:%.*]] = load i128, ptr [[TMP2]], align 4
; CHECK-NEXT:    ret void
;
  %1 = alloca i128
  %2 = select i1 undef, ptr %arg, ptr %1
  %3 = load i128, ptr %2, align 4
  ret void
}

; Select of two dynamically sized allocas: the sizes are selected alongside the
; pointers so the check uses the matching size.
define void @f10(i64 %x, i64 %y) nounwind {
; CHECK-LABEL: @f10(
; CHECK-NEXT:    [[TMP1:%.*]] = mul i64 16, [[X:%.*]]
; CHECK-NEXT:    [[TMP2:%.*]] = alloca i128, i64 [[X]], align 8
; CHECK-NEXT:    [[TMP3:%.*]] = mul i64 16, [[Y:%.*]]
; CHECK-NEXT:    [[TMP4:%.*]] = alloca i128, i64 [[Y]], align 8
; CHECK-NEXT:    [[TMP5:%.*]] = select i1 undef, i64 [[TMP1]], i64 [[TMP3]]
; CHECK-NEXT:    [[TMP6:%.*]] = select i1 undef, ptr [[TMP2]], ptr [[TMP4]]
; CHECK-NEXT:    [[TMP7:%.*]] = sub i64 [[TMP5]], 0
; CHECK-NEXT:    [[TMP8:%.*]] = icmp ult i64 [[TMP7]], 16
; CHECK-NEXT:    [[TMP9:%.*]] = or i1 false, [[TMP8]]
; CHECK-NEXT:    [[TMP10:%.*]] = or i1 false, [[TMP9]]
; CHECK-NEXT:    br i1 [[TMP10]], label [[TRAP:%.*]], label [[TMP11:%.*]]
; CHECK:       11:
; CHECK-NEXT:    [[TMP12:%.*]] = load i128, ptr [[TMP6]], align 4
; CHECK-NEXT:    ret void
; CHECK:       trap:
; CHECK-NEXT:    call void @llvm.trap() #[[ATTR6]]
; CHECK-NEXT:    unreachable
;
  %1 = alloca i128, i64 %x
  %2 = alloca i128, i64 %y
  %3 = select i1 undef, ptr %1, ptr %2
  %4 = load i128, ptr %3, align 4
  ret void
}

; byval(i128) argument has a known 16-byte size: loading at offset 16 is a
; static out-of-bounds access, folded to an unconditional trap.
define void @f11(ptr byval(i128) %x) nounwind {
; CHECK-LABEL: @f11(
; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[X:%.*]], i64 16
; CHECK-NEXT:    br label [[TRAP:%.*]]
; CHECK:       2:
; CHECK-NEXT:    [[TMP3:%.*]] = load i8, ptr [[TMP1]], align 4
; CHECK-NEXT:    ret void
; CHECK:       trap:
; CHECK-NEXT:    call void @llvm.trap() #[[ATTR6]]
; CHECK-NEXT:    unreachable
;
  %1 = getelementptr inbounds i8, ptr %x, i64 16
  %2 = load i8, ptr %1, align 4
  ret void
}

; Same as @f11 but with a 16-bit addrspace(1) pointer.
define void @f11_as1(ptr addrspace(1) byval(i128) %x) nounwind {
; CHECK-LABEL: @f11_as1(
; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[X:%.*]], i16 16
; CHECK-NEXT:    br label [[TRAP:%.*]]
; CHECK:       2:
; CHECK-NEXT:    [[TMP3:%.*]] = load i8, ptr addrspace(1) [[TMP1]], align 4
; CHECK-NEXT:    ret void
; CHECK:       trap:
; CHECK-NEXT:    call void @llvm.trap() #[[ATTR6]]
; CHECK-NEXT:    unreachable
;
  %1 = getelementptr inbounds i8, ptr addrspace(1) %x, i16 16
  %2 = load i8, ptr addrspace(1) %1, align 4
  ret void
}

; Variable index with a variable-size allocation: the check also guards against
; a negative byte offset (icmp slt).
define i64 @f12(i64 %x, i64 %y) nounwind {
; CHECK-LABEL: @f12(
; CHECK-NEXT:    [[TMP1:%.*]] = mul i64 1, [[X:%.*]]
; CHECK-NEXT:    [[TMP2:%.*]] = tail call ptr @calloc(i64 1, i64 [[X]])
; CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[Y:%.*]], 8
; CHECK-NEXT:    [[TMP3:%.*]] = add i64 0, [[DOTIDX]]
; CHECK-NEXT:    [[TMP4:%.*]] = getelementptr inbounds i64, ptr [[TMP2]], i64 [[Y]]
; CHECK-NEXT:    [[TMP5:%.*]] = sub i64 [[TMP1]], [[TMP3]]
; CHECK-NEXT:    [[TMP6:%.*]] = icmp ult i64 [[TMP1]], [[TMP3]]
; CHECK-NEXT:    [[TMP7:%.*]] = icmp ult i64 [[TMP5]], 8
; CHECK-NEXT:    [[TMP8:%.*]] = or i1 [[TMP6]], [[TMP7]]
; CHECK-NEXT:    [[TMP9:%.*]] = icmp slt i64 [[TMP3]], 0
; CHECK-NEXT:    [[TMP10:%.*]] = or i1 [[TMP9]], [[TMP8]]
; CHECK-NEXT:    br i1 [[TMP10]], label [[TRAP:%.*]], label [[TMP11:%.*]]
; CHECK:       11:
; CHECK-NEXT:    [[TMP12:%.*]] = load i64, ptr [[TMP4]], align 8
; CHECK-NEXT:    ret i64 [[TMP12]]
; CHECK:       trap:
; CHECK-NEXT:    call void @llvm.trap() #[[ATTR6]]
; CHECK-NEXT:    unreachable
;
  %1 = tail call ptr @calloc(i64 1, i64 %x)
  %2 = getelementptr inbounds i64, ptr %1, i64 %y
  %3 = load i64, ptr %2, align 8
  ret i64 %3
}

; Volatile loads are left uninstrumented.
define i64 @load_volatile(i64 %x, i64 %y) nounwind {
; CHECK-LABEL: @load_volatile(
; CHECK-NEXT:    [[TMP1:%.*]] = tail call ptr @calloc(i64 1, i64 [[X:%.*]])
; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds i64, ptr [[TMP1]], i64 [[Y:%.*]]
; CHECK-NEXT:    [[TMP3:%.*]] = load volatile i64, ptr [[TMP2]], align 8
; CHECK-NEXT:    ret i64 [[TMP3]]
;
  %1 = tail call ptr @calloc(i64 1, i64 %x)
  %2 = getelementptr inbounds i64, ptr %1, i64 %y
  %3 = load volatile i64, ptr %2, align 8
  ret i64 %3
}

; PR17402
define void @f13() nounwind {
; CHECK-LABEL: @f13(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br label [[ALIVE:%.*]]
; CHECK:       dead:
; CHECK-NEXT:    [[INCDEC_PTR:%.*]] = getelementptr inbounds i32, ptr [[INCDEC_PTR]], i64 1
; CHECK-NEXT:    [[L:%.*]] = load i32, ptr [[INCDEC_PTR]], align 4
; CHECK-NEXT:    br label [[ALIVE]]
; CHECK:       alive:
; CHECK-NEXT:    ret void
;
entry:
  br label %alive

dead:
  ; Self-referential GEPs can occur in dead code.
  %incdec.ptr = getelementptr inbounds i32, ptr %incdec.ptr, i64 1
  %l = load i32, ptr %incdec.ptr
  br label %alive

alive:
  ret void
}

; Check that merging sizes in a phi works.
define i8 @f14(i1 %i) {
; CHECK-LABEL: @f14(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br i1 [[I:%.*]], label [[BB1:%.*]], label [[BB2:%.*]]
; CHECK:       bb1:
; CHECK-NEXT:    [[A:%.*]] = alloca [32 x i8], align 1
; CHECK-NEXT:    [[G:%.*]] = getelementptr i8, ptr [[A]], i32 32
; CHECK-NEXT:    br label [[BB2]]
; CHECK:       bb2:
; CHECK-NEXT:    [[TMP0:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ 32, [[BB1]] ]
; CHECK-NEXT:    [[TMP1:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ 32, [[BB1]] ]
; CHECK-NEXT:    [[ALLOC:%.*]] = phi ptr [ null, [[ENTRY]] ], [ [[G]], [[BB1]] ]
; CHECK-NEXT:    [[IND:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ -4, [[BB1]] ]
; CHECK-NEXT:    [[TMP2:%.*]] = add i64 [[TMP1]], [[IND]]
; CHECK-NEXT:    [[P:%.*]] = getelementptr i8, ptr [[ALLOC]], i64 [[IND]]
; CHECK-NEXT:    [[TMP3:%.*]] = sub i64 [[TMP0]], [[TMP2]]
; CHECK-NEXT:    [[TMP4:%.*]] = icmp ult i64 [[TMP0]], [[TMP2]]
; CHECK-NEXT:    [[TMP5:%.*]] = icmp ult i64 [[TMP3]], 1
; CHECK-NEXT:    [[TMP6:%.*]] = or i1 [[TMP4]], [[TMP5]]
; CHECK-NEXT:    br i1 [[TMP6]], label [[TRAP:%.*]], label [[TMP7:%.*]]
; CHECK:       7:
; CHECK-NEXT:    [[RET:%.*]] = load i8, ptr [[P]], align 1
; CHECK-NEXT:    ret i8 [[RET]]
; CHECK:       trap:
; CHECK-NEXT:    call void @llvm.trap() #[[ATTR6]]
; CHECK-NEXT:    unreachable
;
entry:
  br i1 %i, label %bb1, label %bb2

bb1:
  %a = alloca [32 x i8]
  %g = getelementptr i8, ptr %a, i32 32
  br label %bb2

bb2:
  %alloc = phi ptr [ null, %entry ], [ %g, %bb1 ]
  %ind = phi i64 [ 0, %entry ], [ -4, %bb1 ]
  %p = getelementptr i8, ptr %alloc, i64 %ind
  %ret = load i8, ptr %p
  ret i8 %ret
}

; Check that merging offsets in a phi works.
define i8 @f15(i1 %i) {
; CHECK-LABEL: @f15(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[A1:%.*]] = alloca [32 x i8], align 1
; CHECK-NEXT:    [[G1:%.*]] = getelementptr i8, ptr [[A1]], i32 100
; CHECK-NEXT:    br i1 [[I:%.*]], label [[BB1:%.*]], label [[BB2:%.*]]
; CHECK:       bb1:
; CHECK-NEXT:    [[A2:%.*]] = alloca [32 x i8], align 1
; CHECK-NEXT:    [[G2:%.*]] = getelementptr i8, ptr [[A2]], i32 16
; CHECK-NEXT:    br label [[BB2]]
; CHECK:       bb2:
; CHECK-NEXT:    [[TMP0:%.*]] = phi i64 [ 100, [[ENTRY:%.*]] ], [ 16, [[BB1]] ]
; CHECK-NEXT:    [[ALLOC:%.*]] = phi ptr [ [[G1]], [[ENTRY]] ], [ [[G2]], [[BB1]] ]
; CHECK-NEXT:    [[TMP1:%.*]] = sub i64 32, [[TMP0]]
; CHECK-NEXT:    [[TMP2:%.*]] = icmp ult i64 32, [[TMP0]]
; CHECK-NEXT:    [[TMP3:%.*]] = icmp ult i64 [[TMP1]], 1
; CHECK-NEXT:    [[TMP4:%.*]] = or i1 [[TMP2]], [[TMP3]]
; CHECK-NEXT:    br i1 [[TMP4]], label [[TRAP:%.*]], label [[TMP5:%.*]]
; CHECK:       5:
; CHECK-NEXT:    [[RET:%.*]] = load i8, ptr [[ALLOC]], align 1
; CHECK-NEXT:    ret i8 [[RET]]
; CHECK:       trap:
; CHECK-NEXT:    call void @llvm.trap() #[[ATTR6]]
; CHECK-NEXT:    unreachable
;
entry:
  %a1 = alloca [32 x i8]
  %g1 = getelementptr i8, ptr %a1, i32 100
  br i1 %i, label %bb1, label %bb2

bb1:
  %a2 = alloca [32 x i8]
  %g2 = getelementptr i8, ptr %a2, i32 16
  br label %bb2

bb2:
  %alloc = phi ptr [ %g1, %entry ], [ %g2, %bb1 ]
  %ret = load i8, ptr %alloc
  ret i8 %ret
}

; Vector load: the checked access size is the full 16-byte vector, even though
; the GEP element type is i64.
define <4 x i32> @load_vector(i64 %y) nounwind {
; CHECK-LABEL: @load_vector(
; CHECK-NEXT:    [[TMP1:%.*]] = tail call ptr @calloc(i64 1, i64 256)
; CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[Y:%.*]], 8
; CHECK-NEXT:    [[TMP2:%.*]] = add i64 0, [[DOTIDX]]
; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds i64, ptr [[TMP1]], i64 [[Y]]
; CHECK-NEXT:    [[TMP4:%.*]] = sub i64 256, [[TMP2]]
; CHECK-NEXT:    [[TMP5:%.*]] = icmp ult i64 256, [[TMP2]]
; CHECK-NEXT:    [[TMP6:%.*]] = icmp ult i64 [[TMP4]], 16
; CHECK-NEXT:    [[TMP7:%.*]] = or i1 [[TMP5]], [[TMP6]]
; CHECK-NEXT:    br i1 [[TMP7]], label [[TRAP:%.*]], label [[TMP8:%.*]]
; CHECK:       8:
; CHECK-NEXT:    [[TMP9:%.*]] = load <4 x i32>, ptr [[TMP3]], align 8
; CHECK-NEXT:    ret <4 x i32> [[TMP9]]
; CHECK:       trap:
; CHECK-NEXT:    call void @llvm.trap() #[[ATTR6]]
; CHECK-NEXT:    unreachable
;
  %1 = tail call ptr @calloc(i64 1, i64 256)
  %2 = getelementptr inbounds i64, ptr %1, i64 %y
  %3 = load <4 x i32>, ptr %2, align 8
  ret <4 x i32> %3
}

; Scalable vector load: the access size is computed at runtime via llvm.vscale.
define <vscale x 1 x i32> @load_scalable_vector(i64 %y) nounwind {
; CHECK-LABEL: @load_scalable_vector(
; CHECK-NEXT:    [[TMP1:%.*]] = tail call ptr @calloc(i64 1, i64 256)
; CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[Y:%.*]], 8
; CHECK-NEXT:    [[TMP2:%.*]] = add i64 0, [[DOTIDX]]
; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds i64, ptr [[TMP1]], i64 [[Y]]
; CHECK-NEXT:    [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP5:%.*]] = mul i64 [[TMP4]], 4
; CHECK-NEXT:    [[TMP6:%.*]] = sub i64 256, [[TMP2]]
; CHECK-NEXT:    [[TMP7:%.*]] = icmp ult i64 256, [[TMP2]]
; CHECK-NEXT:    [[TMP8:%.*]] = icmp ult i64 [[TMP6]], [[TMP5]]
; CHECK-NEXT:    [[TMP9:%.*]] = or i1 [[TMP7]], [[TMP8]]
; CHECK-NEXT:    br i1 [[TMP9]], label [[TRAP:%.*]], label [[TMP10:%.*]]
; CHECK:       10:
; CHECK-NEXT:    [[TMP11:%.*]] = load <vscale x 1 x i32>, ptr [[TMP3]], align 8
; CHECK-NEXT:    ret <vscale x 1 x i32> [[TMP11]]
; CHECK:       trap:
; CHECK-NEXT:    call void @llvm.trap() #[[ATTR6]]
; CHECK-NEXT:    unreachable
;
  %1 = tail call ptr @calloc(i64 1, i64 256)
  %2 = getelementptr inbounds i64, ptr %1, i64 %y
  %3 = load <vscale x 1 x i32>, ptr %2, align 8
  ret <vscale x 1 x i32> %3
}

; Scalable-vector alloca with an element count: both the allocation size and
; the per-element offset involve llvm.vscale.
define void @scalable_alloca(i64 %y) nounwind {
; CHECK-LABEL: @scalable_alloca(
; CHECK-NEXT:    [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP2:%.*]] = mul i64 [[TMP1]], 8
; CHECK-NEXT:    [[TMP3:%.*]] = mul i64 [[TMP2]], 5
; CHECK-NEXT:    [[TMP4:%.*]] = alloca <vscale x 4 x i16>, i32 5, align 8
; CHECK-NEXT:    [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP6:%.*]] = mul i64 [[TMP5]], 8
; CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[Y:%.*]], [[TMP6]]
; CHECK-NEXT:    [[TMP7:%.*]] = add i64 0, [[DOTIDX]]
; CHECK-NEXT:    [[TMP8:%.*]] = getelementptr inbounds <vscale x 4 x i16>, ptr [[TMP4]], i64 [[Y]]
; CHECK-NEXT:    [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP10:%.*]] = mul i64 [[TMP9]], 8
; CHECK-NEXT:    [[TMP11:%.*]] = sub i64 [[TMP3]], [[TMP7]]
; CHECK-NEXT:    [[TMP12:%.*]] = icmp ult i64 [[TMP3]], [[TMP7]]
; CHECK-NEXT:    [[TMP13:%.*]] = icmp ult i64 [[TMP11]], [[TMP10]]
; CHECK-NEXT:    [[TMP14:%.*]] = or i1 [[TMP12]], [[TMP13]]
; CHECK-NEXT:    [[TMP15:%.*]] = icmp slt i64 [[TMP7]], 0
; CHECK-NEXT:    [[TMP16:%.*]] = or i1 [[TMP15]], [[TMP14]]
; CHECK-NEXT:    br i1 [[TMP16]], label [[TRAP:%.*]], label [[TMP17:%.*]]
; CHECK:       17:
; CHECK-NEXT:    [[TMP18:%.*]] = load <vscale x 4 x i16>, ptr [[TMP8]], align 4
; CHECK-NEXT:    ret void
; CHECK:       trap:
; CHECK-NEXT:    call void @llvm.trap() #[[ATTR6]]
; CHECK-NEXT:    unreachable
;
  %1 = alloca <vscale x 4 x i16>, i32 5
  %2 = getelementptr inbounds <vscale x 4 x i16>, ptr %1, i64 %y
  %3 = load <vscale x 4 x i16>, ptr %2, align 4
  ret void
}

; Scalable-vector alloca with an implicit element count of 1.
define void @scalable_alloca2(i64 %y) nounwind {
; CHECK-LABEL: @scalable_alloca2(
; CHECK-NEXT:    [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP2:%.*]] = mul i64 [[TMP1]], 32
; CHECK-NEXT:    [[TMP3:%.*]] = mul i64 [[TMP2]], 1
; CHECK-NEXT:    [[TMP4:%.*]] = alloca <vscale x 4 x i64>, align 32
; CHECK-NEXT:    [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP6:%.*]] = mul i64 [[TMP5]], 32
; CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[Y:%.*]], [[TMP6]]
; CHECK-NEXT:    [[TMP7:%.*]] = add i64 0, [[DOTIDX]]
; CHECK-NEXT:    [[TMP8:%.*]] = getelementptr inbounds <vscale x 4 x i64>, ptr [[TMP4]], i64 [[Y]]
; CHECK-NEXT:    [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP10:%.*]] = mul i64 [[TMP9]], 32
; CHECK-NEXT:    [[TMP11:%.*]] = sub i64 [[TMP3]], [[TMP7]]
; CHECK-NEXT:    [[TMP12:%.*]] = icmp ult i64 [[TMP3]], [[TMP7]]
; CHECK-NEXT:    [[TMP13:%.*]] = icmp ult i64 [[TMP11]], [[TMP10]]
; CHECK-NEXT:    [[TMP14:%.*]] = or i1 [[TMP12]], [[TMP13]]
; CHECK-NEXT:    [[TMP15:%.*]] = icmp slt i64 [[TMP7]], 0
; CHECK-NEXT:    [[TMP16:%.*]] = or i1 [[TMP15]], [[TMP14]]
; CHECK-NEXT:    br i1 [[TMP16]], label [[TRAP:%.*]], label [[TMP17:%.*]]
; CHECK:       17:
; CHECK-NEXT:    [[TMP18:%.*]] = load <vscale x 4 x i64>, ptr [[TMP8]], align 4
; CHECK-NEXT:    ret void
; CHECK:       trap:
; CHECK-NEXT:    call void @llvm.trap() #[[ATTR6]]
; CHECK-NEXT:    unreachable
;
  %1 = alloca <vscale x 4 x i64>
  %2 = getelementptr inbounds <vscale x 4 x i64>, ptr %1, i64 %y
  %3 = load <vscale x 4 x i64>, ptr %2, align 4
  ret void
}