; xref: /llvm-project/llvm/test/Transforms/InstCombine/assume.ll (revision 2e0af16c9383bb5ed0eda236eb34b92dfb570235)
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=instcombine -S  -instcombine-infinite-loop-threshold=2  | FileCheck --check-prefixes=CHECK,DEFAULT %s
; RUN: opt < %s -passes=instcombine --enable-knowledge-retention -S  -instcombine-infinite-loop-threshold=2  | FileCheck --check-prefixes=CHECK,BUNDLES %s

target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"

declare void @llvm.assume(i1) #1

; Check that the alignment has been upgraded and that the assume has not
; been removed:

define i32 @foo1(ptr %a) #0 {
; DEFAULT-LABEL: @foo1(
; DEFAULT-NEXT:    [[T0:%.*]] = load i32, ptr [[A:%.*]], align 32
; DEFAULT-NEXT:    [[PTRINT:%.*]] = ptrtoint ptr [[A]] to i64
; DEFAULT-NEXT:    [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 31
; DEFAULT-NEXT:    [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
; DEFAULT-NEXT:    tail call void @llvm.assume(i1 [[MASKCOND]])
; DEFAULT-NEXT:    ret i32 [[T0]]
;
; BUNDLES-LABEL: @foo1(
; BUNDLES-NEXT:    [[T0:%.*]] = load i32, ptr [[A:%.*]], align 32
; BUNDLES-NEXT:    call void @llvm.assume(i1 true) [ "align"(ptr [[A]], i64 32) ]
; BUNDLES-NEXT:    ret i32 [[T0]]
;
  %t0 = load i32, ptr %a, align 4
  %ptrint = ptrtoint ptr %a to i64
  %maskedptr = and i64 %ptrint, 31
  %maskcond = icmp eq i64 %maskedptr, 0
  tail call void @llvm.assume(i1 %maskcond)
  ret i32 %t0
}

; Same check as in @foo1, but make sure it works if the assume is first too.

define i32 @foo2(ptr %a) #0 {
; DEFAULT-LABEL: @foo2(
; DEFAULT-NEXT:    [[PTRINT:%.*]] = ptrtoint ptr [[A:%.*]] to i64
; DEFAULT-NEXT:    [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 31
; DEFAULT-NEXT:    [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
; DEFAULT-NEXT:    tail call void @llvm.assume(i1 [[MASKCOND]])
; DEFAULT-NEXT:    [[T0:%.*]] = load i32, ptr [[A]], align 32
; DEFAULT-NEXT:    ret i32 [[T0]]
;
; BUNDLES-LABEL: @foo2(
; BUNDLES-NEXT:    call void @llvm.assume(i1 true) [ "align"(ptr [[A:%.*]], i64 32) ]
; BUNDLES-NEXT:    [[T0:%.*]] = load i32, ptr [[A]], align 32
; BUNDLES-NEXT:    ret i32 [[T0]]
;
  %ptrint = ptrtoint ptr %a to i64
  %maskedptr = and i64 %ptrint, 31
  %maskcond = icmp eq i64 %maskedptr, 0
  tail call void @llvm.assume(i1 %maskcond)
  %t0 = load i32, ptr %a, align 4
  ret i32 %t0
}

define i32 @simple(i32 %a) #1 {
; CHECK-LABEL: @simple(
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[A:%.*]], 4
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    ret i32 [[A]]
;
  %cmp = icmp eq i32 %a, 4
  tail call void @llvm.assume(i1 %cmp)
  ret i32 %a
}

; An assume of an 'and' (or its logical select form) is split into one
; assume per operand; an assume of a negated 'or' becomes assumes of the
; negated operands (see the CHECK lines below).

define i32 @can1(i1 %a, i1 %b, i1 %c) {
; CHECK-LABEL: @can1(
; CHECK-NEXT:    call void @llvm.assume(i1 [[A:%.*]])
; CHECK-NEXT:    call void @llvm.assume(i1 [[B:%.*]])
; CHECK-NEXT:    call void @llvm.assume(i1 [[C:%.*]])
; CHECK-NEXT:    ret i32 5
;
  %and1 = and i1 %a, %b
  %and  = and i1 %and1, %c
  tail call void @llvm.assume(i1 %and)
  ret i32 5
}

define i32 @can1_logical(i1 %a, i1 %b, i1 %c) {
; CHECK-LABEL: @can1_logical(
; CHECK-NEXT:    call void @llvm.assume(i1 [[A:%.*]])
; CHECK-NEXT:    call void @llvm.assume(i1 [[B:%.*]])
; CHECK-NEXT:    call void @llvm.assume(i1 [[C:%.*]])
; CHECK-NEXT:    ret i32 5
;
  %and1 = select i1 %a, i1 %b, i1 false
  %and  = select i1 %and1, i1 %c, i1 false
  tail call void @llvm.assume(i1 %and)
  ret i32 5
}

define i32 @can2(i1 %a, i1 %b, i1 %c) {
; CHECK-LABEL: @can2(
; CHECK-NEXT:    [[TMP1:%.*]] = xor i1 [[A:%.*]], true
; CHECK-NEXT:    call void @llvm.assume(i1 [[TMP1]])
; CHECK-NEXT:    [[TMP2:%.*]] = xor i1 [[B:%.*]], true
; CHECK-NEXT:    call void @llvm.assume(i1 [[TMP2]])
; CHECK-NEXT:    ret i32 5
;
  %v = or i1 %a, %b
  %w = xor i1 %v, 1
  tail call void @llvm.assume(i1 %w)
  ret i32 5
}

define i32 @can2_logical(i1 %a, i1 %b, i1 %c) {
; CHECK-LABEL: @can2_logical(
; CHECK-NEXT:    [[TMP1:%.*]] = xor i1 [[A:%.*]], true
; CHECK-NEXT:    call void @llvm.assume(i1 [[TMP1]])
; CHECK-NEXT:    [[TMP2:%.*]] = xor i1 [[B:%.*]], true
; CHECK-NEXT:    call void @llvm.assume(i1 [[TMP2]])
; CHECK-NEXT:    ret i32 5
;
  %v = select i1 %a, i1 true, i1 %b
  %w = xor i1 %v, 1
  tail call void @llvm.assume(i1 %w)
  ret i32 5
}

define i32 @bar1(i32 %a) #0 {
; CHECK-LABEL: @bar1(
; CHECK-NEXT:    [[AND:%.*]] = and i32 [[A:%.*]], 7
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[AND]], 1
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    ret i32 1
;
  %and1 = and i32 %a, 3
  %and = and i32 %a, 7
  %cmp = icmp eq i32 %and, 1
  tail call void @llvm.assume(i1 %cmp)
  ret i32 %and1
}

define i32 @bar2(i32 %a) #0 {
; CHECK-LABEL: @bar2(
; CHECK-NEXT:    [[AND:%.*]] = and i32 [[A:%.*]], 7
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[AND]], 1
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    ret i32 1
;
  %and = and i32 %a, 7
  %cmp = icmp eq i32 %and, 1
  tail call void @llvm.assume(i1 %cmp)
  %and1 = and i32 %a, 3
  ret i32 %and1
}

define i32 @bar3(i32 %a, i1 %x, i1 %y) #0 {
; CHECK-LABEL: @bar3(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[X:%.*]])
; CHECK-NEXT:    [[AND:%.*]] = and i32 [[A:%.*]], 7
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[AND]], 1
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[Y:%.*]])
; CHECK-NEXT:    ret i32 1
;
entry:
  %and1 = and i32 %a, 3

; Don't be fooled by other assumes around.

  tail call void @llvm.assume(i1 %x)

  %and = and i32 %a, 7
  %cmp = icmp eq i32 %and, 1
  tail call void @llvm.assume(i1 %cmp)

  tail call void @llvm.assume(i1 %y)

  ret i32 %and1
}

; If we allow recursive known bits queries based on
; assumptions, we could do better here:
; a == b and a & 7 == 1, so b & 7 == 1, so b & 3 == 1, so return 1.

define i32 @known_bits_recursion_via_assumes(i32 %a, i32 %b) {
; CHECK-LABEL: @known_bits_recursion_via_assumes(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[AND1:%.*]] = and i32 [[B:%.*]], 3
; CHECK-NEXT:    [[AND:%.*]] = and i32 [[A:%.*]], 7
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[AND]], 1
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    [[CMP2:%.*]] = icmp eq i32 [[A]], [[B]]
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[CMP2]])
; CHECK-NEXT:    ret i32 [[AND1]]
;
entry:
  %and1 = and i32 %b, 3
  %and = and i32 %a, 7
  %cmp = icmp eq i32 %and, 1
  tail call void @llvm.assume(i1 %cmp)
  %cmp2 = icmp eq i32 %a, %b
  tail call void @llvm.assume(i1 %cmp2)
  ret i32 %and1
}

define i32 @icmp1(i32 %a) #0 {
; CHECK-LABEL: @icmp1(
; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[A:%.*]], 5
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    [[CONV:%.*]] = zext i1 [[CMP]] to i32
; CHECK-NEXT:    ret i32 [[CONV]]
;
  %cmp = icmp sgt i32 %a, 5
  tail call void @llvm.assume(i1 %cmp)
  %conv = zext i1 %cmp to i32
  ret i32 %conv
}

define i32 @icmp2(i32 %a) #0 {
; CHECK-LABEL: @icmp2(
; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[A:%.*]], 5
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    ret i32 0
;
  %cmp = icmp sgt i32 %a, 5
  tail call void @llvm.assume(i1 %cmp)
  %t0 = zext i1 %cmp to i32
  %lnot.ext = xor i32 %t0, 1
  ret i32 %lnot.ext
}

; If the 'not' of a condition is known true, then the condition must be false.

define i1 @assume_not(i1 %cond) {
; CHECK-LABEL: @assume_not(
; CHECK-NEXT:    [[NOTCOND:%.*]] = xor i1 [[COND:%.*]], true
; CHECK-NEXT:    call void @llvm.assume(i1 [[NOTCOND]])
; CHECK-NEXT:    ret i1 [[COND]]
;
  %notcond = xor i1 %cond, true
  call void @llvm.assume(i1 %notcond)
  ret i1 %cond
}

declare void @escape(ptr %a)

; Canonicalize a nonnull assumption on a load into metadata form.

define i32 @bundle1(ptr %P) {
; CHECK-LABEL: @bundle1(
; CHECK-NEXT:    tail call void @llvm.assume(i1 true) [ "nonnull"(ptr [[P:%.*]]) ]
; CHECK-NEXT:    [[LOAD:%.*]] = load i32, ptr [[P]], align 4
; CHECK-NEXT:    ret i32 [[LOAD]]
;
  tail call void @llvm.assume(i1 true) ["nonnull"(ptr %P)]
  %load = load i32, ptr %P
  ret i32 %load
}

define i32 @bundle2(ptr %P) {
; CHECK-LABEL: @bundle2(
; CHECK-NEXT:    [[LOAD:%.*]] = load i32, ptr [[P:%.*]], align 4
; CHECK-NEXT:    ret i32 [[LOAD]]
;
  tail call void @llvm.assume(i1 true) ["ignore"(ptr undef)]
  %load = load i32, ptr %P
  ret i32 %load
}

define i1 @nonnull1(ptr %a) {
; CHECK-LABEL: @nonnull1(
; CHECK-NEXT:    [[LOAD:%.*]] = load ptr, ptr [[A:%.*]], align 8, !nonnull [[META6:![0-9]+]], !noundef [[META6]]
; CHECK-NEXT:    tail call void @escape(ptr nonnull [[LOAD]])
; CHECK-NEXT:    ret i1 false
;
  %load = load ptr, ptr %a
  %cmp = icmp ne ptr %load, null
  tail call void @llvm.assume(i1 %cmp)
  tail call void @escape(ptr %load)
  %rval = icmp eq ptr %load, null
  ret i1 %rval
}

; Make sure the above canonicalization applies only
; to pointer types.  Doing otherwise would be illegal.

define i1 @nonnull2(ptr %a) {
; CHECK-LABEL: @nonnull2(
; CHECK-NEXT:    [[LOAD:%.*]] = load i32, ptr [[A:%.*]], align 4
; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i32 [[LOAD]], 0
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    ret i1 false
;
  %load = load i32, ptr %a
  %cmp = icmp ne i32 %load, 0
  tail call void @llvm.assume(i1 %cmp)
  %rval = icmp eq i32 %load, 0
  ret i1 %rval
}

; Make sure the above canonicalization does not trigger
; if the assume is control dependent on something else

define i1 @nonnull3(ptr %a, i1 %control) {
; FIXME: in the BUNDLES version we could duplicate the load and keep the assume nonnull.
; DEFAULT-LABEL: @nonnull3(
; DEFAULT-NEXT:  entry:
; DEFAULT-NEXT:    [[LOAD:%.*]] = load ptr, ptr [[A:%.*]], align 8
; DEFAULT-NEXT:    [[CMP:%.*]] = icmp ne ptr [[LOAD]], null
; DEFAULT-NEXT:    br i1 [[CONTROL:%.*]], label [[TAKEN:%.*]], label [[NOT_TAKEN:%.*]]
; DEFAULT:       taken:
; DEFAULT-NEXT:    tail call void @llvm.assume(i1 [[CMP]])
; DEFAULT-NEXT:    ret i1 false
; DEFAULT:       not_taken:
; DEFAULT-NEXT:    [[RVAL_2:%.*]] = icmp sgt ptr [[LOAD]], null
; DEFAULT-NEXT:    ret i1 [[RVAL_2]]
;
; BUNDLES-LABEL: @nonnull3(
; BUNDLES-NEXT:  entry:
; BUNDLES-NEXT:    br i1 [[CONTROL:%.*]], label [[TAKEN:%.*]], label [[NOT_TAKEN:%.*]]
; BUNDLES:       taken:
; BUNDLES-NEXT:    ret i1 false
; BUNDLES:       not_taken:
; BUNDLES-NEXT:    [[LOAD:%.*]] = load ptr, ptr [[A:%.*]], align 8
; BUNDLES-NEXT:    [[RVAL_2:%.*]] = icmp sgt ptr [[LOAD]], null
; BUNDLES-NEXT:    ret i1 [[RVAL_2]]
;
entry:
  %load = load ptr, ptr %a
  %cmp = icmp ne ptr %load, null
  br i1 %control, label %taken, label %not_taken
taken:
  tail call void @llvm.assume(i1 %cmp)
  %rval = icmp eq ptr %load, null
  ret i1 %rval
not_taken:
  %rval.2 = icmp sgt ptr %load, null
  ret i1 %rval.2
}

; Make sure the above canonicalization does not trigger
; if the path from the load to the assume is potentially
; interrupted by an exception being thrown

define i1 @nonnull4(ptr %a) {
; DEFAULT-LABEL: @nonnull4(
; DEFAULT-NEXT:    [[LOAD:%.*]] = load ptr, ptr [[A:%.*]], align 8
; DEFAULT-NEXT:    tail call void @escape(ptr [[LOAD]])
; DEFAULT-NEXT:    [[CMP:%.*]] = icmp ne ptr [[LOAD]], null
; DEFAULT-NEXT:    tail call void @llvm.assume(i1 [[CMP]])
; DEFAULT-NEXT:    ret i1 false
;
; BUNDLES-LABEL: @nonnull4(
; BUNDLES-NEXT:    [[LOAD:%.*]] = load ptr, ptr [[A:%.*]], align 8
; BUNDLES-NEXT:    tail call void @escape(ptr [[LOAD]])
; BUNDLES-NEXT:    call void @llvm.assume(i1 true) [ "nonnull"(ptr [[LOAD]]) ]
; BUNDLES-NEXT:    ret i1 false
;
  %load = load ptr, ptr %a
  ;; This call may throw!
  tail call void @escape(ptr %load)
  %cmp = icmp ne ptr %load, null
  tail call void @llvm.assume(i1 %cmp)
  %rval = icmp eq ptr %load, null
  ret i1 %rval
}
define i1 @nonnull5(ptr %a) {
; CHECK-LABEL: @nonnull5(
; CHECK-NEXT:    [[LOAD:%.*]] = load ptr, ptr [[A:%.*]], align 8
; CHECK-NEXT:    tail call void @escape(ptr [[LOAD]])
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt ptr [[LOAD]], null
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    ret i1 false
;
  %load = load ptr, ptr %a
  ;; This call may throw!
  tail call void @escape(ptr %load)
  %integral = ptrtoint ptr %load to i64
  %cmp = icmp slt i64 %integral, 0
  tail call void @llvm.assume(i1 %cmp) ; %load has at least highest bit set
  %rval = icmp eq ptr %load, null
  ret i1 %rval
}

; PR35846 - https://bugs.llvm.org/show_bug.cgi?id=35846

define i32 @assumption_conflicts_with_known_bits(i32 %a, i32 %b) {
; CHECK-LABEL: @assumption_conflicts_with_known_bits(
; CHECK-NEXT:    store i1 true, ptr poison, align 1
; CHECK-NEXT:    ret i32 poison
;
  %and1 = and i32 %b, 3
  %B1 = lshr i32 %and1, %and1
  %B3 = shl nuw nsw i32 %and1, %B1
  %cmp = icmp eq i32 %B3, 1
  tail call void @llvm.assume(i1 %cmp)
  %cmp2 = icmp eq i32 %B1, %B3
  tail call void @llvm.assume(i1 %cmp2)
  ret i32 %and1
}

; PR37726 - https://bugs.llvm.org/show_bug.cgi?id=37726
; There's a loophole in eliminating a redundant assumption when
; we have conflicting assumptions. Verify that debuginfo doesn't
; get in the way of the fold.

define void @debug_interference(i8 %x) {
; CHECK-LABEL: @debug_interference(
; CHECK-NEXT:    tail call void @llvm.dbg.value(metadata i32 5, metadata [[META7:![0-9]+]], metadata !DIExpression()), !dbg [[DBG9:![0-9]+]]
; CHECK-NEXT:    store i1 true, ptr poison, align 1
; CHECK-NEXT:    ret void
;
  %cmp1 = icmp eq i8 %x, 0
  %cmp2 = icmp ne i8 %x, 0
  tail call void @llvm.assume(i1 %cmp1)
  tail call void @llvm.dbg.value(metadata i32 5, metadata !1, metadata !DIExpression()), !dbg !9
  tail call void @llvm.assume(i1 %cmp1)
  tail call void @llvm.dbg.value(metadata i32 5, metadata !1, metadata !DIExpression()), !dbg !9
  tail call void @llvm.assume(i1 %cmp2)
  tail call void @llvm.dbg.value(metadata i32 5, metadata !1, metadata !DIExpression()), !dbg !9
  tail call void @llvm.assume(i1 %cmp2)
  ret void
}

; This would crash.
; Does it ever make sense to peek through a bitcast of the icmp operand?

define i32 @PR40940(<4 x i8> %x) {
; CHECK-LABEL: @PR40940(
; CHECK-NEXT:    [[SHUF:%.*]] = shufflevector <4 x i8> [[X:%.*]], <4 x i8> undef, <4 x i32> <i32 1, i32 1, i32 2, i32 3>
; CHECK-NEXT:    [[T2:%.*]] = bitcast <4 x i8> [[SHUF]] to i32
; CHECK-NEXT:    [[T3:%.*]] = icmp ult i32 [[T2]], 65536
; CHECK-NEXT:    call void @llvm.assume(i1 [[T3]])
; CHECK-NEXT:    ret i32 [[T2]]
;
  %shuf = shufflevector <4 x i8> %x, <4 x i8> undef, <4 x i32> <i32 1, i32 1, i32 2, i32 3>
  %t2 = bitcast <4 x i8> %shuf to i32
  %t3 = icmp ult i32 %t2, 65536
  call void @llvm.assume(i1 %t3)
  ret i32 %t2
}

define i1 @nonnull3A(ptr %a, i1 %control) {
; DEFAULT-LABEL: @nonnull3A(
; DEFAULT-NEXT:  entry:
; DEFAULT-NEXT:    [[LOAD:%.*]] = load ptr, ptr [[A:%.*]], align 8
; DEFAULT-NEXT:    br i1 [[CONTROL:%.*]], label [[TAKEN:%.*]], label [[NOT_TAKEN:%.*]]
; DEFAULT:       taken:
; DEFAULT-NEXT:    [[CMP:%.*]] = icmp ne ptr [[LOAD]], null
; DEFAULT-NEXT:    call void @llvm.assume(i1 [[CMP]])
; DEFAULT-NEXT:    ret i1 [[CMP]]
; DEFAULT:       not_taken:
; DEFAULT-NEXT:    [[RVAL_2:%.*]] = icmp sgt ptr [[LOAD]], null
; DEFAULT-NEXT:    ret i1 [[RVAL_2]]
;
; BUNDLES-LABEL: @nonnull3A(
; BUNDLES-NEXT:  entry:
; BUNDLES-NEXT:    br i1 [[CONTROL:%.*]], label [[TAKEN:%.*]], label [[NOT_TAKEN:%.*]]
; BUNDLES:       taken:
; BUNDLES-NEXT:    ret i1 true
; BUNDLES:       not_taken:
; BUNDLES-NEXT:    [[LOAD:%.*]] = load ptr, ptr [[A:%.*]], align 8
; BUNDLES-NEXT:    [[RVAL_2:%.*]] = icmp sgt ptr [[LOAD]], null
; BUNDLES-NEXT:    ret i1 [[RVAL_2]]
;
entry:
  %load = load ptr, ptr %a
  %cmp = icmp ne ptr %load, null
  br i1 %control, label %taken, label %not_taken
taken:
  call void @llvm.assume(i1 %cmp)
  ret i1 %cmp
not_taken:
  call void @llvm.assume(i1 %cmp)
  %rval.2 = icmp sgt ptr %load, null
  ret i1 %rval.2
}

define i1 @nonnull3B(ptr %a, i1 %control) {
; CHECK-LABEL: @nonnull3B(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br i1 [[CONTROL:%.*]], label [[TAKEN:%.*]], label [[NOT_TAKEN:%.*]]
; CHECK:       taken:
; CHECK-NEXT:    [[LOAD:%.*]] = load ptr, ptr [[A:%.*]], align 8
; CHECK-NEXT:    [[CMP:%.*]] = icmp ne ptr [[LOAD]], null
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]]) [ "nonnull"(ptr [[LOAD]]) ]
; CHECK-NEXT:    ret i1 [[CMP]]
; CHECK:       not_taken:
; CHECK-NEXT:    ret i1 [[CONTROL]]
;
entry:
  %load = load ptr, ptr %a
  %cmp = icmp ne ptr %load, null
  br i1 %control, label %taken, label %not_taken
taken:
  call void @llvm.assume(i1 %cmp) ["nonnull"(ptr %load)]
  ret i1 %cmp
not_taken:
  call void @llvm.assume(i1 %cmp) ["nonnull"(ptr %load)]
  ret i1 %control
}

declare i1 @tmp1(i1)

define i1 @nonnull3C(ptr %a, i1 %control) {
; CHECK-LABEL: @nonnull3C(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br i1 [[CONTROL:%.*]], label [[TAKEN:%.*]], label [[NOT_TAKEN:%.*]]
; CHECK:       taken:
; CHECK-NEXT:    [[LOAD:%.*]] = load ptr, ptr [[A:%.*]], align 8
; CHECK-NEXT:    [[CMP:%.*]] = icmp ne ptr [[LOAD]], null
; CHECK-NEXT:    [[CMP2:%.*]] = call i1 @tmp1(i1 [[CMP]])
; CHECK-NEXT:    br label [[EXIT:%.*]]
; CHECK:       exit:
; CHECK-NEXT:    ret i1 [[CMP2]]
; CHECK:       not_taken:
; CHECK-NEXT:    ret i1 [[CONTROL]]
;
entry:
  %load = load ptr, ptr %a
  %cmp = icmp ne ptr %load, null
  br i1 %control, label %taken, label %not_taken
taken:
  %cmp2 = call i1 @tmp1(i1 %cmp)
  br label %exit
exit:
  ; FIXME: this shouldn't be dropped because it is still dominated by the new position of %load
  call void @llvm.assume(i1 %cmp) ["nonnull"(ptr %load)]
  ret i1 %cmp2
not_taken:
  call void @llvm.assume(i1 %cmp)
  ret i1 %control
}

define i1 @nonnull3D(ptr %a, i1 %control) {
; CHECK-LABEL: @nonnull3D(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br i1 [[CONTROL:%.*]], label [[TAKEN:%.*]], label [[NOT_TAKEN:%.*]]
; CHECK:       taken:
; CHECK-NEXT:    [[LOAD:%.*]] = load ptr, ptr [[A:%.*]], align 8
; CHECK-NEXT:    [[CMP:%.*]] = icmp ne ptr [[LOAD]], null
; CHECK-NEXT:    [[CMP2:%.*]] = call i1 @tmp1(i1 [[CMP]])
; CHECK-NEXT:    br label [[EXIT:%.*]]
; CHECK:       exit:
; CHECK-NEXT:    ret i1 [[CMP2]]
; CHECK:       not_taken:
; CHECK-NEXT:    ret i1 [[CONTROL]]
;
entry:
  %load = load ptr, ptr %a
  %cmp = icmp ne ptr %load, null
  br i1 %control, label %taken, label %not_taken
taken:
  %cmp2 = call i1 @tmp1(i1 %cmp)
  br label %exit
exit:
  ret i1 %cmp2
not_taken:
  call void @llvm.assume(i1 %cmp) ["nonnull"(ptr %load)]
  ret i1 %control
}


define void @always_true_assumption() {
; CHECK-LABEL: @always_true_assumption(
; CHECK-NEXT:    ret void
;
  call void @llvm.assume(i1 true)
  ret void
}

; The alloca guarantees that the low bits of %a are zero because of alignment.
; The assume says the opposite. Make sure we don't crash.

define i64 @PR31809() {
; CHECK-LABEL: @PR31809(
; CHECK-NEXT:    store i1 true, ptr poison, align 1
; CHECK-NEXT:    ret i64 poison
;
  %a = alloca i32
  %t1 = ptrtoint ptr %a to i64
  %cond = icmp eq i64 %t1, 3
  call void @llvm.assume(i1 %cond)
  ret i64 %t1
}

; Similar to above: there's no way to know which assumption is truthful,
; so just don't crash.

define i8 @conflicting_assumptions(i8 %x){
; CHECK-LABEL: @conflicting_assumptions(
; CHECK-NEXT:    store i1 true, ptr poison, align 1
; CHECK-NEXT:    ret i8 poison
;
  %add = add i8 %x, 1
  %cond1 = icmp eq i8 %x, 3
  call void @llvm.assume(i1 %cond1)
  %cond2 = icmp eq i8 %x, 4
  call void @llvm.assume(i1 %cond2)
  ret i8 %add
}

; Another case of conflicting assumptions. This would crash because we'd
; try to set more known bits than existed in the known bits struct.

define void @PR36270(i32 %b) {
; CHECK-LABEL: @PR36270(
; CHECK-NEXT:    unreachable
;
  %B7 = xor i32 -1, 2147483647
  %and1 = and i32 %b, 3
  %B12 = lshr i32 %B7, %and1
  %C1 = icmp ult i32 %and1, %B12
  tail call void @llvm.assume(i1 %C1)
  %cmp2 = icmp eq i32 0, %B12
  tail call void @llvm.assume(i1 %cmp2)
  unreachable
}

; PR47416

define i32 @unreachable_assume(i32 %x, i32 %y) {
; CHECK-LABEL: @unreachable_assume(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[CMP0:%.*]] = icmp sgt i32 [[X:%.*]], 1
; CHECK-NEXT:    [[CMP1:%.*]] = icmp eq i32 [[Y:%.*]], 1
; CHECK-NEXT:    [[OR:%.*]] = or i1 [[CMP0]], [[CMP1]]
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[OR]])
; CHECK-NEXT:    [[CMP2:%.*]] = icmp eq i32 [[X]], 1
; CHECK-NEXT:    br i1 [[CMP2]], label [[IF:%.*]], label [[EXIT:%.*]]
; CHECK:       if:
; CHECK-NEXT:    [[A:%.*]] = and i32 [[Y]], -2
; CHECK-NEXT:    [[CMP3:%.*]] = icmp ne i32 [[A]], 104
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[CMP3]])
; CHECK-NEXT:    br label [[EXIT]]
; CHECK:       exit:
; CHECK-NEXT:    unreachable
;
entry:
  %cmp0 = icmp sgt i32 %x, 1
  %cmp1 = icmp eq i32 %y, 1
  %or = or i1 %cmp0, %cmp1
  tail call void @llvm.assume(i1 %or)
  %cmp2 = icmp eq i32 %x, 1
  br i1 %cmp2, label %if, label %exit

if:
  %a = and i32 %y, -2
  %cmp3 = icmp ne i32 %a, 104
  tail call void @llvm.assume(i1 %cmp3)
  br label %exit

exit:
  %cmp4 = icmp eq i32 %x, 2
  tail call void @llvm.assume(i1 %cmp4)
  unreachable
}

define i32 @unreachable_assume_logical(i32 %x, i32 %y) {
; CHECK-LABEL: @unreachable_assume_logical(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[CMP0:%.*]] = icmp sgt i32 [[X:%.*]], 1
; CHECK-NEXT:    [[CMP1:%.*]] = icmp eq i32 [[Y:%.*]], 1
; CHECK-NEXT:    [[OR:%.*]] = select i1 [[CMP0]], i1 true, i1 [[CMP1]]
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[OR]])
; CHECK-NEXT:    [[CMP2:%.*]] = icmp eq i32 [[X]], 1
; CHECK-NEXT:    br i1 [[CMP2]], label [[IF:%.*]], label [[EXIT:%.*]]
; CHECK:       if:
; CHECK-NEXT:    [[A:%.*]] = and i32 [[Y]], -2
; CHECK-NEXT:    [[CMP3:%.*]] = icmp ne i32 [[A]], 104
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[CMP3]])
; CHECK-NEXT:    br label [[EXIT]]
; CHECK:       exit:
; CHECK-NEXT:    unreachable
;
entry:
  %cmp0 = icmp sgt i32 %x, 1
  %cmp1 = icmp eq i32 %y, 1
  %or = select i1 %cmp0, i1 true, i1 %cmp1
  tail call void @llvm.assume(i1 %or)
  %cmp2 = icmp eq i32 %x, 1
  br i1 %cmp2, label %if, label %exit

if:
  %a = and i32 %y, -2
  %cmp3 = icmp ne i32 %a, 104
  tail call void @llvm.assume(i1 %cmp3)
  br label %exit

exit:
  %cmp4 = icmp eq i32 %x, 2
  tail call void @llvm.assume(i1 %cmp4)
  unreachable
}

define i32 @unreachable_assumes_and_store(i32 %x, i32 %y, ptr %p) {
; CHECK-LABEL: @unreachable_assumes_and_store(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[CMP0:%.*]] = icmp sgt i32 [[X:%.*]], 1
; CHECK-NEXT:    [[CMP1:%.*]] = icmp eq i32 [[Y:%.*]], 1
; CHECK-NEXT:    [[OR:%.*]] = or i1 [[CMP0]], [[CMP1]]
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[OR]])
; CHECK-NEXT:    [[CMP2:%.*]] = icmp eq i32 [[X]], 1
; CHECK-NEXT:    br i1 [[CMP2]], label [[IF:%.*]], label [[EXIT:%.*]]
; CHECK:       if:
; CHECK-NEXT:    [[A:%.*]] = and i32 [[Y]], -2
; CHECK-NEXT:    [[CMP3:%.*]] = icmp ne i32 [[A]], 104
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[CMP3]])
; CHECK-NEXT:    br label [[EXIT]]
; CHECK:       exit:
; CHECK-NEXT:    unreachable
;
entry:
  %cmp0 = icmp sgt i32 %x, 1
  %cmp1 = icmp eq i32 %y, 1
  %or = or i1 %cmp0, %cmp1
  tail call void @llvm.assume(i1 %or)
  %cmp2 = icmp eq i32 %x, 1
  br i1 %cmp2, label %if, label %exit

if:
  %a = and i32 %y, -2
  %cmp3 = icmp ne i32 %a, 104
  tail call void @llvm.assume(i1 %cmp3)
  br label %exit

exit:
  %cmp4 = icmp eq i32 %x, 2
  tail call void @llvm.assume(i1 %cmp4)
  %cmp5 = icmp ugt i32 %y, 42
  tail call void @llvm.assume(i1 %cmp5)
  store i32 %x, ptr %p
  unreachable
}

define i32 @unreachable_assumes_and_store_logical(i32 %x, i32 %y, ptr %p) {
; CHECK-LABEL: @unreachable_assumes_and_store_logical(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[CMP0:%.*]] = icmp sgt i32 [[X:%.*]], 1
; CHECK-NEXT:    [[CMP1:%.*]] = icmp eq i32 [[Y:%.*]], 1
; CHECK-NEXT:    [[OR:%.*]] = select i1 [[CMP0]], i1 true, i1 [[CMP1]]
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[OR]])
; CHECK-NEXT:    [[CMP2:%.*]] = icmp eq i32 [[X]], 1
; CHECK-NEXT:    br i1 [[CMP2]], label [[IF:%.*]], label [[EXIT:%.*]]
; CHECK:       if:
; CHECK-NEXT:    [[A:%.*]] = and i32 [[Y]], -2
; CHECK-NEXT:    [[CMP3:%.*]] = icmp ne i32 [[A]], 104
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[CMP3]])
; CHECK-NEXT:    br label [[EXIT]]
; CHECK:       exit:
; CHECK-NEXT:    unreachable
;
entry:
  %cmp0 = icmp sgt i32 %x, 1
  %cmp1 = icmp eq i32 %y, 1
  %or = select i1 %cmp0, i1 true, i1 %cmp1
  tail call void @llvm.assume(i1 %or)
  %cmp2 = icmp eq i32 %x, 1
  br i1 %cmp2, label %if, label %exit

if:
  %a = and i32 %y, -2
  %cmp3 = icmp ne i32 %a, 104
  tail call void @llvm.assume(i1 %cmp3)
  br label %exit

exit:
  %cmp4 = icmp eq i32 %x, 2
  tail call void @llvm.assume(i1 %cmp4)
  %cmp5 = icmp ugt i32 %y, 42
  tail call void @llvm.assume(i1 %cmp5)
  store i32 %x, ptr %p
  unreachable
}

define void @canonicalize_assume(ptr %0) {
; DEFAULT-LABEL: @canonicalize_assume(
; DEFAULT-NEXT:    [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[TMP0:%.*]], i64 2
; DEFAULT-NEXT:    call void @llvm.assume(i1 true) [ "align"(ptr [[TMP2]], i64 16) ]
; DEFAULT-NEXT:    ret void
;
; BUNDLES-LABEL: @canonicalize_assume(
; BUNDLES-NEXT:    call void @llvm.assume(i1 true) [ "align"(ptr [[TMP0:%.*]], i64 8) ]
; BUNDLES-NEXT:    ret void
;
  %2 = getelementptr inbounds i32, ptr %0, i64 2
  call void @llvm.assume(i1 true) [ "align"(ptr %2, i64 16) ]
  ret void
}

define void @assume_makes_and_known_assume_on_arg(ptr %p, i32 %x) {
; CHECK-LABEL: @assume_makes_and_known_assume_on_arg(
; CHECK-NEXT:    [[AND:%.*]] = and i32 [[X:%.*]], 1
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[AND]], 0
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    store i32 0, ptr [[P:%.*]], align 4
; CHECK-NEXT:    ret void
;
  %and = and i32 %x, 1
  %cmp = icmp eq i32 %and, 0
  call void @llvm.assume(i1 %cmp)
  %and2 = and i32 %x, 1
  store i32 %and2, ptr %p
  ret void
}

define void @assume_makes_and_known_assume_on_mul(ptr %p, i32 %a, i32 %b) {
; CHECK-LABEL: @assume_makes_and_known_assume_on_mul(
; CHECK-NEXT:    [[X:%.*]] = mul i32 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    [[AND:%.*]] = and i32 [[X]], 1
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[AND]], 0
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    store i32 0, ptr [[P:%.*]], align 4
; CHECK-NEXT:    ret void
;
  %x = mul i32 %a, %b
  %and = and i32 %x, 1
  %cmp = icmp eq i32 %and, 0
  call void @llvm.assume(i1 %cmp)
  %and2 = and i32 %x, 1
  store i32 %and2, ptr %p
  ret void
}

define void @assume_makes_and_known_assume_on_bitwise(ptr %p, i32 %a, i32 %b) {
; CHECK-LABEL: @assume_makes_and_known_assume_on_bitwise(
; CHECK-NEXT:    [[X:%.*]] = or i32 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    [[AND:%.*]] = and i32 [[X]], 1
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[AND]], 0
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    store i32 0, ptr [[P:%.*]], align 4
; CHECK-NEXT:    ret void
;
  %x = or i32 %a, %b
  %and = and i32 %x, 1
  %cmp = icmp eq i32 %and, 0
  call void @llvm.assume(i1 %cmp)
  %and2 = and i32 %x, 1
  store i32 %and2, ptr %p
  ret void
}

define i32 @range_16_31_top28(i32 %x) {
; CHECK-LABEL: @range_16_31_top28(
; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[X:%.*]], -16
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[TMP1]], 16
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    ret i32 16
;
  %add = add i32 %x, -16
  %cmp = icmp ult i32 %add, 16
  call void @llvm.assume(i1 %cmp)
  %res = and i32 %x, u0xfffffff0
  ret i32 %res
}

define i32 @range_16_31_top29(i32 %x) {
; CHECK-LABEL: @range_16_31_top29(
; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[X:%.*]], -16
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[TMP1]], 16
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    [[RES:%.*]] = and i32 [[X]], 24
; CHECK-NEXT:    ret i32 [[RES]]
;
  %add = add i32 %x, -16
  %cmp = icmp ult i32 %add, 16
  call void @llvm.assume(i1 %cmp)
  %res = and i32 %x, u0xfffffff8
  ret i32 %res
}

define i32 @range_16_30_top28(i32 %x) {
; CHECK-LABEL: @range_16_30_top28(
; CHECK-NEXT:    [[ADD:%.*]] = add i32 [[X:%.*]], -16
; CHECK-NEXT:    [[CMP:%.*]] = icmp ult i32 [[ADD]], 15
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    ret i32 16
;
  %add = add i32 %x, -16
  %cmp = icmp ult i32 %add, 15
  call void @llvm.assume(i1 %cmp)
  %res = and i32 %x, u0xfffffff0
  ret i32 %res
}

define i32 @range_16_32_top28(i32 %x) {
; CHECK-LABEL: @range_16_32_top28(
; CHECK-NEXT:    [[ADD:%.*]] = add i32 [[X:%.*]], -16
; CHECK-NEXT:    [[CMP:%.*]] = icmp ult i32 [[ADD]], 17
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    [[RES:%.*]] = and i32 [[X]], 48
; CHECK-NEXT:    ret i32 [[RES]]
;
  %add = add i32 %x, -16
  %cmp = icmp ult i32 %add, 17
  call void @llvm.assume(i1 %cmp)
  %res = and i32 %x, u0xfffffff0
  ret i32 %res
}

define i32 @range_16_32_top27(i32 %x) {
; CHECK-LABEL: @range_16_32_top27(
; CHECK-NEXT:    [[ADD:%.*]] = add i32 [[X:%.*]], -16
; CHECK-NEXT:    [[CMP:%.*]] = icmp ult i32 [[ADD]], 17
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    [[RES:%.*]] = and i32 [[X]], 32
; CHECK-NEXT:    ret i32 [[RES]]
;
  %add = add i32 %x, -16
  %cmp = icmp ult i32 %add, 17
  call void @llvm.assume(i1 %cmp)
  %res = and i32 %x, u0xffffffe0
  ret i32 %res
}

define i32 @range_16_32_top26(i32 %x) {
; CHECK-LABEL: @range_16_32_top26(
; CHECK-NEXT:    [[ADD:%.*]] = add i32 [[X:%.*]], -16
; CHECK-NEXT:    [[CMP:%.*]] = icmp ult i32 [[ADD]], 17
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    ret i32 0
;
  %add = add i32 %x, -16
  %cmp = icmp ult i32 %add, 17
  call void @llvm.assume(i1 %cmp)
  %res = and i32 %x, u0xffffffc0
  ret i32 %res
}

define i32 @range_15_31_top28(i32 %x) {
; CHECK-LABEL: @range_15_31_top28(
; CHECK-NEXT:    [[ADD:%.*]] = add i32 [[X:%.*]], -15
; CHECK-NEXT:    [[CMP:%.*]] = icmp ult i32 [[ADD]], 16
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    [[RES:%.*]] = and i32 [[X]], 16
; CHECK-NEXT:    ret i32 [[RES]]
;
  %add = add i32 %x, -15
  %cmp = icmp ult i32 %add, 16
  call void @llvm.assume(i1 %cmp)
  %res = and i32 %x, u0xfffffff0
  ret i32 %res
}

define i32 @range_15_31_top27(i32 %x) {
; CHECK-LABEL: @range_15_31_top27(
; CHECK-NEXT:    [[ADD:%.*]] = add i32 [[X:%.*]], -15
; CHECK-NEXT:    [[CMP:%.*]] = icmp ult i32 [[ADD]], 16
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    ret i32 0
;
  %add = add i32 %x, -15
  %cmp = icmp ult i32 %add, 16
  call void @llvm.assume(i1 %cmp)
  %res = and i32 %x, u0xffffffe0
  ret i32 %res
}

declare void @llvm.dbg.value(metadata, metadata, metadata)

!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!5, !6, !7, !8}

!0 = distinct !DICompileUnit(language: DW_LANG_C, file: !3, producer: "Me", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: null, retainedTypes: null, imports: null)
!1 = !DILocalVariable(name: "", arg: 1, scope: !2, file: null, line: 1, type: null)
!2 = distinct !DISubprogram(name: "debug", linkageName: "debug", scope: null, file: null, line: 0, type: null, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: true, unit: !0)
!3 = !DIFile(filename: "consecutive-fences.ll", directory: "")
!5 = !{i32 2, !"Dwarf Version", i32 4}
!6 = !{i32 2, !"Debug Info Version", i32 3}
!7 = !{i32 1, !"wchar_size", i32 4}
!8 = !{i32 7, !"PIC Level", i32 2}
!9 = !DILocation(line: 0, column: 0, scope: !2)


attributes #0 = { nounwind uwtable }
attributes #1 = { nounwind }

