; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=instcombine -S | FileCheck --check-prefixes=CHECK,DEFAULT %s
; RUN: opt < %s -passes=instcombine --enable-knowledge-retention -S | FileCheck --check-prefixes=CHECK,BUNDLES %s

; RUN: opt < %s -passes=instcombine -S --try-experimental-debuginfo-iterators | FileCheck --check-prefixes=CHECK,DEFAULT %s
; RUN: opt < %s -passes=instcombine --enable-knowledge-retention -S --try-experimental-debuginfo-iterators | FileCheck --check-prefixes=CHECK,BUNDLES %s

target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"

declare void @llvm.assume(i1) #1

; Check that the assume has not been removed:

define i32 @foo1(ptr %a) #0 {
; DEFAULT-LABEL: @foo1(
; DEFAULT-NEXT:    [[T0:%.*]] = load i32, ptr [[A:%.*]], align 4
; DEFAULT-NEXT:    [[PTRINT:%.*]] = ptrtoint ptr [[A]] to i64
; DEFAULT-NEXT:    [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 31
; DEFAULT-NEXT:    [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
; DEFAULT-NEXT:    tail call void @llvm.assume(i1 [[MASKCOND]])
; DEFAULT-NEXT:    ret i32 [[T0]]
;
; BUNDLES-LABEL: @foo1(
; BUNDLES-NEXT:    [[T0:%.*]] = load i32, ptr [[A:%.*]], align 4
; BUNDLES-NEXT:    call void @llvm.assume(i1 true) [ "align"(ptr [[A]], i64 32) ]
; BUNDLES-NEXT:    ret i32 [[T0]]
;
  %t0 = load i32, ptr %a, align 4
  %ptrint = ptrtoint ptr %a to i64
  %maskedptr = and i64 %ptrint, 31
  %maskcond = icmp eq i64 %maskedptr, 0
  tail call void @llvm.assume(i1 %maskcond)
  ret i32 %t0
}

; Same check as in @foo1, but make sure it works if the assume is first too.

define i32 @foo2(ptr %a) #0 {
; DEFAULT-LABEL: @foo2(
; DEFAULT-NEXT:    [[PTRINT:%.*]] = ptrtoint ptr [[A:%.*]] to i64
; DEFAULT-NEXT:    [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 31
; DEFAULT-NEXT:    [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
; DEFAULT-NEXT:    tail call void @llvm.assume(i1 [[MASKCOND]])
; DEFAULT-NEXT:    [[T0:%.*]] = load i32, ptr [[A]], align 4
; DEFAULT-NEXT:    ret i32 [[T0]]
;
; BUNDLES-LABEL: @foo2(
; BUNDLES-NEXT:    call void @llvm.assume(i1 true) [ "align"(ptr [[A:%.*]], i64 32) ]
; BUNDLES-NEXT:    [[T0:%.*]] = load i32, ptr [[A]], align 4
; BUNDLES-NEXT:    ret i32 [[T0]]
;
  %ptrint = ptrtoint ptr %a to i64
  %maskedptr = and i64 %ptrint, 31
  %maskcond = icmp eq i64 %maskedptr, 0
  tail call void @llvm.assume(i1 %maskcond)
  %t0 = load i32, ptr %a, align 4
  ret i32 %t0
}

define i32 @simple(i32 %a) #1 {
; CHECK-LABEL: @simple(
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[A:%.*]], 4
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    ret i32 [[A]]
;
  %cmp = icmp eq i32 %a, 4
  tail call void @llvm.assume(i1 %cmp)
  ret i32 %a
}

define i32 @can1(i1 %a, i1 %b, i1 %c) {
; CHECK-LABEL: @can1(
; CHECK-NEXT:    call void @llvm.assume(i1 [[A:%.*]])
; CHECK-NEXT:    call void @llvm.assume(i1 [[B:%.*]])
; CHECK-NEXT:    call void @llvm.assume(i1 [[C:%.*]])
; CHECK-NEXT:    ret i32 5
;
  %and1 = and i1 %a, %b
  %and  = and i1 %and1, %c
  tail call void @llvm.assume(i1 %and)
  ret i32 5
}

define i32 @can1_logical(i1 %a, i1 %b, i1 %c) {
; CHECK-LABEL: @can1_logical(
; CHECK-NEXT:    call void @llvm.assume(i1 [[A:%.*]])
; CHECK-NEXT:    call void @llvm.assume(i1 [[B:%.*]])
; CHECK-NEXT:    call void @llvm.assume(i1 [[C:%.*]])
; CHECK-NEXT:    ret i32 5
;
  %and1 = select i1 %a, i1 %b, i1 false
  %and  = select i1 %and1, i1 %c, i1 false
  tail call void @llvm.assume(i1 %and)
  ret i32 5
}

define i32 @can2(i1 %a, i1 %b, i1 %c) {
; CHECK-LABEL: @can2(
; CHECK-NEXT:    [[TMP1:%.*]] = xor i1 [[A:%.*]], true
; CHECK-NEXT:    call void @llvm.assume(i1 [[TMP1]])
; CHECK-NEXT:    [[TMP2:%.*]] = xor i1 [[B:%.*]], true
; CHECK-NEXT:    call void @llvm.assume(i1 [[TMP2]])
; CHECK-NEXT:    ret i32 5
;
  %v = or i1 %a, %b
  %w = xor i1 %v, 1
  tail call void @llvm.assume(i1 %w)
  ret i32 5
}

define i32 @can2_logical(i1 %a, i1 %b, i1 %c) {
; CHECK-LABEL: @can2_logical(
; CHECK-NEXT:    [[TMP1:%.*]] = xor i1 [[A:%.*]], true
; CHECK-NEXT:    call void @llvm.assume(i1 [[TMP1]])
; CHECK-NEXT:    [[TMP2:%.*]] = xor i1 [[B:%.*]], true
; CHECK-NEXT:    call void @llvm.assume(i1 [[TMP2]])
; CHECK-NEXT:    ret i32 5
;
  %v = select i1 %a, i1 true, i1 %b
  %w = xor i1 %v, 1
  tail call void @llvm.assume(i1 %w)
  ret i32 5
}

define i32 @bar1(i32 %a) #0 {
; CHECK-LABEL: @bar1(
; CHECK-NEXT:    [[AND:%.*]] = and i32 [[A:%.*]], 7
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[AND]], 1
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    ret i32 1
;
  %and1 = and i32 %a, 3
  %and = and i32 %a, 7
  %cmp = icmp eq i32 %and, 1
  tail call void @llvm.assume(i1 %cmp)
  ret i32 %and1
}

define i32 @bar2(i32 %a) #0 {
; CHECK-LABEL: @bar2(
; CHECK-NEXT:    [[AND:%.*]] = and i32 [[A:%.*]], 7
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[AND]], 1
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    ret i32 1
;
  %and = and i32 %a, 7
  %cmp = icmp eq i32 %and, 1
  tail call void @llvm.assume(i1 %cmp)
  %and1 = and i32 %a, 3
  ret i32 %and1
}

define i32 @bar3(i32 %a, i1 %x, i1 %y) #0 {
; CHECK-LABEL: @bar3(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[X:%.*]])
; CHECK-NEXT:    [[AND:%.*]] = and i32 [[A:%.*]], 7
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[AND]], 1
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[Y:%.*]])
; CHECK-NEXT:    ret i32 1
;
entry:
  %and1 = and i32 %a, 3

; Don't be fooled by other assumes around.

  tail call void @llvm.assume(i1 %x)

  %and = and i32 %a, 7
  %cmp = icmp eq i32 %and, 1
  tail call void @llvm.assume(i1 %cmp)

  tail call void @llvm.assume(i1 %y)

  ret i32 %and1
}

; If we allow recursive known bits queries based on
; assumptions, we could do better here:
; a == b and a & 7 == 1, so b & 7 == 1, so b & 3 == 1, so return 1.

define i32 @known_bits_recursion_via_assumes(i32 %a, i32 %b) {
; CHECK-LABEL: @known_bits_recursion_via_assumes(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[AND1:%.*]] = and i32 [[B:%.*]], 3
; CHECK-NEXT:    [[AND:%.*]] = and i32 [[A:%.*]], 7
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[AND]], 1
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    [[CMP2:%.*]] = icmp eq i32 [[A]], [[B]]
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[CMP2]])
; CHECK-NEXT:    ret i32 [[AND1]]
;
entry:
  %and1 = and i32 %b, 3
  %and = and i32 %a, 7
  %cmp = icmp eq i32 %and, 1
  tail call void @llvm.assume(i1 %cmp)
  %cmp2 = icmp eq i32 %a, %b
  tail call void @llvm.assume(i1 %cmp2)
  ret i32 %and1
}

define i32 @icmp1(i32 %a) #0 {
; CHECK-LABEL: @icmp1(
; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[A:%.*]], 5
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    [[CONV:%.*]] = zext i1 [[CMP]] to i32
; CHECK-NEXT:    ret i32 [[CONV]]
;
  %cmp = icmp sgt i32 %a, 5
  tail call void @llvm.assume(i1 %cmp)
  %conv = zext i1 %cmp to i32
  ret i32 %conv
}

define i32 @icmp2(i32 %a) #0 {
; CHECK-LABEL: @icmp2(
; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[A:%.*]], 5
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    ret i32 0
;
  %cmp = icmp sgt i32 %a, 5
  tail call void @llvm.assume(i1 %cmp)
  %t0 = zext i1 %cmp to i32
  %lnot.ext = xor i32 %t0, 1
  ret i32 %lnot.ext
}

; If the 'not' of a condition is known true, then the condition must be false.

define i1 @assume_not(i1 %cond) {
; CHECK-LABEL: @assume_not(
; CHECK-NEXT:    [[NOTCOND:%.*]] = xor i1 [[COND:%.*]], true
; CHECK-NEXT:    call void @llvm.assume(i1 [[NOTCOND]])
; CHECK-NEXT:    ret i1 [[COND]]
;
  %notcond = xor i1 %cond, true
  call void @llvm.assume(i1 %notcond)
  ret i1 %cond
}

declare void @escape(ptr %a)

; Canonicalize a nonnull assumption on a load into metadata form.

define i32 @bundle1(ptr %P) {
; CHECK-LABEL: @bundle1(
; CHECK-NEXT:    tail call void @llvm.assume(i1 true) [ "nonnull"(ptr [[P:%.*]]) ]
; CHECK-NEXT:    [[LOAD:%.*]] = load i32, ptr [[P]], align 4
; CHECK-NEXT:    ret i32 [[LOAD]]
;
  tail call void @llvm.assume(i1 true) ["nonnull"(ptr %P)]
  %load = load i32, ptr %P
  ret i32 %load
}

define i32 @bundle2(ptr %P) {
; CHECK-LABEL: @bundle2(
; CHECK-NEXT:    [[LOAD:%.*]] = load i32, ptr [[P:%.*]], align 4
; CHECK-NEXT:    ret i32 [[LOAD]]
;
  tail call void @llvm.assume(i1 true) ["ignore"(ptr undef)]
  %load = load i32, ptr %P
  ret i32 %load
}

define i1 @nonnull1(ptr %a) {
; CHECK-LABEL: @nonnull1(
; CHECK-NEXT:    [[LOAD:%.*]] = load ptr, ptr [[A:%.*]], align 8, !nonnull [[META6:![0-9]+]], !noundef [[META6]]
; CHECK-NEXT:    tail call void @escape(ptr nonnull [[LOAD]])
; CHECK-NEXT:    ret i1 false
;
  %load = load ptr, ptr %a
  %cmp = icmp ne ptr %load, null
  tail call void @llvm.assume(i1 %cmp)
  tail call void @escape(ptr %load)
  %rval = icmp eq ptr %load, null
  ret i1 %rval
}

; Make sure the above canonicalization applies only
; to pointer types.  Doing otherwise would be illegal.

define i1 @nonnull2(ptr %a) {
; CHECK-LABEL: @nonnull2(
; CHECK-NEXT:    [[LOAD:%.*]] = load i32, ptr [[A:%.*]], align 4
; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i32 [[LOAD]], 0
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    ret i1 false
;
  %load = load i32, ptr %a
  %cmp = icmp ne i32 %load, 0
  tail call void @llvm.assume(i1 %cmp)
  %rval = icmp eq i32 %load, 0
  ret i1 %rval
}

; Make sure the above canonicalization does not trigger
; if the assume is control dependent on something else

define i1 @nonnull3(ptr %a, i1 %control) {
; FIXME: in the BUNDLES version we could duplicate the load and keep the assume nonnull.
; DEFAULT-LABEL: @nonnull3(
; DEFAULT-NEXT:  entry:
; DEFAULT-NEXT:    [[LOAD:%.*]] = load ptr, ptr [[A:%.*]], align 8
; DEFAULT-NEXT:    [[CMP:%.*]] = icmp ne ptr [[LOAD]], null
; DEFAULT-NEXT:    br i1 [[CONTROL:%.*]], label [[TAKEN:%.*]], label [[NOT_TAKEN:%.*]]
; DEFAULT:       taken:
; DEFAULT-NEXT:    tail call void @llvm.assume(i1 [[CMP]])
; DEFAULT-NEXT:    ret i1 false
; DEFAULT:       not_taken:
; DEFAULT-NEXT:    [[RVAL_2:%.*]] = icmp sgt ptr [[LOAD]], null
; DEFAULT-NEXT:    ret i1 [[RVAL_2]]
;
; BUNDLES-LABEL: @nonnull3(
; BUNDLES-NEXT:  entry:
; BUNDLES-NEXT:    br i1 [[CONTROL:%.*]], label [[TAKEN:%.*]], label [[NOT_TAKEN:%.*]]
; BUNDLES:       taken:
; BUNDLES-NEXT:    ret i1 false
; BUNDLES:       not_taken:
; BUNDLES-NEXT:    [[LOAD:%.*]] = load ptr, ptr [[A:%.*]], align 8
; BUNDLES-NEXT:    [[RVAL_2:%.*]] = icmp sgt ptr [[LOAD]], null
; BUNDLES-NEXT:    ret i1 [[RVAL_2]]
;
entry:
  %load = load ptr, ptr %a
  %cmp = icmp ne ptr %load, null
  br i1 %control, label %taken, label %not_taken
taken:
  tail call void @llvm.assume(i1 %cmp)
  %rval = icmp eq ptr %load, null
  ret i1 %rval
not_taken:
  %rval.2 = icmp sgt ptr %load, null
  ret i1 %rval.2
}

; Make sure the above canonicalization does not trigger
; if the path from the load to the assume is potentially
; interrupted by an exception being thrown

define i1 @nonnull4(ptr %a) {
; DEFAULT-LABEL: @nonnull4(
; DEFAULT-NEXT:    [[LOAD:%.*]] = load ptr, ptr [[A:%.*]], align 8
; DEFAULT-NEXT:    tail call void @escape(ptr [[LOAD]])
; DEFAULT-NEXT:    [[CMP:%.*]] = icmp ne ptr [[LOAD]], null
; DEFAULT-NEXT:    tail call void @llvm.assume(i1 [[CMP]])
; DEFAULT-NEXT:    ret i1 false
;
; BUNDLES-LABEL: @nonnull4(
; BUNDLES-NEXT:    [[LOAD:%.*]] = load ptr, ptr [[A:%.*]], align 8
; BUNDLES-NEXT:    tail call void @escape(ptr [[LOAD]])
; BUNDLES-NEXT:    call void @llvm.assume(i1 true) [ "nonnull"(ptr [[LOAD]]) ]
; BUNDLES-NEXT:    ret i1 false
;
  %load = load ptr, ptr %a
  ;; This call may throw!
  tail call void @escape(ptr %load)
  %cmp = icmp ne ptr %load, null
  tail call void @llvm.assume(i1 %cmp)
  %rval = icmp eq ptr %load, null
  ret i1 %rval
}
define i1 @nonnull5(ptr %a) {
; CHECK-LABEL: @nonnull5(
; CHECK-NEXT:    [[LOAD:%.*]] = load ptr, ptr [[A:%.*]], align 8
; CHECK-NEXT:    tail call void @escape(ptr [[LOAD]])
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt ptr [[LOAD]], null
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    ret i1 false
;
  %load = load ptr, ptr %a
  ;; This call may throw!
  tail call void @escape(ptr %load)
  %integral = ptrtoint ptr %load to i64
  %cmp = icmp slt i64 %integral, 0
  tail call void @llvm.assume(i1 %cmp) ; %load has at least highest bit set
  %rval = icmp eq ptr %load, null
  ret i1 %rval
}

; PR35846 - https://bugs.llvm.org/show_bug.cgi?id=35846

define i32 @assumption_conflicts_with_known_bits(i32 %a, i32 %b) {
; CHECK-LABEL: @assumption_conflicts_with_known_bits(
; CHECK-NEXT:    store i1 true, ptr poison, align 1
; CHECK-NEXT:    ret i32 1
;
  %and1 = and i32 %b, 3
  %B1 = lshr i32 %and1, %and1
  %B3 = shl nuw nsw i32 %and1, %B1
  %cmp = icmp eq i32 %B3, 1
  tail call void @llvm.assume(i1 %cmp)
  %cmp2 = icmp eq i32 %B1, %B3
  tail call void @llvm.assume(i1 %cmp2)
  ret i32 %and1
}

; PR37726 - https://bugs.llvm.org/show_bug.cgi?id=37726
; There's a loophole in eliminating a redundant assumption when
; we have conflicting assumptions. Verify that debuginfo doesn't
; get in the way of the fold.

define void @debug_interference(i8 %x) {
; CHECK-LABEL: @debug_interference(
; CHECK-NEXT:    tail call void @llvm.dbg.value(metadata i32 5, metadata [[META7:![0-9]+]], metadata !DIExpression()), !dbg [[DBG9:![0-9]+]]
; CHECK-NEXT:    store i1 true, ptr poison, align 1
; CHECK-NEXT:    ret void
;
  %cmp1 = icmp eq i8 %x, 0
  %cmp2 = icmp ne i8 %x, 0
  tail call void @llvm.assume(i1 %cmp1)
  tail call void @llvm.dbg.value(metadata i32 5, metadata !1, metadata !DIExpression()), !dbg !9
  tail call void @llvm.assume(i1 %cmp1)
  tail call void @llvm.dbg.value(metadata i32 5, metadata !1, metadata !DIExpression()), !dbg !9
  tail call void @llvm.assume(i1 %cmp2)
  tail call void @llvm.dbg.value(metadata i32 5, metadata !1, metadata !DIExpression()), !dbg !9
  tail call void @llvm.assume(i1 %cmp2)
  ret void
}

; This would crash.
; Does it ever make sense to peek through a bitcast of the icmp operand?

define i32 @PR40940(<4 x i8> %x) {
; CHECK-LABEL: @PR40940(
; CHECK-NEXT:    [[SHUF:%.*]] = shufflevector <4 x i8> [[X:%.*]], <4 x i8> undef, <4 x i32> <i32 1, i32 1, i32 2, i32 3>
; CHECK-NEXT:    [[T2:%.*]] = bitcast <4 x i8> [[SHUF]] to i32
; CHECK-NEXT:    [[T3:%.*]] = icmp ult i32 [[T2]], 65536
; CHECK-NEXT:    call void @llvm.assume(i1 [[T3]])
; CHECK-NEXT:    ret i32 [[T2]]
;
  %shuf = shufflevector <4 x i8> %x, <4 x i8> undef, <4 x i32> <i32 1, i32 1, i32 2, i32 3>
  %t2 = bitcast <4 x i8> %shuf to i32
  %t3 = icmp ult i32 %t2, 65536
  call void @llvm.assume(i1 %t3)
  ret i32 %t2
}

define i1 @nonnull3A(ptr %a, i1 %control) {
; DEFAULT-LABEL: @nonnull3A(
; DEFAULT-NEXT:  entry:
; DEFAULT-NEXT:    [[LOAD:%.*]] = load ptr, ptr [[A:%.*]], align 8
; DEFAULT-NEXT:    br i1 [[CONTROL:%.*]], label [[TAKEN:%.*]], label [[NOT_TAKEN:%.*]]
; DEFAULT:       taken:
; DEFAULT-NEXT:    [[CMP:%.*]] = icmp ne ptr [[LOAD]], null
; DEFAULT-NEXT:    call void @llvm.assume(i1 [[CMP]])
; DEFAULT-NEXT:    ret i1 [[CMP]]
; DEFAULT:       not_taken:
; DEFAULT-NEXT:    [[RVAL_2:%.*]] = icmp sgt ptr [[LOAD]], null
; DEFAULT-NEXT:    ret i1 [[RVAL_2]]
;
; BUNDLES-LABEL: @nonnull3A(
; BUNDLES-NEXT:  entry:
; BUNDLES-NEXT:    br i1 [[CONTROL:%.*]], label [[TAKEN:%.*]], label [[NOT_TAKEN:%.*]]
; BUNDLES:       taken:
; BUNDLES-NEXT:    ret i1 true
; BUNDLES:       not_taken:
; BUNDLES-NEXT:    [[LOAD:%.*]] = load ptr, ptr [[A:%.*]], align 8
; BUNDLES-NEXT:    [[RVAL_2:%.*]] = icmp sgt ptr [[LOAD]], null
; BUNDLES-NEXT:    ret i1 [[RVAL_2]]
;
entry:
  %load = load ptr, ptr %a
  %cmp = icmp ne ptr %load, null
  br i1 %control, label %taken, label %not_taken
taken:
  call void @llvm.assume(i1 %cmp)
  ret i1 %cmp
not_taken:
  call void @llvm.assume(i1 %cmp)
  %rval.2 = icmp sgt ptr %load, null
  ret i1 %rval.2
}

define i1 @nonnull3B(ptr %a, i1 %control) {
; CHECK-LABEL: @nonnull3B(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br i1 [[CONTROL:%.*]], label [[TAKEN:%.*]], label [[NOT_TAKEN:%.*]]
; CHECK:       taken:
; CHECK-NEXT:    [[LOAD:%.*]] = load ptr, ptr [[A:%.*]], align 8
; CHECK-NEXT:    [[CMP:%.*]] = icmp ne ptr [[LOAD]], null
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]]) [ "nonnull"(ptr [[LOAD]]) ]
; CHECK-NEXT:    ret i1 [[CMP]]
; CHECK:       not_taken:
; CHECK-NEXT:    ret i1 [[CONTROL]]
;
entry:
  %load = load ptr, ptr %a
  %cmp = icmp ne ptr %load, null
  br i1 %control, label %taken, label %not_taken
taken:
  call void @llvm.assume(i1 %cmp) ["nonnull"(ptr %load)]
  ret i1 %cmp
not_taken:
  call void @llvm.assume(i1 %cmp) ["nonnull"(ptr %load)]
  ret i1 %control
}

declare i1 @tmp1(i1)

define i1 @nonnull3C(ptr %a, i1 %control) {
; CHECK-LABEL: @nonnull3C(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br i1 [[CONTROL:%.*]], label [[TAKEN:%.*]], label [[NOT_TAKEN:%.*]]
; CHECK:       taken:
; CHECK-NEXT:    [[LOAD:%.*]] = load ptr, ptr [[A:%.*]], align 8
; CHECK-NEXT:    [[CMP:%.*]] = icmp ne ptr [[LOAD]], null
; CHECK-NEXT:    [[CMP2:%.*]] = call i1 @tmp1(i1 [[CMP]])
; CHECK-NEXT:    br label [[EXIT:%.*]]
; CHECK:       exit:
; CHECK-NEXT:    ret i1 [[CMP2]]
; CHECK:       not_taken:
; CHECK-NEXT:    ret i1 [[CONTROL]]
;
entry:
  %load = load ptr, ptr %a
  %cmp = icmp ne ptr %load, null
  br i1 %control, label %taken, label %not_taken
taken:
  %cmp2 = call i1 @tmp1(i1 %cmp)
  br label %exit
exit:
  ; FIXME: this shouldn't be dropped because it is still dominated by the new position of %load
  call void @llvm.assume(i1 %cmp) ["nonnull"(ptr %load)]
  ret i1 %cmp2
not_taken:
  call void @llvm.assume(i1 %cmp)
  ret i1 %control
}

define i1 @nonnull3D(ptr %a, i1 %control) {
; CHECK-LABEL: @nonnull3D(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br i1 [[CONTROL:%.*]], label [[TAKEN:%.*]], label [[NOT_TAKEN:%.*]]
; CHECK:       taken:
; CHECK-NEXT:    [[LOAD:%.*]] = load ptr, ptr [[A:%.*]], align 8
; CHECK-NEXT:    [[CMP:%.*]] = icmp ne ptr [[LOAD]], null
; CHECK-NEXT:    [[CMP2:%.*]] = call i1 @tmp1(i1 [[CMP]])
; CHECK-NEXT:    br label [[EXIT:%.*]]
; CHECK:       exit:
; CHECK-NEXT:    ret i1 [[CMP2]]
; CHECK:       not_taken:
; CHECK-NEXT:    ret i1 [[CONTROL]]
;
entry:
  %load = load ptr, ptr %a
  %cmp = icmp ne ptr %load, null
  br i1 %control, label %taken, label %not_taken
taken:
  %cmp2 = call i1 @tmp1(i1 %cmp)
  br label %exit
exit:
  ret i1 %cmp2
not_taken:
  call void @llvm.assume(i1 %cmp) ["nonnull"(ptr %load)]
  ret i1 %control
}


define void @always_true_assumption() {
; CHECK-LABEL: @always_true_assumption(
; CHECK-NEXT:    ret void
;
  call void @llvm.assume(i1 true)
  ret void
}

; The alloca guarantees that the low bits of %a are zero because of alignment.
; The assume says the opposite. Make sure we don't crash.

define i64 @PR31809() {
; CHECK-LABEL: @PR31809(
; CHECK-NEXT:    store i1 true, ptr poison, align 1
; CHECK-NEXT:    ret i64 poison
;
  %a = alloca i32
  %t1 = ptrtoint ptr %a to i64
  %cond = icmp eq i64 %t1, 3
  call void @llvm.assume(i1 %cond)
  ret i64 %t1
}

; Similar to above: there's no way to know which assumption is truthful,
; so just don't crash.

define i8 @conflicting_assumptions(i8 %x){
; CHECK-LABEL: @conflicting_assumptions(
; CHECK-NEXT:    store i1 true, ptr poison, align 1
; CHECK-NEXT:    ret i8 poison
;
  %add = add i8 %x, 1
  %cond1 = icmp eq i8 %x, 3
  call void @llvm.assume(i1 %cond1)
  %cond2 = icmp eq i8 %x, 4
  call void @llvm.assume(i1 %cond2)
  ret i8 %add
}

; Another case of conflicting assumptions. This would crash because we'd
; try to set more known bits than existed in the known bits struct.

define void @PR36270(i32 %b) {
; CHECK-LABEL: @PR36270(
; CHECK-NEXT:    unreachable
;
  %B7 = xor i32 -1, 2147483647
  %and1 = and i32 %b, 3
  %B12 = lshr i32 %B7, %and1
  %C1 = icmp ult i32 %and1, %B12
  tail call void @llvm.assume(i1 %C1)
  %cmp2 = icmp eq i32 0, %B12
  tail call void @llvm.assume(i1 %cmp2)
  unreachable
}

; PR47416

define i32 @unreachable_assume(i32 %x, i32 %y) {
; CHECK-LABEL: @unreachable_assume(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[CMP0:%.*]] = icmp sgt i32 [[X:%.*]], 1
; CHECK-NEXT:    [[CMP1:%.*]] = icmp eq i32 [[Y:%.*]], 1
; CHECK-NEXT:    [[OR:%.*]] = or i1 [[CMP0]], [[CMP1]]
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[OR]])
; CHECK-NEXT:    [[CMP2:%.*]] = icmp eq i32 [[X]], 1
; CHECK-NEXT:    br i1 [[CMP2]], label [[IF:%.*]], label [[EXIT:%.*]]
; CHECK:       if:
; CHECK-NEXT:    [[A:%.*]] = and i32 [[Y]], -2
; CHECK-NEXT:    [[CMP3:%.*]] = icmp ne i32 [[A]], 104
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[CMP3]])
; CHECK-NEXT:    br label [[EXIT]]
; CHECK:       exit:
; CHECK-NEXT:    unreachable
;
entry:
  %cmp0 = icmp sgt i32 %x, 1
  %cmp1 = icmp eq i32 %y, 1
  %or = or i1 %cmp0, %cmp1
  tail call void @llvm.assume(i1 %or)
  %cmp2 = icmp eq i32 %x, 1
  br i1 %cmp2, label %if, label %exit

if:
  %a = and i32 %y, -2
  %cmp3 = icmp ne i32 %a, 104
  tail call void @llvm.assume(i1 %cmp3)
  br label %exit

exit:
  %cmp4 = icmp eq i32 %x, 2
  tail call void @llvm.assume(i1 %cmp4)
  unreachable
}

define i32 @unreachable_assume_logical(i32 %x, i32 %y) {
; CHECK-LABEL: @unreachable_assume_logical(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[CMP0:%.*]] = icmp sgt i32 [[X:%.*]], 1
; CHECK-NEXT:    [[CMP1:%.*]] = icmp eq i32 [[Y:%.*]], 1
; CHECK-NEXT:    [[OR:%.*]] = select i1 [[CMP0]], i1 true, i1 [[CMP1]]
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[OR]])
; CHECK-NEXT:    [[CMP2:%.*]] = icmp eq i32 [[X]], 1
; CHECK-NEXT:    br i1 [[CMP2]], label [[IF:%.*]], label [[EXIT:%.*]]
; CHECK:       if:
; CHECK-NEXT:    [[A:%.*]] = and i32 [[Y]], -2
; CHECK-NEXT:    [[CMP3:%.*]] = icmp ne i32 [[A]], 104
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[CMP3]])
; CHECK-NEXT:    br label [[EXIT]]
; CHECK:       exit:
; CHECK-NEXT:    unreachable
;
entry:
  %cmp0 = icmp sgt i32 %x, 1
  %cmp1 = icmp eq i32 %y, 1
  %or = select i1 %cmp0, i1 true, i1 %cmp1
  tail call void @llvm.assume(i1 %or)
  %cmp2 = icmp eq i32 %x, 1
  br i1 %cmp2, label %if, label %exit

if:
  %a = and i32 %y, -2
  %cmp3 = icmp ne i32 %a, 104
  tail call void @llvm.assume(i1 %cmp3)
  br label %exit

exit:
  %cmp4 = icmp eq i32 %x, 2
  tail call void @llvm.assume(i1 %cmp4)
  unreachable
}

define i32 @unreachable_assumes_and_store(i32 %x, i32 %y, ptr %p) {
; CHECK-LABEL: @unreachable_assumes_and_store(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[CMP0:%.*]] = icmp sgt i32 [[X:%.*]], 1
; CHECK-NEXT:    [[CMP1:%.*]] = icmp eq i32 [[Y:%.*]], 1
; CHECK-NEXT:    [[OR:%.*]] = or i1 [[CMP0]], [[CMP1]]
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[OR]])
; CHECK-NEXT:    [[CMP2:%.*]] = icmp eq i32 [[X]], 1
; CHECK-NEXT:    br i1 [[CMP2]], label [[IF:%.*]], label [[EXIT:%.*]]
; CHECK:       if:
; CHECK-NEXT:    [[A:%.*]] = and i32 [[Y]], -2
; CHECK-NEXT:    [[CMP3:%.*]] = icmp ne i32 [[A]], 104
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[CMP3]])
; CHECK-NEXT:    br label [[EXIT]]
; CHECK:       exit:
; CHECK-NEXT:    unreachable
;
entry:
  %cmp0 = icmp sgt i32 %x, 1
  %cmp1 = icmp eq i32 %y, 1
  %or = or i1 %cmp0, %cmp1
  tail call void @llvm.assume(i1 %or)
  %cmp2 = icmp eq i32 %x, 1
  br i1 %cmp2, label %if, label %exit

if:
  %a = and i32 %y, -2
  %cmp3 = icmp ne i32 %a, 104
  tail call void @llvm.assume(i1 %cmp3)
  br label %exit

exit:
  %cmp4 = icmp eq i32 %x, 2
  tail call void @llvm.assume(i1 %cmp4)
  %cmp5 = icmp ugt i32 %y, 42
  tail call void @llvm.assume(i1 %cmp5)
  store i32 %x, ptr %p
  unreachable
}

define i32 @unreachable_assumes_and_store_logical(i32 %x, i32 %y, ptr %p) {
; CHECK-LABEL: @unreachable_assumes_and_store_logical(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[CMP0:%.*]] = icmp sgt i32 [[X:%.*]], 1
; CHECK-NEXT:    [[CMP1:%.*]] = icmp eq i32 [[Y:%.*]], 1
; CHECK-NEXT:    [[OR:%.*]] = select i1 [[CMP0]], i1 true, i1 [[CMP1]]
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[OR]])
; CHECK-NEXT:    [[CMP2:%.*]] = icmp eq i32 [[X]], 1
; CHECK-NEXT:    br i1 [[CMP2]], label [[IF:%.*]], label [[EXIT:%.*]]
; CHECK:       if:
; CHECK-NEXT:    [[A:%.*]] = and i32 [[Y]], -2
; CHECK-NEXT:    [[CMP3:%.*]] = icmp ne i32 [[A]], 104
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[CMP3]])
; CHECK-NEXT:    br label [[EXIT]]
; CHECK:       exit:
; CHECK-NEXT:    unreachable
;
entry:
  %cmp0 = icmp sgt i32 %x, 1
  %cmp1 = icmp eq i32 %y, 1
  %or = select i1 %cmp0, i1 true, i1 %cmp1
  tail call void @llvm.assume(i1 %or)
  %cmp2 = icmp eq i32 %x, 1
  br i1 %cmp2, label %if, label %exit

if:
  %a = and i32 %y, -2
  %cmp3 = icmp ne i32 %a, 104
  tail call void @llvm.assume(i1 %cmp3)
  br label %exit

exit:
  %cmp4 = icmp eq i32 %x, 2
  tail call void @llvm.assume(i1 %cmp4)
  %cmp5 = icmp ugt i32 %y, 42
  tail call void @llvm.assume(i1 %cmp5)
  store i32 %x, ptr %p
  unreachable
}

define void @canonicalize_assume(ptr %0) {
; DEFAULT-LABEL: @canonicalize_assume(
; DEFAULT-NEXT:    [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[TMP0:%.*]], i64 2
; DEFAULT-NEXT:    call void @llvm.assume(i1 true) [ "align"(ptr [[TMP2]], i64 16) ]
; DEFAULT-NEXT:    ret void
;
; BUNDLES-LABEL: @canonicalize_assume(
; BUNDLES-NEXT:    call void @llvm.assume(i1 true) [ "align"(ptr [[TMP0:%.*]], i64 8) ]
; BUNDLES-NEXT:    ret void
;
  %2 = getelementptr inbounds i32, ptr %0, i64 2
  call void @llvm.assume(i1 true) [ "align"(ptr %2, i64 16) ]
  ret void
}

define void @assume_makes_and_known_assume_on_arg(ptr %p, i32 %x) {
; CHECK-LABEL: @assume_makes_and_known_assume_on_arg(
; CHECK-NEXT:    [[AND:%.*]] = and i32 [[X:%.*]], 1
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[AND]], 0
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    store i32 0, ptr [[P:%.*]], align 4
; CHECK-NEXT:    ret void
;
  %and = and i32 %x, 1
  %cmp = icmp eq i32 %and, 0
  call void @llvm.assume(i1 %cmp)
  %and2 = and i32 %x, 1
  store i32 %and2, ptr %p
  ret void
}

define void @assume_makes_and_known_assume_on_mul(ptr %p, i32 %a, i32 %b) {
; CHECK-LABEL: @assume_makes_and_known_assume_on_mul(
; CHECK-NEXT:    [[X:%.*]] = mul i32 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    [[AND:%.*]] = and i32 [[X]], 1
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[AND]], 0
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    store i32 0, ptr [[P:%.*]], align 4
; CHECK-NEXT:    ret void
;
  %x = mul i32 %a, %b
  %and = and i32 %x, 1
  %cmp = icmp eq i32 %and, 0
  call void @llvm.assume(i1 %cmp)
  %and2 = and i32 %x, 1
  store i32 %and2, ptr %p
  ret void
}

define void @assume_makes_and_known_assume_on_bitwise(ptr %p, i32 %a, i32 %b) {
; CHECK-LABEL: @assume_makes_and_known_assume_on_bitwise(
; CHECK-NEXT:    [[X:%.*]] = or i32 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    [[AND:%.*]] = and i32 [[X]], 1
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[AND]], 0
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    store i32 0, ptr [[P:%.*]], align 4
; CHECK-NEXT:    ret void
;
  %x = or i32 %a, %b
  %and = and i32 %x, 1
  %cmp = icmp eq i32 %and, 0
  call void @llvm.assume(i1 %cmp)
  %and2 = and i32 %x, 1
  store i32 %and2, ptr %p
  ret void
}

define i32 @range_16_31_top28(i32 %x) {
; CHECK-LABEL: @range_16_31_top28(
; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[X:%.*]], -16
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[TMP1]], 16
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    ret i32 16
;
  %add = add i32 %x, -16
  %cmp = icmp ult i32 %add, 16
  call void @llvm.assume(i1 %cmp)
  %res = and i32 %x, u0xfffffff0
  ret i32 %res
}

define i32 @range_16_31_top29(i32 %x) {
; CHECK-LABEL: @range_16_31_top29(
; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[X:%.*]], -16
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[TMP1]], 16
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    [[RES:%.*]] = and i32 [[X]], 24
; CHECK-NEXT:    ret i32 [[RES]]
;
  %add = add i32 %x, -16
  %cmp = icmp ult i32 %add, 16
  call void @llvm.assume(i1 %cmp)
  %res = and i32 %x, u0xfffffff8
  ret i32 %res
}

define i32 @range_16_30_top28(i32 %x) {
; CHECK-LABEL: @range_16_30_top28(
; CHECK-NEXT:    [[ADD:%.*]] = add i32 [[X:%.*]], -16
; CHECK-NEXT:    [[CMP:%.*]] = icmp ult i32 [[ADD]], 15
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    ret i32 16
;
  %add = add i32 %x, -16
  %cmp = icmp ult i32 %add, 15
  call void @llvm.assume(i1 %cmp)
  %res = and i32 %x, u0xfffffff0
  ret i32 %res
}

define i32 @range_16_32_top28(i32 %x) {
; CHECK-LABEL: @range_16_32_top28(
; CHECK-NEXT:    [[ADD:%.*]] = add i32 [[X:%.*]], -16
; CHECK-NEXT:    [[CMP:%.*]] = icmp ult i32 [[ADD]], 17
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    [[RES:%.*]] = and i32 [[X]], 48
; CHECK-NEXT:    ret i32 [[RES]]
;
  %add = add i32 %x, -16
  %cmp = icmp ult i32 %add, 17
  call void @llvm.assume(i1 %cmp)
  %res = and i32 %x, u0xfffffff0
  ret i32 %res
}

define i32 @range_16_32_top27(i32 %x) {
; CHECK-LABEL: @range_16_32_top27(
; CHECK-NEXT:    [[ADD:%.*]] = add i32 [[X:%.*]], -16
; CHECK-NEXT:    [[CMP:%.*]] = icmp ult i32 [[ADD]], 17
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    [[RES:%.*]] = and i32 [[X]], 32
; CHECK-NEXT:    ret i32 [[RES]]
;
  %add = add i32 %x, -16
  %cmp = icmp ult i32 %add, 17
  call void @llvm.assume(i1 %cmp)
  %res = and i32 %x, u0xffffffe0
  ret i32 %res
}

define i32 @range_16_32_top26(i32 %x) {
; CHECK-LABEL: @range_16_32_top26(
; CHECK-NEXT:    [[ADD:%.*]] = add i32 [[X:%.*]], -16
; CHECK-NEXT:    [[CMP:%.*]] = icmp ult i32 [[ADD]], 17
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    ret i32 0
;
  %add = add i32 %x, -16
  %cmp = icmp ult i32 %add, 17
  call void @llvm.assume(i1 %cmp)
  %res = and i32 %x, u0xffffffc0
  ret i32 %res
}

define i32 @range_15_31_top28(i32 %x) {
; CHECK-LABEL: @range_15_31_top28(
; CHECK-NEXT:    [[ADD:%.*]] = add i32 [[X:%.*]], -15
; CHECK-NEXT:    [[CMP:%.*]] = icmp ult i32 [[ADD]], 16
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    [[RES:%.*]] = and i32 [[X]], 16
; CHECK-NEXT:    ret i32 [[RES]]
;
  %add = add i32 %x, -15
  %cmp = icmp ult i32 %add, 16
  call void @llvm.assume(i1 %cmp)
  %res = and i32 %x, u0xfffffff0
  ret i32 %res
}

define i32 @range_15_31_top27(i32 %x) {
; CHECK-LABEL: @range_15_31_top27(
; CHECK-NEXT:    [[ADD:%.*]] = add i32 [[X:%.*]], -15
; CHECK-NEXT:    [[CMP:%.*]] = icmp ult i32 [[ADD]], 16
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    ret i32 0
;
  %add = add i32 %x, -15
  %cmp = icmp ult i32 %add, 16
  call void @llvm.assume(i1 %cmp)
  %res = and i32 %x, u0xffffffe0
  ret i32 %res
}

declare void @llvm.dbg.value(metadata, metadata, metadata)

!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!5, !6, !7, !8}

!0 = distinct !DICompileUnit(language: DW_LANG_C, file: !3, producer: "Me", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: null, retainedTypes: null, imports: null)
!1 = !DILocalVariable(name: "", arg: 1, scope: !2, file: null, line: 1, type: null)
!2 = distinct !DISubprogram(name: "debug", linkageName: "debug", scope: null, file: null, line: 0, type: null, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: true, unit: !0)
!3 = !DIFile(filename: "consecutive-fences.ll", directory: "")
!5 = !{i32 2, !"Dwarf Version", i32 4}
!6 = !{i32 2, !"Debug Info Version", i32 3}
!7 = !{i32 1, !"wchar_size", i32 4}
!8 = !{i32 7, !"PIC Level", i32 2}
!9 = !DILocation(line: 0, column: 0, scope: !2)


attributes #0 = { nounwind uwtable }
attributes #1 = { nounwind }