; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=instcombine -S | FileCheck --check-prefixes=CHECK,DEFAULT %s
; RUN: opt < %s -passes=instcombine --enable-knowledge-retention -S | FileCheck --check-prefixes=CHECK,BUNDLES %s

; RUN: opt < %s -passes=instcombine -S --try-experimental-debuginfo-iterators | FileCheck --check-prefixes=CHECK,DEFAULT %s
; RUN: opt < %s -passes=instcombine --enable-knowledge-retention -S --try-experimental-debuginfo-iterators | FileCheck --check-prefixes=CHECK,BUNDLES %s

target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"

declare void @llvm.assume(i1) #1

; Check that the assume has not been removed:

define i32 @foo1(ptr %a) #0 {
; DEFAULT-LABEL: @foo1(
; DEFAULT-NEXT:    [[T0:%.*]] = load i32, ptr [[A:%.*]], align 4
; DEFAULT-NEXT:    [[PTRINT:%.*]] = ptrtoint ptr [[A]] to i64
; DEFAULT-NEXT:    [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 31
; DEFAULT-NEXT:    [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
; DEFAULT-NEXT:    tail call void @llvm.assume(i1 [[MASKCOND]])
; DEFAULT-NEXT:    ret i32 [[T0]]
;
; BUNDLES-LABEL: @foo1(
; BUNDLES-NEXT:    [[T0:%.*]] = load i32, ptr [[A:%.*]], align 4
; BUNDLES-NEXT:    call void @llvm.assume(i1 true) [ "align"(ptr [[A]], i64 32) ]
; BUNDLES-NEXT:    ret i32 [[T0]]
;
  %t0 = load i32, ptr %a, align 4
  %ptrint = ptrtoint ptr %a to i64
  %maskedptr = and i64 %ptrint, 31
  %maskcond = icmp eq i64 %maskedptr, 0
  tail call void @llvm.assume(i1 %maskcond)
  ret i32 %t0
}

define i32 @align_assume_trunc_cond(ptr %a) #0 {
; DEFAULT-LABEL: @align_assume_trunc_cond(
; DEFAULT-NEXT:    [[T0:%.*]] = load i32, ptr [[A:%.*]], align 4
; DEFAULT-NEXT:    [[PTRINT:%.*]] = ptrtoint ptr [[A]] to i64
; DEFAULT-NEXT:    [[TRUNC:%.*]] = trunc i64 [[PTRINT]] to i1
; DEFAULT-NEXT:    [[MASKCOND:%.*]] = xor i1 [[TRUNC]], true
; DEFAULT-NEXT:    tail call void @llvm.assume(i1 [[MASKCOND]])
; DEFAULT-NEXT:    ret i32 [[T0]]
;
; BUNDLES-LABEL: @align_assume_trunc_cond(
; BUNDLES-NEXT:    [[T0:%.*]] = load i32, ptr [[A:%.*]], align 4
; BUNDLES-NEXT:    call void @llvm.assume(i1 true) [ "align"(ptr [[A]], i64 2) ]
; BUNDLES-NEXT:    ret i32 [[T0]]
;
  %t0 = load i32, ptr %a, align 4
  %ptrint = ptrtoint ptr %a to i64
  %trunc = trunc i64 %ptrint to i1
  %maskcond = xor i1 %trunc, true
  tail call void @llvm.assume(i1 %maskcond)
  ret i32 %t0
}

; Same check as in @foo1, but make sure it works if the assume is first too.
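; (In the BUNDLES runs, --enable-knowledge-retention turns this compare-based
; alignment assumption into an "align" operand bundle, as the BUNDLES check
; lines above show.)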

define i32 @foo2(ptr %a) #0 {
; DEFAULT-LABEL: @foo2(
; DEFAULT-NEXT:    [[PTRINT:%.*]] = ptrtoint ptr [[A:%.*]] to i64
; DEFAULT-NEXT:    [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 31
; DEFAULT-NEXT:    [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
; DEFAULT-NEXT:    tail call void @llvm.assume(i1 [[MASKCOND]])
; DEFAULT-NEXT:    [[T0:%.*]] = load i32, ptr [[A]], align 4
; DEFAULT-NEXT:    ret i32 [[T0]]
;
; BUNDLES-LABEL: @foo2(
; BUNDLES-NEXT:    call void @llvm.assume(i1 true) [ "align"(ptr [[A:%.*]], i64 32) ]
; BUNDLES-NEXT:    [[T0:%.*]] = load i32, ptr [[A]], align 4
; BUNDLES-NEXT:    ret i32 [[T0]]
;
  %ptrint = ptrtoint ptr %a to i64
  %maskedptr = and i64 %ptrint, 31
  %maskcond = icmp eq i64 %maskedptr, 0
  tail call void @llvm.assume(i1 %maskcond)
  %t0 = load i32, ptr %a, align 4
  ret i32 %t0
}

define i32 @simple(i32 %a) #1 {
; CHECK-LABEL: @simple(
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[A:%.*]], 4
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    ret i32 [[A]]
;
  %cmp = icmp eq i32 %a, 4
  tail call void @llvm.assume(i1 %cmp)
  ret i32 %a
}

define i32 @can1(i1 %a, i1 %b, i1 %c) {
; CHECK-LABEL: @can1(
; CHECK-NEXT:    call void @llvm.assume(i1 [[A:%.*]])
; CHECK-NEXT:    call void @llvm.assume(i1 [[B:%.*]])
; CHECK-NEXT:    call void @llvm.assume(i1 [[C:%.*]])
; CHECK-NEXT:    ret i32 5
;
  %and1 = and i1 %a, %b
  %and = and i1 %and1, %c
  tail call void @llvm.assume(i1 %and)
  ret i32 5
}

define i32 @can1_logical(i1 %a, i1 %b, i1 %c) {
; CHECK-LABEL: @can1_logical(
; CHECK-NEXT:    call void @llvm.assume(i1 [[A:%.*]])
; CHECK-NEXT:    call void @llvm.assume(i1 [[B:%.*]])
; CHECK-NEXT:    call void @llvm.assume(i1 [[C:%.*]])
; CHECK-NEXT:    ret i32 5
;
  %and1 = select i1 %a, i1 %b, i1 false
  %and = select i1 %and1, i1 %c, i1 false
  tail call void @llvm.assume(i1 %and)
  ret i32 5
}

define i32 @can2(i1 %a, i1 %b, i1 %c) {
; CHECK-LABEL: @can2(
; CHECK-NEXT:    [[TMP1:%.*]] = xor i1 [[A:%.*]], true
; CHECK-NEXT:    call void @llvm.assume(i1 [[TMP1]])
; CHECK-NEXT:    [[TMP2:%.*]] = xor i1 [[B:%.*]], true
; CHECK-NEXT:    call void @llvm.assume(i1 [[TMP2]])
; CHECK-NEXT:    ret i32 5
;
  %v = or i1 %a, %b
  %w = xor i1 %v, 1
  tail call void @llvm.assume(i1 %w)
  ret i32 5
}

define i32 @can2_logical(i1 %a, i1 %b, i1 %c) {
; CHECK-LABEL: @can2_logical(
; CHECK-NEXT:    [[TMP1:%.*]] = xor i1 [[A:%.*]], true
; CHECK-NEXT:    call void @llvm.assume(i1 [[TMP1]])
; CHECK-NEXT:    [[TMP2:%.*]] = xor i1 [[B:%.*]], true
; CHECK-NEXT:    call void @llvm.assume(i1 [[TMP2]])
; CHECK-NEXT:    ret i32 5
;
  %v = select i1 %a, i1 true, i1 %b
  %w = xor i1 %v, 1
  tail call void @llvm.assume(i1 %w)
  ret i32 5
}

define i32 @bar1(i32 %a) #0 {
; CHECK-LABEL: @bar1(
; CHECK-NEXT:    [[AND:%.*]] = and i32 [[A:%.*]], 7
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[AND]], 1
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    ret i32 1
;
  %and1 = and i32 %a, 3
  %and = and i32 %a, 7
  %cmp = icmp eq i32 %and, 1
  tail call void @llvm.assume(i1 %cmp)
  ret i32 %and1
}

define i32 @bar2(i32 %a) #0 {
; CHECK-LABEL: @bar2(
; CHECK-NEXT:    [[AND:%.*]] = and i32 [[A:%.*]], 7
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[AND]], 1
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    ret i32 1
;
  %and = and i32 %a, 7
  %cmp = icmp eq i32 %and, 1
  tail call void @llvm.assume(i1 %cmp)
  %and1 = and i32 %a, 3
  ret i32 %and1
}

define i32 @bar3(i32 %a, i1 %x, i1 %y) #0 {
; CHECK-LABEL: @bar3(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[X:%.*]])
; CHECK-NEXT:    [[AND:%.*]] = and i32 [[A:%.*]], 7
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[AND]], 1
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[Y:%.*]])
; CHECK-NEXT:    ret i32 1
;
entry:
  %and1 = and i32 %a, 3

; Don't be fooled by other assumes around.

  tail call void @llvm.assume(i1 %x)

  %and = and i32 %a, 7
  %cmp = icmp eq i32 %and, 1
  tail call void @llvm.assume(i1 %cmp)

  tail call void @llvm.assume(i1 %y)

  ret i32 %and1
}

; If we allow recursive known bits queries based on
; assumptions, we could do better here:
; a == b and a & 7 == 1, so b & 7 == 1, so b & 3 == 1, so return 1.

define i32 @known_bits_recursion_via_assumes(i32 %a, i32 %b) {
; CHECK-LABEL: @known_bits_recursion_via_assumes(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[AND1:%.*]] = and i32 [[B:%.*]], 3
; CHECK-NEXT:    [[AND:%.*]] = and i32 [[A:%.*]], 7
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[AND]], 1
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    [[CMP2:%.*]] = icmp eq i32 [[A]], [[B]]
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[CMP2]])
; CHECK-NEXT:    ret i32 [[AND1]]
;
entry:
  %and1 = and i32 %b, 3
  %and = and i32 %a, 7
  %cmp = icmp eq i32 %and, 1
  tail call void @llvm.assume(i1 %cmp)
  %cmp2 = icmp eq i32 %a, %b
  tail call void @llvm.assume(i1 %cmp2)
  ret i32 %and1
}

define i32 @icmp1(i32 %a) #0 {
; CHECK-LABEL: @icmp1(
; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[A:%.*]], 5
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    [[CONV:%.*]] = zext i1 [[CMP]] to i32
; CHECK-NEXT:    ret i32 [[CONV]]
;
  %cmp = icmp sgt i32 %a, 5
  tail call void @llvm.assume(i1 %cmp)
  %conv = zext i1 %cmp to i32
  ret i32 %conv
}

define i32 @icmp2(i32 %a) #0 {
; CHECK-LABEL: @icmp2(
; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[A:%.*]], 5
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    ret i32 0
;
  %cmp = icmp sgt i32 %a, 5
  tail call void @llvm.assume(i1 %cmp)
  %t0 = zext i1 %cmp to i32
  %lnot.ext = xor i32 %t0, 1
  ret i32 %lnot.ext
}

; If the 'not' of a condition is known true, then the condition must be false.

define i1 @assume_not(i1 %cond) {
; CHECK-LABEL: @assume_not(
; CHECK-NEXT:    [[NOTCOND:%.*]] = xor i1 [[COND:%.*]], true
; CHECK-NEXT:    call void @llvm.assume(i1 [[NOTCOND]])
; CHECK-NEXT:    ret i1 [[COND]]
;
  %notcond = xor i1 %cond, true
  call void @llvm.assume(i1 %notcond)
  ret i1 %cond
}

declare void @escape(ptr %a)

; Canonicalize a nonnull assumption on a load into metadata form.
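; The @bundle1/@bundle2 tests below cover assumes that already carry operand
; bundles: per the check lines, a "nonnull" bundle on a live pointer is kept,
; while an assume whose only bundle is "ignore"(ptr undef) is removed.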

define i32 @bundle1(ptr %P) {
; CHECK-LABEL: @bundle1(
; CHECK-NEXT:    tail call void @llvm.assume(i1 true) [ "nonnull"(ptr [[P:%.*]]) ]
; CHECK-NEXT:    [[LOAD:%.*]] = load i32, ptr [[P]], align 4
; CHECK-NEXT:    ret i32 [[LOAD]]
;
  tail call void @llvm.assume(i1 true) ["nonnull"(ptr %P)]
  %load = load i32, ptr %P
  ret i32 %load
}

define i32 @bundle2(ptr %P) {
; CHECK-LABEL: @bundle2(
; CHECK-NEXT:    [[LOAD:%.*]] = load i32, ptr [[P:%.*]], align 4
; CHECK-NEXT:    ret i32 [[LOAD]]
;
  tail call void @llvm.assume(i1 true) ["ignore"(ptr undef)]
  %load = load i32, ptr %P
  ret i32 %load
}

define i1 @nonnull1(ptr %a) {
; CHECK-LABEL: @nonnull1(
; CHECK-NEXT:    [[LOAD:%.*]] = load ptr, ptr [[A:%.*]], align 8, !nonnull [[META6:![0-9]+]], !noundef [[META6]]
; CHECK-NEXT:    tail call void @escape(ptr nonnull [[LOAD]])
; CHECK-NEXT:    ret i1 false
;
  %load = load ptr, ptr %a
  %cmp = icmp ne ptr %load, null
  tail call void @llvm.assume(i1 %cmp)
  tail call void @escape(ptr %load)
  %rval = icmp eq ptr %load, null
  ret i1 %rval
}

; Make sure the above canonicalization applies only
; to pointer types. Doing otherwise would be illegal.

define i1 @nonnull2(ptr %a) {
; CHECK-LABEL: @nonnull2(
; CHECK-NEXT:    [[LOAD:%.*]] = load i32, ptr [[A:%.*]], align 4
; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i32 [[LOAD]], 0
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    ret i1 false
;
  %load = load i32, ptr %a
  %cmp = icmp ne i32 %load, 0
  tail call void @llvm.assume(i1 %cmp)
  %rval = icmp eq i32 %load, 0
  ret i1 %rval
}

; Make sure the above canonicalization does not trigger
; if the assume is control dependent on something else

define i1 @nonnull3(ptr %a, i1 %control) {
; FIXME: in the BUNDLES version we could duplicate the load and keep the assume nonnull.
; DEFAULT-LABEL: @nonnull3(
; DEFAULT-NEXT:  entry:
; DEFAULT-NEXT:    [[LOAD:%.*]] = load ptr, ptr [[A:%.*]], align 8
; DEFAULT-NEXT:    [[CMP:%.*]] = icmp ne ptr [[LOAD]], null
; DEFAULT-NEXT:    br i1 [[CONTROL:%.*]], label [[TAKEN:%.*]], label [[NOT_TAKEN:%.*]]
; DEFAULT:       taken:
; DEFAULT-NEXT:    tail call void @llvm.assume(i1 [[CMP]])
; DEFAULT-NEXT:    ret i1 false
; DEFAULT:       not_taken:
; DEFAULT-NEXT:    [[RVAL_2:%.*]] = icmp sgt ptr [[LOAD]], null
; DEFAULT-NEXT:    ret i1 [[RVAL_2]]
;
; BUNDLES-LABEL: @nonnull3(
; BUNDLES-NEXT:  entry:
; BUNDLES-NEXT:    br i1 [[CONTROL:%.*]], label [[TAKEN:%.*]], label [[NOT_TAKEN:%.*]]
; BUNDLES:       taken:
; BUNDLES-NEXT:    ret i1 false
; BUNDLES:       not_taken:
; BUNDLES-NEXT:    [[LOAD:%.*]] = load ptr, ptr [[A:%.*]], align 8
; BUNDLES-NEXT:    [[RVAL_2:%.*]] = icmp sgt ptr [[LOAD]], null
; BUNDLES-NEXT:    ret i1 [[RVAL_2]]
;
entry:
  %load = load ptr, ptr %a
  %cmp = icmp ne ptr %load, null
  br i1 %control, label %taken, label %not_taken
taken:
  tail call void @llvm.assume(i1 %cmp)
  %rval = icmp eq ptr %load, null
  ret i1 %rval
not_taken:
  %rval.2 = icmp sgt ptr %load, null
  ret i1 %rval.2
}

; Make sure the above canonicalization does not trigger
; if the path from the load to the assume is potentially
; interrupted by an exception being thrown

define i1 @nonnull4(ptr %a) {
; DEFAULT-LABEL: @nonnull4(
; DEFAULT-NEXT:    [[LOAD:%.*]] = load ptr, ptr [[A:%.*]], align 8
; DEFAULT-NEXT:    tail call void @escape(ptr [[LOAD]])
; DEFAULT-NEXT:    [[CMP:%.*]] = icmp ne ptr [[LOAD]], null
; DEFAULT-NEXT:    tail call void @llvm.assume(i1 [[CMP]])
; DEFAULT-NEXT:    ret i1 false
;
; BUNDLES-LABEL: @nonnull4(
; BUNDLES-NEXT:    [[LOAD:%.*]] = load ptr, ptr [[A:%.*]], align 8
; BUNDLES-NEXT:    tail call void @escape(ptr [[LOAD]])
; BUNDLES-NEXT:    call void @llvm.assume(i1 true) [ "nonnull"(ptr [[LOAD]]) ]
; BUNDLES-NEXT:    ret i1 false
;
  %load = load ptr, ptr %a
  ;; This call may throw!
  tail call void @escape(ptr %load)
  %cmp = icmp ne ptr %load, null
  tail call void @llvm.assume(i1 %cmp)
  %rval = icmp eq ptr %load, null
  ret i1 %rval
}
define i1 @nonnull5(ptr %a) {
; CHECK-LABEL: @nonnull5(
; CHECK-NEXT:    [[LOAD:%.*]] = load ptr, ptr [[A:%.*]], align 8
; CHECK-NEXT:    tail call void @escape(ptr [[LOAD]])
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt ptr [[LOAD]], null
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    ret i1 false
;
  %load = load ptr, ptr %a
  ;; This call may throw!
  tail call void @escape(ptr %load)
  %integral = ptrtoint ptr %load to i64
  %cmp = icmp slt i64 %integral, 0
  tail call void @llvm.assume(i1 %cmp) ; %load has at least highest bit set
  %rval = icmp eq ptr %load, null
  ret i1 %rval
}

; PR35846 - https://bugs.llvm.org/show_bug.cgi?id=35846

define i32 @assumption_conflicts_with_known_bits(i32 %a, i32 %b) {
; CHECK-LABEL: @assumption_conflicts_with_known_bits(
; CHECK-NEXT:    store i1 true, ptr poison, align 1
; CHECK-NEXT:    ret i32 poison
;
  %and1 = and i32 %b, 3
  %B1 = lshr i32 %and1, %and1
  %B3 = shl nuw nsw i32 %and1, %B1
  %cmp = icmp eq i32 %B3, 1
  tail call void @llvm.assume(i1 %cmp)
  %cmp2 = icmp eq i32 %B1, %B3
  tail call void @llvm.assume(i1 %cmp2)
  ret i32 %and1
}

; PR37726 - https://bugs.llvm.org/show_bug.cgi?id=37726
; There's a loophole in eliminating a redundant assumption when
; we have conflicting assumptions. Verify that debuginfo doesn't
; get in the way of the fold.

define void @debug_interference(i8 %x) {
; CHECK-LABEL: @debug_interference(
; CHECK-NEXT:    #dbg_value(i32 5, [[META7:![0-9]+]], !DIExpression(), [[META9:![0-9]+]])
; CHECK-NEXT:    store i1 true, ptr poison, align 1
; CHECK-NEXT:    ret void
;
  %cmp1 = icmp eq i8 %x, 0
  %cmp2 = icmp ne i8 %x, 0
  tail call void @llvm.assume(i1 %cmp1)
  tail call void @llvm.dbg.value(metadata i32 5, metadata !1, metadata !DIExpression()), !dbg !9
  tail call void @llvm.assume(i1 %cmp1)
  tail call void @llvm.dbg.value(metadata i32 5, metadata !1, metadata !DIExpression()), !dbg !9
  tail call void @llvm.assume(i1 %cmp2)
  tail call void @llvm.dbg.value(metadata i32 5, metadata !1, metadata !DIExpression()), !dbg !9
  tail call void @llvm.assume(i1 %cmp2)
  ret void
}

; This would crash.
; Does it ever make sense to peek through a bitcast of the icmp operand?

define i32 @PR40940(<4 x i8> %x) {
; CHECK-LABEL: @PR40940(
; CHECK-NEXT:    [[SHUF:%.*]] = shufflevector <4 x i8> [[X:%.*]], <4 x i8> poison, <4 x i32> <i32 1, i32 1, i32 2, i32 3>
; CHECK-NEXT:    [[T2:%.*]] = bitcast <4 x i8> [[SHUF]] to i32
; CHECK-NEXT:    [[T3:%.*]] = icmp ult i32 [[T2]], 65536
; CHECK-NEXT:    call void @llvm.assume(i1 [[T3]])
; CHECK-NEXT:    ret i32 [[T2]]
;
  %shuf = shufflevector <4 x i8> %x, <4 x i8> undef, <4 x i32> <i32 1, i32 1, i32 2, i32 3>
  %t2 = bitcast <4 x i8> %shuf to i32
  %t3 = icmp ult i32 %t2, 65536
  call void @llvm.assume(i1 %t3)
  ret i32 %t2
}

define i1 @nonnull3A(ptr %a, i1 %control) {
; DEFAULT-LABEL: @nonnull3A(
; DEFAULT-NEXT:  entry:
; DEFAULT-NEXT:    [[LOAD:%.*]] = load ptr, ptr [[A:%.*]], align 8
; DEFAULT-NEXT:    br i1 [[CONTROL:%.*]], label [[TAKEN:%.*]], label [[NOT_TAKEN:%.*]]
; DEFAULT:       taken:
; DEFAULT-NEXT:    [[CMP:%.*]] = icmp ne ptr [[LOAD]], null
; DEFAULT-NEXT:    call void @llvm.assume(i1 [[CMP]])
; DEFAULT-NEXT:    ret i1 [[CMP]]
; DEFAULT:       not_taken:
; DEFAULT-NEXT:    [[RVAL_2:%.*]] = icmp sgt ptr [[LOAD]], null
; DEFAULT-NEXT:    ret i1 [[RVAL_2]]
;
; BUNDLES-LABEL: @nonnull3A(
; BUNDLES-NEXT:  entry:
; BUNDLES-NEXT:    br i1 [[CONTROL:%.*]], label [[TAKEN:%.*]], label [[NOT_TAKEN:%.*]]
; BUNDLES:       taken:
; BUNDLES-NEXT:    ret i1 true
; BUNDLES:       not_taken:
; BUNDLES-NEXT:    [[LOAD:%.*]] = load ptr, ptr [[A:%.*]], align 8
; BUNDLES-NEXT:    [[RVAL_2:%.*]] = icmp sgt ptr [[LOAD]], null
; BUNDLES-NEXT:    ret i1 [[RVAL_2]]
;
entry:
  %load = load ptr, ptr %a
  %cmp = icmp ne ptr %load, null
  br i1 %control, label %taken, label %not_taken
taken:
  call void @llvm.assume(i1 %cmp)
  ret i1 %cmp
not_taken:
  call void @llvm.assume(i1 %cmp)
  %rval.2 = icmp sgt ptr %load, null
  ret i1 %rval.2
}

define i1 @nonnull3B(ptr %a, i1 %control) {
; CHECK-LABEL: @nonnull3B(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br i1 [[CONTROL:%.*]], label [[TAKEN:%.*]], label [[NOT_TAKEN:%.*]]
; CHECK:       taken:
; CHECK-NEXT:    [[LOAD:%.*]] = load ptr, ptr [[A:%.*]], align 8
; CHECK-NEXT:    [[CMP:%.*]] = icmp ne ptr [[LOAD]], null
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]]) [ "nonnull"(ptr [[LOAD]]) ]
; CHECK-NEXT:    ret i1 [[CMP]]
; CHECK:       not_taken:
; CHECK-NEXT:    ret i1 false
;
entry:
  %load = load ptr, ptr %a
  %cmp = icmp ne ptr %load, null
  br i1 %control, label %taken, label %not_taken
taken:
  call void @llvm.assume(i1 %cmp) ["nonnull"(ptr %load)]
  ret i1 %cmp
not_taken:
  call void @llvm.assume(i1 %cmp) ["nonnull"(ptr %load)]
  ret i1 %control
}

declare i1 @tmp1(i1)

define i1 @nonnull3C(ptr %a, i1 %control) {
; CHECK-LABEL: @nonnull3C(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br i1 [[CONTROL:%.*]], label [[TAKEN:%.*]], label [[NOT_TAKEN:%.*]]
; CHECK:       taken:
; CHECK-NEXT:    [[LOAD:%.*]] = load ptr, ptr [[A:%.*]], align 8
; CHECK-NEXT:    [[CMP:%.*]] = icmp ne ptr [[LOAD]], null
; CHECK-NEXT:    [[CMP2:%.*]] = call i1 @tmp1(i1 [[CMP]])
; CHECK-NEXT:    br label [[EXIT:%.*]]
; CHECK:       exit:
; CHECK-NEXT:    ret i1 [[CMP2]]
; CHECK:       not_taken:
; CHECK-NEXT:    ret i1 false
;
entry:
  %load = load ptr, ptr %a
  %cmp = icmp ne ptr %load, null
  br i1 %control, label %taken, label %not_taken
taken:
  %cmp2 = call i1 @tmp1(i1 %cmp)
  br label %exit
exit:
  ; FIXME: this shouldn't be dropped because it is still dominated by the new position of %load
  call void @llvm.assume(i1 %cmp) ["nonnull"(ptr %load)]
  ret i1 %cmp2
not_taken:
  call void @llvm.assume(i1 %cmp)
  ret i1 %control
}

define i1 @nonnull3D(ptr %a, i1 %control) {
; CHECK-LABEL: @nonnull3D(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br i1 [[CONTROL:%.*]], label [[TAKEN:%.*]], label [[NOT_TAKEN:%.*]]
; CHECK:       taken:
; CHECK-NEXT:    [[LOAD:%.*]] = load ptr, ptr [[A:%.*]], align 8
; CHECK-NEXT:    [[CMP:%.*]] = icmp ne ptr [[LOAD]], null
; CHECK-NEXT:    [[CMP2:%.*]] = call i1 @tmp1(i1 [[CMP]])
; CHECK-NEXT:    br label [[EXIT:%.*]]
; CHECK:       exit:
; CHECK-NEXT:    ret i1 [[CMP2]]
; CHECK:       not_taken:
; CHECK-NEXT:    ret i1 false
;
entry:
  %load = load ptr, ptr %a
  %cmp = icmp ne ptr %load, null
  br i1 %control, label %taken, label %not_taken
taken:
  %cmp2 = call i1 @tmp1(i1 %cmp)
  br label %exit
exit:
  ret i1 %cmp2
not_taken:
  call void @llvm.assume(i1 %cmp) ["nonnull"(ptr %load)]
  ret i1 %control
}


define void @always_true_assumption() {
; CHECK-LABEL: @always_true_assumption(
; CHECK-NEXT:    ret void
;
  call void @llvm.assume(i1 true)
  ret void
}

; The alloca guarantees that the low bits of %a are zero because of alignment.
; The assume says the opposite. Make sure we don't crash.

define i64 @PR31809() {
; CHECK-LABEL: @PR31809(
; CHECK-NEXT:    store i1 true, ptr poison, align 1
; CHECK-NEXT:    ret i64 poison
;
  %a = alloca i32
  %t1 = ptrtoint ptr %a to i64
  %cond = icmp eq i64 %t1, 3
  call void @llvm.assume(i1 %cond)
  ret i64 %t1
}

; Similar to above: there's no way to know which assumption is truthful,
; so just don't crash.

define i8 @conflicting_assumptions(i8 %x) {
; CHECK-LABEL: @conflicting_assumptions(
; CHECK-NEXT:    store i1 true, ptr poison, align 1
; CHECK-NEXT:    ret i8 poison
;
  %add = add i8 %x, 1
  %cond1 = icmp eq i8 %x, 3
  call void @llvm.assume(i1 %cond1)
  %cond2 = icmp eq i8 %x, 4
  call void @llvm.assume(i1 %cond2)
  ret i8 %add
}

; Another case of conflicting assumptions. This would crash because we'd
; try to set more known bits than existed in the known bits struct.

define void @PR36270(i32 %b) {
; CHECK-LABEL: @PR36270(
; CHECK-NEXT:    unreachable
;
  %B7 = xor i32 -1, 2147483647
  %and1 = and i32 %b, 3
  %B12 = lshr i32 %B7, %and1
  %C1 = icmp ult i32 %and1, %B12
  tail call void @llvm.assume(i1 %C1)
  %cmp2 = icmp eq i32 0, %B12
  tail call void @llvm.assume(i1 %cmp2)
  unreachable
}

; PR47416

define i32 @unreachable_assume(i32 %x, i32 %y) {
; CHECK-LABEL: @unreachable_assume(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[CMP0:%.*]] = icmp sgt i32 [[X:%.*]], 1
; CHECK-NEXT:    [[CMP1:%.*]] = icmp eq i32 [[Y:%.*]], 1
; CHECK-NEXT:    [[OR:%.*]] = or i1 [[CMP0]], [[CMP1]]
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[OR]])
; CHECK-NEXT:    [[CMP2:%.*]] = icmp eq i32 [[X]], 1
; CHECK-NEXT:    br i1 [[CMP2]], label [[IF:%.*]], label [[EXIT:%.*]]
; CHECK:       if:
; CHECK-NEXT:    [[A:%.*]] = and i32 [[Y]], -2
; CHECK-NEXT:    [[CMP3:%.*]] = icmp ne i32 [[A]], 104
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[CMP3]])
; CHECK-NEXT:    br label [[EXIT]]
; CHECK:       exit:
; CHECK-NEXT:    unreachable
;
entry:
  %cmp0 = icmp sgt i32 %x, 1
  %cmp1 = icmp eq i32 %y, 1
  %or = or i1 %cmp0, %cmp1
  tail call void @llvm.assume(i1 %or)
  %cmp2 = icmp eq i32 %x, 1
  br i1 %cmp2, label %if, label %exit

if:
  %a = and i32 %y, -2
  %cmp3 = icmp ne i32 %a, 104
  tail call void @llvm.assume(i1 %cmp3)
  br label %exit

exit:
  %cmp4 = icmp eq i32 %x, 2
  tail call void @llvm.assume(i1 %cmp4)
  unreachable
}

define i32 @unreachable_assume_logical(i32 %x, i32 %y) {
; CHECK-LABEL: @unreachable_assume_logical(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[CMP0:%.*]] = icmp sgt i32 [[X:%.*]], 1
; CHECK-NEXT:    [[CMP1:%.*]] = icmp eq i32 [[Y:%.*]], 1
; CHECK-NEXT:    [[OR:%.*]] = select i1 [[CMP0]], i1 true, i1 [[CMP1]]
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[OR]])
; CHECK-NEXT:    [[CMP2:%.*]] = icmp eq i32 [[X]], 1
; CHECK-NEXT:    br i1 [[CMP2]], label [[IF:%.*]], label [[EXIT:%.*]]
; CHECK:       if:
; CHECK-NEXT:    [[A:%.*]] = and i32 [[Y]], -2
; CHECK-NEXT:    [[CMP3:%.*]] = icmp ne i32 [[A]], 104
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[CMP3]])
; CHECK-NEXT:    br label [[EXIT]]
; CHECK:       exit:
; CHECK-NEXT:    unreachable
;
entry:
  %cmp0 = icmp sgt i32 %x, 1
  %cmp1 = icmp eq i32 %y, 1
  %or = select i1 %cmp0, i1 true, i1 %cmp1
  tail call void @llvm.assume(i1 %or)
  %cmp2 = icmp eq i32 %x, 1
  br i1 %cmp2, label %if, label %exit

if:
  %a = and i32 %y, -2
  %cmp3 = icmp ne i32 %a, 104
  tail call void @llvm.assume(i1 %cmp3)
  br label %exit

exit:
  %cmp4 = icmp eq i32 %x, 2
  tail call void @llvm.assume(i1 %cmp4)
  unreachable
}

define i32 @unreachable_assumes_and_store(i32 %x, i32 %y, ptr %p) {
; CHECK-LABEL: @unreachable_assumes_and_store(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[CMP0:%.*]] = icmp sgt i32 [[X:%.*]], 1
; CHECK-NEXT:    [[CMP1:%.*]] = icmp eq i32 [[Y:%.*]], 1
; CHECK-NEXT:    [[OR:%.*]] = or i1 [[CMP0]], [[CMP1]]
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[OR]])
; CHECK-NEXT:    [[CMP2:%.*]] = icmp eq i32 [[X]], 1
; CHECK-NEXT:    br i1 [[CMP2]], label [[IF:%.*]], label [[EXIT:%.*]]
; CHECK:       if:
; CHECK-NEXT:    [[A:%.*]] = and i32 [[Y]], -2
; CHECK-NEXT:    [[CMP3:%.*]] = icmp ne i32 [[A]], 104
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[CMP3]])
; CHECK-NEXT:    br label [[EXIT]]
; CHECK:       exit:
; CHECK-NEXT:    unreachable
;
entry:
  %cmp0 = icmp sgt i32 %x, 1
  %cmp1 = icmp eq i32 %y, 1
  %or = or i1 %cmp0, %cmp1
  tail call void @llvm.assume(i1 %or)
  %cmp2 = icmp eq i32 %x, 1
  br i1 %cmp2, label %if, label %exit

if:
  %a = and i32 %y, -2
  %cmp3 = icmp ne i32 %a, 104
  tail call void @llvm.assume(i1 %cmp3)
  br label %exit

exit:
  %cmp4 = icmp eq i32 %x, 2
  tail call void @llvm.assume(i1 %cmp4)
  %cmp5 = icmp ugt i32 %y, 42
  tail call void @llvm.assume(i1 %cmp5)
  store i32 %x, ptr %p
  unreachable
}

define i32 @unreachable_assumes_and_store_logical(i32 %x, i32 %y, ptr %p) {
; CHECK-LABEL: @unreachable_assumes_and_store_logical(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[CMP0:%.*]] = icmp sgt i32 [[X:%.*]], 1
; CHECK-NEXT:    [[CMP1:%.*]] = icmp eq i32 [[Y:%.*]], 1
; CHECK-NEXT:    [[OR:%.*]] = select i1 [[CMP0]], i1 true, i1 [[CMP1]]
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[OR]])
; CHECK-NEXT:    [[CMP2:%.*]] = icmp eq i32 [[X]], 1
; CHECK-NEXT:    br i1 [[CMP2]], label [[IF:%.*]], label [[EXIT:%.*]]
; CHECK:       if:
; CHECK-NEXT:    [[A:%.*]] = and i32 [[Y]], -2
; CHECK-NEXT:    [[CMP3:%.*]] = icmp ne i32 [[A]], 104
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[CMP3]])
; CHECK-NEXT:    br label [[EXIT]]
; CHECK:       exit:
; CHECK-NEXT:    unreachable
;
entry:
  %cmp0 = icmp sgt i32 %x, 1
  %cmp1 = icmp eq i32 %y, 1
  %or = select i1 %cmp0, i1 true, i1 %cmp1
  tail call void @llvm.assume(i1 %or)
  %cmp2 = icmp eq i32 %x, 1
  br i1 %cmp2, label %if, label %exit

if:
  %a = and i32 %y, -2
  %cmp3 = icmp ne i32 %a, 104
  tail call void @llvm.assume(i1 %cmp3)
  br label %exit

exit:
  %cmp4 = icmp eq i32 %x, 2
  tail call void @llvm.assume(i1 %cmp4)
  %cmp5 = icmp ugt i32 %y, 42
  tail call void @llvm.assume(i1 %cmp5)
  store i32 %x, ptr %p
  unreachable
}

define void @canonicalize_assume(ptr %0) {
; DEFAULT-LABEL: @canonicalize_assume(
; DEFAULT-NEXT:    [[TMP2:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP0:%.*]], i64 8
; DEFAULT-NEXT:    call void @llvm.assume(i1 true) [ "align"(ptr [[TMP2]], i64 16) ]
; DEFAULT-NEXT:    ret void
;
; BUNDLES-LABEL: @canonicalize_assume(
; BUNDLES-NEXT:    call void @llvm.assume(i1 true) [ "align"(ptr [[TMP0:%.*]], i64 8) ]
; BUNDLES-NEXT:    ret void
;
  %2 = getelementptr inbounds i32, ptr %0, i64 2
  call void @llvm.assume(i1 true) [ "align"(ptr %2, i64 16) ]
  ret void
}

define void @assume_makes_and_known_assume_on_arg(ptr %p, i32 %x) {
; CHECK-LABEL: @assume_makes_and_known_assume_on_arg(
; CHECK-NEXT:    [[AND:%.*]] = and i32 [[X:%.*]], 1
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[AND]], 0
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    store i32 0, ptr [[P:%.*]], align 4
; CHECK-NEXT:    ret void
;
  %and = and i32 %x, 1
  %cmp = icmp eq i32 %and, 0
  call void @llvm.assume(i1 %cmp)
  %and2 = and i32 %x, 1
  store i32 %and2, ptr %p
  ret void
}

define void @assume_makes_and_known_assume_on_mul(ptr %p, i32 %a, i32 %b) {
; CHECK-LABEL: @assume_makes_and_known_assume_on_mul(
; CHECK-NEXT:    [[X:%.*]] = mul i32 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    [[AND:%.*]] = and i32 [[X]], 1
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[AND]], 0
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    store i32 0, ptr [[P:%.*]], align 4
; CHECK-NEXT:    ret void
;
  %x = mul i32 %a, %b
  %and = and i32 %x, 1
  %cmp = icmp eq i32 %and, 0
  call void @llvm.assume(i1 %cmp)
  %and2 = and i32 %x, 1
  store i32 %and2, ptr %p
  ret void
}

define void @assume_makes_and_known_assume_on_bitwise(ptr %p, i32 %a, i32 %b) {
; CHECK-LABEL: @assume_makes_and_known_assume_on_bitwise(
; CHECK-NEXT:    [[X:%.*]] = or i32 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    [[AND:%.*]] = and i32 [[X]], 1
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[AND]], 0
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    store i32 0, ptr [[P:%.*]], align 4
; CHECK-NEXT:    ret void
;
  %x = or i32 %a, %b
  %and = and i32 %x, 1
  %cmp = icmp eq i32 %and, 0
  call void @llvm.assume(i1 %cmp)
  %and2 = and i32 %x, 1
  store i32 %and2, ptr %p
  ret void
}

define i32 @range_16_31_top28(i32 %x) {
; CHECK-LABEL: @range_16_31_top28(
; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[X:%.*]], -16
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[TMP1]], 16
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    ret i32 16
;
  %add = add i32 %x, -16
  %cmp = icmp ult i32 %add, 16
  call void @llvm.assume(i1 %cmp)
  %res = and i32 %x, u0xfffffff0
  ret i32 %res
}

define i32 @range_16_31_top29(i32 %x) {
; CHECK-LABEL: @range_16_31_top29(
; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[X:%.*]], -16
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[TMP1]], 16
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    [[RES:%.*]] = and i32 [[X]], 24
; CHECK-NEXT:    ret i32 [[RES]]
;
  %add = add i32 %x, -16
  %cmp = icmp ult i32 %add, 16
  call void @llvm.assume(i1 %cmp)
  %res = and i32 %x, u0xfffffff8
  ret i32 %res
}

define i32 @range_16_30_top28(i32 %x) {
; CHECK-LABEL: @range_16_30_top28(
; CHECK-NEXT:    [[ADD:%.*]] = add i32 [[X:%.*]], -16
; CHECK-NEXT:    [[CMP:%.*]] = icmp ult i32 [[ADD]], 15
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    ret i32 16
;
  %add = add i32 %x, -16
  %cmp = icmp ult i32 %add, 15
  call void @llvm.assume(i1 %cmp)
  %res = and i32 %x, u0xfffffff0
  ret i32 %res
}

define i32 @range_16_32_top28(i32 %x) {
; CHECK-LABEL: @range_16_32_top28(
; CHECK-NEXT:    [[ADD:%.*]] = add i32 [[X:%.*]], -16
; CHECK-NEXT:    [[CMP:%.*]] = icmp ult i32 [[ADD]], 17
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    [[RES:%.*]] = and i32 [[X]], 48
; CHECK-NEXT:    ret i32 [[RES]]
;
  %add = add i32 %x, -16
  %cmp = icmp ult i32 %add, 17
  call void @llvm.assume(i1 %cmp)
  %res = and i32 %x, u0xfffffff0
  ret i32 %res
}

define i32 @range_16_32_top27(i32 %x) {
; CHECK-LABEL: @range_16_32_top27(
; CHECK-NEXT:    [[ADD:%.*]] = add i32 [[X:%.*]], -16
; CHECK-NEXT:    [[CMP:%.*]] = icmp ult i32 [[ADD]], 17
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    [[RES:%.*]] = and i32 [[X]], 32
; CHECK-NEXT:    ret i32 [[RES]]
;
  %add = add i32 %x, -16
  %cmp = icmp ult i32 %add, 17
  call void @llvm.assume(i1 %cmp)
  %res = and i32 %x, u0xffffffe0
  ret i32 %res
}

define i32 @range_16_32_top26(i32 %x) {
; CHECK-LABEL: @range_16_32_top26(
; CHECK-NEXT:    [[ADD:%.*]] = add i32 [[X:%.*]], -16
; CHECK-NEXT:    [[CMP:%.*]] = icmp ult i32 [[ADD]], 17
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    ret i32 0
;
  %add = add i32 %x, -16
  %cmp = icmp ult i32 %add, 17
  call void @llvm.assume(i1 %cmp)
  %res = and i32 %x, u0xffffffc0
  ret i32 %res
}

define i32 @range_15_31_top28(i32 %x) {
; CHECK-LABEL: @range_15_31_top28(
; CHECK-NEXT:    [[ADD:%.*]] = add i32 [[X:%.*]], -15
; CHECK-NEXT:    [[CMP:%.*]] = icmp ult i32 [[ADD]], 16
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    [[RES:%.*]] = and i32 [[X]], 16
; CHECK-NEXT:    ret i32 [[RES]]
;
  %add = add i32 %x, -15
  %cmp = icmp ult i32 %add, 16
  call void @llvm.assume(i1 %cmp)
  %res = and i32 %x, u0xfffffff0
  ret i32 %res
}

define i32 @range_15_31_top27(i32 %x) {
; CHECK-LABEL: @range_15_31_top27(
; CHECK-NEXT:    [[ADD:%.*]] = add i32 [[X:%.*]], -15
; CHECK-NEXT:    [[CMP:%.*]] = icmp ult i32 [[ADD]], 16
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    ret i32 0
;
  %add = add i32 %x, -15
  %cmp = icmp ult i32 %add, 16
  call void @llvm.assume(i1 %cmp)
  %res = and i32 %x, u0xffffffe0
  ret i32 %res
}

declare void @llvm.dbg.value(metadata, metadata, metadata)

!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!5, !6, !7, !8}

!0 = distinct !DICompileUnit(language: DW_LANG_C, file: !3, producer: "Me", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: null, retainedTypes: null, imports: null)
!1 = !DILocalVariable(name: "", arg: 1, scope: !2, file: null, line: 1, type: null)
!2 = distinct !DISubprogram(name: "debug", linkageName: "debug", scope: null, file: null, line: 0, type: null, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: true, unit: !0)
!3 = !DIFile(filename: "consecutive-fences.ll", directory: "")
!5 = !{i32 2, !"Dwarf Version", i32 4}
!6 = !{i32 2, !"Debug Info Version", i32 3}
!7 = !{i32 1, !"wchar_size", i32 4}
!8 = !{i32 7, !"PIC Level", i32 2}
!9 = !DILocation(line: 0, column: 0, scope: !2)


attributes #0 = { nounwind uwtable }
attributes #1 = { nounwind }