; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=instcombine -S | FileCheck %s

target datalayout = "e-p:64:64:64-p1:16:16:16-p2:32:32:32-p3:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"

declare void @use(i8)

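; 127 has only its low seven bits set, so shifting it right by 7 or more
; produces zero; the eq/ne-with-zero compares below therefore reduce to
; unsigned compares of the shift amount (ugt 6 / ult 7).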
define i1 @lshr_eq_msb_low_last_zero(i8 %a) {
; CHECK-LABEL: @lshr_eq_msb_low_last_zero(
; CHECK-NEXT:    [[CMP:%.*]] = icmp ugt i8 [[A:%.*]], 6
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %shr = lshr i8 127, %a
  %cmp = icmp eq i8 %shr, 0
  ret i1 %cmp
}

define <2 x i1> @lshr_eq_msb_low_last_zero_vec(<2 x i8> %a) {
; CHECK-LABEL: @lshr_eq_msb_low_last_zero_vec(
; CHECK-NEXT:    [[CMP:%.*]] = icmp ugt <2 x i8> [[A:%.*]], splat (i8 6)
; CHECK-NEXT:    ret <2 x i1> [[CMP]]
;
  %shr = lshr <2 x i8> <i8 127, i8 127>, %a
  %cmp = icmp eq <2 x i8> %shr, zeroinitializer
  ret <2 x i1> %cmp
}

define i1 @ashr_eq_msb_low_second_zero(i8 %a) {
; CHECK-LABEL: @ashr_eq_msb_low_second_zero(
; CHECK-NEXT:    [[CMP:%.*]] = icmp ugt i8 [[A:%.*]], 6
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %shr = ashr i8 127, %a
  %cmp = icmp eq i8 %shr, 0
  ret i1 %cmp
}

define i1 @lshr_ne_msb_low_last_zero(i8 %a) {
; CHECK-LABEL: @lshr_ne_msb_low_last_zero(
; CHECK-NEXT:    [[CMP:%.*]] = icmp ult i8 [[A:%.*]], 7
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %shr = lshr i8 127, %a
  %cmp = icmp ne i8 %shr, 0
  ret i1 %cmp
}

define i1 @ashr_ne_msb_low_second_zero(i8 %a) {
; CHECK-LABEL: @ashr_ne_msb_low_second_zero(
; CHECK-NEXT:    [[CMP:%.*]] = icmp ult i8 [[A:%.*]], 7
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %shr = ashr i8 127, %a
  %cmp = icmp ne i8 %shr, 0
  ret i1 %cmp
}

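; For the constants used below, any non-zero shift changes the value, so
; (C >> %a) ==/!= C reduces to %a ==/!= 0.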
define i1 @ashr_eq_both_equal(i8 %a) {
; CHECK-LABEL: @ashr_eq_both_equal(
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i8 [[A:%.*]], 0
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %shr = ashr i8 128, %a
  %cmp = icmp eq i8 %shr, 128
  ret i1 %cmp
}

define i1 @ashr_ne_both_equal(i8 %a) {
; CHECK-LABEL: @ashr_ne_both_equal(
; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i8 [[A:%.*]], 0
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %shr = ashr i8 128, %a
  %cmp = icmp ne i8 %shr, 128
  ret i1 %cmp
}

define i1 @lshr_eq_both_equal(i8 %a) {
; CHECK-LABEL: @lshr_eq_both_equal(
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i8 [[A:%.*]], 0
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %shr = lshr i8 127, %a
  %cmp = icmp eq i8 %shr, 127
  ret i1 %cmp
}

define i1 @lshr_ne_both_equal(i8 %a) {
; CHECK-LABEL: @lshr_ne_both_equal(
; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i8 [[A:%.*]], 0
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %shr = lshr i8 127, %a
  %cmp = icmp ne i8 %shr, 127
  ret i1 %cmp
}

define i1 @exact_ashr_eq_both_equal(i8 %a) {
; CHECK-LABEL: @exact_ashr_eq_both_equal(
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i8 [[A:%.*]], 0
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %shr = ashr exact i8 128, %a
  %cmp = icmp eq i8 %shr, 128
  ret i1 %cmp
}

define i1 @exact_ashr_ne_both_equal(i8 %a) {
; CHECK-LABEL: @exact_ashr_ne_both_equal(
; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i8 [[A:%.*]], 0
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %shr = ashr exact i8 128, %a
  %cmp = icmp ne i8 %shr, 128
  ret i1 %cmp
}

define i1 @exact_lshr_eq_both_equal(i8 %a) {
; CHECK-LABEL: @exact_lshr_eq_both_equal(
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i8 [[A:%.*]], 0
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %shr = lshr exact i8 126, %a
  %cmp = icmp eq i8 %shr, 126
  ret i1 %cmp
}

define i1 @exact_lshr_ne_both_equal(i8 %a) {
; CHECK-LABEL: @exact_lshr_ne_both_equal(
; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i8 [[A:%.*]], 0
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %shr = lshr exact i8 126, %a
  %cmp = icmp ne i8 %shr, 126
  ret i1 %cmp
}

define i1 @exact_lshr_eq_opposite_msb(i8 %a) {
; CHECK-LABEL: @exact_lshr_eq_opposite_msb(
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i8 [[A:%.*]], 7
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %shr = lshr exact i8 -128, %a
  %cmp = icmp eq i8 %shr, 1
  ret i1 %cmp
}

define i1 @lshr_eq_opposite_msb(i8 %a) {
; CHECK-LABEL: @lshr_eq_opposite_msb(
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i8 [[A:%.*]], 7
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %shr = lshr i8 -128, %a
  %cmp = icmp eq i8 %shr, 1
  ret i1 %cmp
}

define i1 @exact_lshr_ne_opposite_msb(i8 %a) {
; CHECK-LABEL: @exact_lshr_ne_opposite_msb(
; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i8 [[A:%.*]], 7
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %shr = lshr exact i8 -128, %a
  %cmp = icmp ne i8 %shr, 1
  ret i1 %cmp
}

define i1 @lshr_ne_opposite_msb(i8 %a) {
; CHECK-LABEL: @lshr_ne_opposite_msb(
; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i8 [[A:%.*]], 7
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %shr = lshr i8 -128, %a
  %cmp = icmp ne i8 %shr, 1
  ret i1 %cmp
}

define i1 @exact_ashr_eq(i8 %a) {
; CHECK-LABEL: @exact_ashr_eq(
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i8 [[A:%.*]], 7
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %shr = ashr exact i8 -128, %a
  %cmp = icmp eq i8 %shr, -1
  ret i1 %cmp
}

define i1 @exact_ashr_ne(i8 %a) {
; CHECK-LABEL: @exact_ashr_ne(
; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i8 [[A:%.*]], 7
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %shr = ashr exact i8 -128, %a
  %cmp = icmp ne i8 %shr, -1
  ret i1 %cmp
}

define i1 @exact_lshr_eq(i8 %a) {
; CHECK-LABEL: @exact_lshr_eq(
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i8 [[A:%.*]], 2
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %shr = lshr exact i8 4, %a
  %cmp = icmp eq i8 %shr, 1
  ret i1 %cmp
}

define i1 @exact_lshr_ne(i8 %a) {
; CHECK-LABEL: @exact_lshr_ne(
; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i8 [[A:%.*]], 2
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %shr = lshr exact i8 4, %a
  %cmp = icmp ne i8 %shr, 1
  ret i1 %cmp
}

define i1 @nonexact_ashr_eq(i8 %a) {
; CHECK-LABEL: @nonexact_ashr_eq(
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i8 [[A:%.*]], 7
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %shr = ashr i8 -128, %a
  %cmp = icmp eq i8 %shr, -1
  ret i1 %cmp
}

define i1 @nonexact_ashr_ne(i8 %a) {
; CHECK-LABEL: @nonexact_ashr_ne(
; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i8 [[A:%.*]], 7
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %shr = ashr i8 -128, %a
  %cmp = icmp ne i8 %shr, -1
  ret i1 %cmp
}

define i1 @nonexact_lshr_eq(i8 %a) {
; CHECK-LABEL: @nonexact_lshr_eq(
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i8 [[A:%.*]], 2
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %shr = lshr i8 4, %a
  %cmp = icmp eq i8 %shr, 1
  ret i1 %cmp
}

define i1 @nonexact_lshr_ne(i8 %a) {
; CHECK-LABEL: @nonexact_lshr_ne(
; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i8 [[A:%.*]], 2
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %shr = lshr i8 4, %a
  %cmp = icmp ne i8 %shr, 1
  ret i1 %cmp
}

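; 80 is 5 * 2^4, so (80 >> %a) == 5 holds exactly for %a == 4; the ashr
; variants with -80 and -5 reduce the same way.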
define i1 @exact_lshr_eq_exactdiv(i8 %a) {
; CHECK-LABEL: @exact_lshr_eq_exactdiv(
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i8 [[A:%.*]], 4
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %shr = lshr exact i8 80, %a
  %cmp = icmp eq i8 %shr, 5
  ret i1 %cmp
}

define i1 @exact_lshr_ne_exactdiv(i8 %a) {
; CHECK-LABEL: @exact_lshr_ne_exactdiv(
; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i8 [[A:%.*]], 4
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %shr = lshr exact i8 80, %a
  %cmp = icmp ne i8 %shr, 5
  ret i1 %cmp
}

define i1 @nonexact_lshr_eq_exactdiv(i8 %a) {
; CHECK-LABEL: @nonexact_lshr_eq_exactdiv(
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i8 [[A:%.*]], 4
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %shr = lshr i8 80, %a
  %cmp = icmp eq i8 %shr, 5
  ret i1 %cmp
}

define i1 @nonexact_lshr_ne_exactdiv(i8 %a) {
; CHECK-LABEL: @nonexact_lshr_ne_exactdiv(
; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i8 [[A:%.*]], 4
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %shr = lshr i8 80, %a
  %cmp = icmp ne i8 %shr, 5
  ret i1 %cmp
}

define i1 @exact_ashr_eq_exactdiv(i8 %a) {
; CHECK-LABEL: @exact_ashr_eq_exactdiv(
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i8 [[A:%.*]], 4
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %shr = ashr exact i8 -80, %a
  %cmp = icmp eq i8 %shr, -5
  ret i1 %cmp
}

define i1 @exact_ashr_ne_exactdiv(i8 %a) {
; CHECK-LABEL: @exact_ashr_ne_exactdiv(
; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i8 [[A:%.*]], 4
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %shr = ashr exact i8 -80, %a
  %cmp = icmp ne i8 %shr, -5
  ret i1 %cmp
}

define i1 @nonexact_ashr_eq_exactdiv(i8 %a) {
; CHECK-LABEL: @nonexact_ashr_eq_exactdiv(
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i8 [[A:%.*]], 4
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %shr = ashr i8 -80, %a
  %cmp = icmp eq i8 %shr, -5
  ret i1 %cmp
}

define i1 @nonexact_ashr_ne_exactdiv(i8 %a) {
; CHECK-LABEL: @nonexact_ashr_ne_exactdiv(
; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i8 [[A:%.*]], 4
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %shr = ashr i8 -80, %a
  %cmp = icmp ne i8 %shr, -5
  ret i1 %cmp
}

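; No right shift of 80 or 90 (or of -80/-90) can produce 31 or 30 (or
; -31/-30), so these compares fold to constants.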
define i1 @exact_lshr_eq_noexactdiv(i8 %a) {
; CHECK-LABEL: @exact_lshr_eq_noexactdiv(
; CHECK-NEXT:    ret i1 false
;
  %shr = lshr exact i8 80, %a
  %cmp = icmp eq i8 %shr, 31
  ret i1 %cmp
}

define i1 @exact_lshr_ne_noexactdiv(i8 %a) {
; CHECK-LABEL: @exact_lshr_ne_noexactdiv(
; CHECK-NEXT:    ret i1 true
;
  %shr = lshr exact i8 80, %a
  %cmp = icmp ne i8 %shr, 31
  ret i1 %cmp
}

define i1 @nonexact_lshr_eq_noexactdiv(i8 %a) {
; CHECK-LABEL: @nonexact_lshr_eq_noexactdiv(
; CHECK-NEXT:    ret i1 false
;
  %shr = lshr i8 80, %a
  %cmp = icmp eq i8 %shr, 31
  ret i1 %cmp
}

define i1 @nonexact_lshr_ne_noexactdiv(i8 %a) {
; CHECK-LABEL: @nonexact_lshr_ne_noexactdiv(
; CHECK-NEXT:    ret i1 true
;
  %shr = lshr i8 80, %a
  %cmp = icmp ne i8 %shr, 31
  ret i1 %cmp
}

define i1 @exact_ashr_eq_noexactdiv(i8 %a) {
; CHECK-LABEL: @exact_ashr_eq_noexactdiv(
; CHECK-NEXT:    ret i1 false
;
  %shr = ashr exact i8 -80, %a
  %cmp = icmp eq i8 %shr, -31
  ret i1 %cmp
}

define i1 @exact_ashr_ne_noexactdiv(i8 %a) {
; CHECK-LABEL: @exact_ashr_ne_noexactdiv(
; CHECK-NEXT:    ret i1 true
;
  %shr = ashr exact i8 -80, %a
  %cmp = icmp ne i8 %shr, -31
  ret i1 %cmp
}

define i1 @nonexact_ashr_eq_noexactdiv(i8 %a) {
; CHECK-LABEL: @nonexact_ashr_eq_noexactdiv(
; CHECK-NEXT:    ret i1 false
;
  %shr = ashr i8 -80, %a
  %cmp = icmp eq i8 %shr, -31
  ret i1 %cmp
}

define i1 @nonexact_ashr_ne_noexactdiv(i8 %a) {
; CHECK-LABEL: @nonexact_ashr_ne_noexactdiv(
; CHECK-NEXT:    ret i1 true
;
  %shr = ashr i8 -80, %a
  %cmp = icmp ne i8 %shr, -31
  ret i1 %cmp
}

define i1 @nonexact_lshr_eq_noexactlog(i8 %a) {
; CHECK-LABEL: @nonexact_lshr_eq_noexactlog(
; CHECK-NEXT:    ret i1 false
;
  %shr = lshr i8 90, %a
  %cmp = icmp eq i8 %shr, 30
  ret i1 %cmp
}

define i1 @nonexact_lshr_ne_noexactlog(i8 %a) {
; CHECK-LABEL: @nonexact_lshr_ne_noexactlog(
; CHECK-NEXT:    ret i1 true
;
  %shr = lshr i8 90, %a
  %cmp = icmp ne i8 %shr, 30
  ret i1 %cmp
}

define i1 @nonexact_ashr_eq_noexactlog(i8 %a) {
; CHECK-LABEL: @nonexact_ashr_eq_noexactlog(
; CHECK-NEXT:    ret i1 false
;
  %shr = ashr i8 -90, %a
  %cmp = icmp eq i8 %shr, -30
  ret i1 %cmp
}

define i1 @nonexact_ashr_ne_noexactlog(i8 %a) {
; CHECK-LABEL: @nonexact_ashr_ne_noexactlog(
; CHECK-NEXT:    ret i1 true
;
  %shr = ashr i8 -90, %a
  %cmp = icmp ne i8 %shr, -30
  ret i1 %cmp
}

; Don't try to fold the entire body of function @PR20945 into a
; single `ret i1 true` statement.
; If %B is equal to 1, then this function would return false.
; As a consequence, the instruction combiner is not allowed to fold %cmp
; to 'true'. Instead, it should replace %cmp with a simpler comparison
; between %B and 1.
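; For example, with %B == 1, %shr = ashr i32 -9, 1 = -5, so %cmp would be false.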

define i1 @PR20945(i32 %B) {
; CHECK-LABEL: @PR20945(
; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i32 [[B:%.*]], 1
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %shr = ashr i32 -9, %B
  %cmp = icmp ne i32 %shr, -5
  ret i1 %cmp
}

define i1 @PR21222(i32 %B) {
; CHECK-LABEL: @PR21222(
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[B:%.*]], 6
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %shr = ashr i32 -93, %B
  %cmp = icmp eq i32 %shr, -2
  ret i1 %cmp
}

define i1 @PR24873(i64 %V) {
; CHECK-LABEL: @PR24873(
; CHECK-NEXT:    [[ICMP:%.*]] = icmp ugt i64 [[V:%.*]], 61
; CHECK-NEXT:    ret i1 [[ICMP]]
;
  %ashr = ashr i64 -4611686018427387904, %V
  %icmp = icmp eq i64 %ashr, -1
  ret i1 %icmp
}

declare void @foo(i32)

define i1 @exact_multiuse(i32 %x) {
; CHECK-LABEL: @exact_multiuse(
; CHECK-NEXT:    [[SH:%.*]] = lshr exact i32 [[X:%.*]], 7
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[X]], 131072
; CHECK-NEXT:    call void @foo(i32 [[SH]])
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %sh = lshr exact i32 %x, 7
  %cmp = icmp eq i32 %sh, 1024
  call void @foo(i32 %sh)
  ret i1 %cmp
}

; PR9343 #1
define i1 @ashr_exact_eq_0(i32 %X, i32 %Y) {
; CHECK-LABEL: @ashr_exact_eq_0(
; CHECK-NEXT:    [[B:%.*]] = icmp eq i32 [[X:%.*]], 0
; CHECK-NEXT:    ret i1 [[B]]
;
  %A = ashr exact i32 %X, %Y
  %B = icmp eq i32 %A, 0
  ret i1 %B
}

define i1 @ashr_exact_ne_0_uses(i32 %X, i32 %Y) {
; CHECK-LABEL: @ashr_exact_ne_0_uses(
; CHECK-NEXT:    [[A:%.*]] = ashr exact i32 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    call void @foo(i32 [[A]])
; CHECK-NEXT:    [[B:%.*]] = icmp ne i32 [[X]], 0
; CHECK-NEXT:    ret i1 [[B]]
;
  %A = ashr exact i32 %X, %Y
  call void @foo(i32 %A)
  %B = icmp ne i32 %A, 0
  ret i1 %B
}

define <2 x i1> @ashr_exact_eq_0_vec(<2 x i32> %X, <2 x i32> %Y) {
; CHECK-LABEL: @ashr_exact_eq_0_vec(
; CHECK-NEXT:    [[B:%.*]] = icmp eq <2 x i32> [[X:%.*]], zeroinitializer
; CHECK-NEXT:    ret <2 x i1> [[B]]
;
  %A = ashr exact <2 x i32> %X, %Y
  %B = icmp eq <2 x i32> %A, zeroinitializer
  ret <2 x i1> %B
}

define i1 @lshr_exact_ne_0(i32 %X, i32 %Y) {
; CHECK-LABEL: @lshr_exact_ne_0(
; CHECK-NEXT:    [[B:%.*]] = icmp ne i32 [[X:%.*]], 0
; CHECK-NEXT:    ret i1 [[B]]
;
  %A = lshr exact i32 %X, %Y
  %B = icmp ne i32 %A, 0
  ret i1 %B
}

define i1 @lshr_exact_eq_0_uses(i32 %X, i32 %Y) {
; CHECK-LABEL: @lshr_exact_eq_0_uses(
; CHECK-NEXT:    [[A:%.*]] = lshr exact i32 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    call void @foo(i32 [[A]])
; CHECK-NEXT:    [[B:%.*]] = icmp eq i32 [[X]], 0
; CHECK-NEXT:    ret i1 [[B]]
;
  %A = lshr exact i32 %X, %Y
  call void @foo(i32 %A)
  %B = icmp eq i32 %A, 0
  ret i1 %B
}

define <2 x i1> @lshr_exact_ne_0_vec(<2 x i32> %X, <2 x i32> %Y) {
; CHECK-LABEL: @lshr_exact_ne_0_vec(
; CHECK-NEXT:    [[B:%.*]] = icmp ne <2 x i32> [[X:%.*]], zeroinitializer
; CHECK-NEXT:    ret <2 x i1> [[B]]
;
  %A = lshr exact <2 x i32> %X, %Y
  %B = icmp ne <2 x i32> %A, zeroinitializer
  ret <2 x i1> %B
}

; Verify conversions of ashr+icmp to a sign-bit test.
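; For i4, (x ashr 1) of a non-negative value is in [0, 3], and of a negative
; value is in [12, 15] when viewed as unsigned, so an unsigned compare against
; a bound between those ranges becomes a sign test of x.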

; negative test, but different transform possible

define i1 @ashr_ugt_0(i4 %x) {
; CHECK-LABEL: @ashr_ugt_0(
; CHECK-NEXT:    [[R:%.*]] = icmp ugt i4 [[X:%.*]], 1
; CHECK-NEXT:    ret i1 [[R]]
;
  %s = ashr i4 %x, 1
  %r = icmp ugt i4 %s, 0 ; 0b0000
  ret i1 %r
}

define i1 @ashr_ugt_0_multiuse(i4 %x, ptr %p) {
; CHECK-LABEL: @ashr_ugt_0_multiuse(
; CHECK-NEXT:    [[S:%.*]] = ashr i4 [[X:%.*]], 1
; CHECK-NEXT:    [[R:%.*]] = icmp ugt i4 [[X]], 1
; CHECK-NEXT:    store i4 [[S]], ptr [[P:%.*]], align 1
; CHECK-NEXT:    ret i1 [[R]]
;
  %s = ashr i4 %x, 1
  %r = icmp ugt i4 %s, 0 ; 0b0000
  store i4 %s, ptr %p
  ret i1 %r
}

define i1 @ashr_ugt_1(i4 %x) {
; CHECK-LABEL: @ashr_ugt_1(
; CHECK-NEXT:    [[R:%.*]] = icmp ugt i4 [[X:%.*]], 3
; CHECK-NEXT:    ret i1 [[R]]
;
  %s = ashr i4 %x, 1
  %r = icmp ugt i4 %s, 1 ; 0b0001
  ret i1 %r
}

define i1 @ashr_ugt_2(i4 %x) {
; CHECK-LABEL: @ashr_ugt_2(
; CHECK-NEXT:    [[R:%.*]] = icmp ugt i4 [[X:%.*]], 5
; CHECK-NEXT:    ret i1 [[R]]
;
  %s = ashr i4 %x, 1
  %r = icmp ugt i4 %s, 2 ; 0b0010
  ret i1 %r
}

define i1 @ashr_ugt_3(i4 %x) {
; CHECK-LABEL: @ashr_ugt_3(
; CHECK-NEXT:    [[R:%.*]] = icmp slt i4 [[X:%.*]], 0
; CHECK-NEXT:    ret i1 [[R]]
;
  %s = ashr i4 %x, 1
  %r = icmp ugt i4 %s, 3 ; 0b0011
  ret i1 %r
}

define i1 @ashr_ugt_4(i4 %x) {
; CHECK-LABEL: @ashr_ugt_4(
; CHECK-NEXT:    [[R:%.*]] = icmp slt i4 [[X:%.*]], 0
; CHECK-NEXT:    ret i1 [[R]]
;
  %s = ashr i4 %x, 1
  %r = icmp ugt i4 %s, 4 ; 0b0100
  ret i1 %r
}

define i1 @ashr_ugt_5(i4 %x) {
; CHECK-LABEL: @ashr_ugt_5(
; CHECK-NEXT:    [[R:%.*]] = icmp slt i4 [[X:%.*]], 0
; CHECK-NEXT:    ret i1 [[R]]
;
  %s = ashr i4 %x, 1
  %r = icmp ugt i4 %s, 5 ; 0b0101
  ret i1 %r
}

define i1 @ashr_ugt_6(i4 %x) {
; CHECK-LABEL: @ashr_ugt_6(
; CHECK-NEXT:    [[R:%.*]] = icmp slt i4 [[X:%.*]], 0
; CHECK-NEXT:    ret i1 [[R]]
;
  %s = ashr i4 %x, 1
  %r = icmp ugt i4 %s, 6 ; 0b0110
  ret i1 %r
}

define i1 @ashr_ugt_7(i4 %x) {
; CHECK-LABEL: @ashr_ugt_7(
; CHECK-NEXT:    [[R:%.*]] = icmp slt i4 [[X:%.*]], 0
; CHECK-NEXT:    ret i1 [[R]]
;
  %s = ashr i4 %x, 1
  %r = icmp ugt i4 %s, 7 ; 0b0111
  ret i1 %r
}

define i1 @ashr_ugt_8(i4 %x) {
; CHECK-LABEL: @ashr_ugt_8(
; CHECK-NEXT:    [[R:%.*]] = icmp slt i4 [[X:%.*]], 0
; CHECK-NEXT:    ret i1 [[R]]
;
  %s = ashr i4 %x, 1
  %r = icmp ugt i4 %s, 8 ; 0b1000
  ret i1 %r
}

define i1 @ashr_ugt_9(i4 %x) {
; CHECK-LABEL: @ashr_ugt_9(
; CHECK-NEXT:    [[R:%.*]] = icmp slt i4 [[X:%.*]], 0
; CHECK-NEXT:    ret i1 [[R]]
;
  %s = ashr i4 %x, 1
  %r = icmp ugt i4 %s, 9 ; 0b1001
  ret i1 %r
}

define i1 @ashr_ugt_10(i4 %x) {
; CHECK-LABEL: @ashr_ugt_10(
; CHECK-NEXT:    [[R:%.*]] = icmp slt i4 [[X:%.*]], 0
; CHECK-NEXT:    ret i1 [[R]]
;
  %s = ashr i4 %x, 1
  %r = icmp ugt i4 %s, 10 ; 0b1010
  ret i1 %r
}

define i1 @ashr_ugt_11(i4 %x) {
; CHECK-LABEL: @ashr_ugt_11(
; CHECK-NEXT:    [[R:%.*]] = icmp slt i4 [[X:%.*]], 0
; CHECK-NEXT:    ret i1 [[R]]
;
  %s = ashr i4 %x, 1
  %r = icmp ugt i4 %s, 11 ; 0b1011
  ret i1 %r
}

define i1 @ashr_ugt_12(i4 %x) {
; CHECK-LABEL: @ashr_ugt_12(
; CHECK-NEXT:    [[R:%.*]] = icmp ugt i4 [[X:%.*]], -7
; CHECK-NEXT:    ret i1 [[R]]
;
  %s = ashr i4 %x, 1
  %r = icmp ugt i4 %s, 12 ; 0b1100
  ret i1 %r
}

define i1 @ashr_ugt_13(i4 %x) {
; CHECK-LABEL: @ashr_ugt_13(
; CHECK-NEXT:    [[R:%.*]] = icmp ugt i4 [[X:%.*]], -5
; CHECK-NEXT:    ret i1 [[R]]
;
  %s = ashr i4 %x, 1
  %r = icmp ugt i4 %s, 13 ; 0b1101
  ret i1 %r
}

; negative test, but different transform possible

define i1 @ashr_ugt_14(i4 %x) {
; CHECK-LABEL: @ashr_ugt_14(
; CHECK-NEXT:    [[R:%.*]] = icmp ugt i4 [[X:%.*]], -3
; CHECK-NEXT:    ret i1 [[R]]
;
  %s = ashr i4 %x, 1
  %r = icmp ugt i4 %s, 14 ; 0b1110
  ret i1 %r
}

; negative test, but simplifies

define i1 @ashr_ugt_15(i4 %x) {
; CHECK-LABEL: @ashr_ugt_15(
; CHECK-NEXT:    ret i1 false
;
  %s = ashr i4 %x, 1
  %r = icmp ugt i4 %s, 15 ; 0b1111
  ret i1 %r
}

; negative test, but simplifies

define i1 @ashr_ult_0(i4 %x) {
; CHECK-LABEL: @ashr_ult_0(
; CHECK-NEXT:    ret i1 false
;
  %s = ashr i4 %x, 1
  %r = icmp ult i4 %s, 0 ; 0b0000
  ret i1 %r
}

; negative test, but different transform possible

define i1 @ashr_ult_1(i4 %x) {
; CHECK-LABEL: @ashr_ult_1(
; CHECK-NEXT:    [[R:%.*]] = icmp ult i4 [[X:%.*]], 2
; CHECK-NEXT:    ret i1 [[R]]
;
  %s = ashr i4 %x, 1
  %r = icmp ult i4 %s, 1 ; 0b0001
  ret i1 %r
}

; negative test

define i1 @ashr_ult_2(i4 %x) {
; CHECK-LABEL: @ashr_ult_2(
; CHECK-NEXT:    [[R:%.*]] = icmp ult i4 [[X:%.*]], 4
; CHECK-NEXT:    ret i1 [[R]]
;
  %s = ashr i4 %x, 1
  %r = icmp ult i4 %s, 2 ; 0b0010
  ret i1 %r
}

define i1 @ashr_ult_2_multiuse(i4 %x, ptr %p) {
; CHECK-LABEL: @ashr_ult_2_multiuse(
; CHECK-NEXT:    [[S:%.*]] = ashr i4 [[X:%.*]], 1
; CHECK-NEXT:    [[R:%.*]] = icmp ult i4 [[S]], 2
; CHECK-NEXT:    store i4 [[S]], ptr [[P:%.*]], align 1
; CHECK-NEXT:    ret i1 [[R]]
;
  %s = ashr i4 %x, 1
  %r = icmp ult i4 %s, 2 ; 0b0010
  store i4 %s, ptr %p
  ret i1 %r
}

; negative test

define i1 @ashr_ult_3(i4 %x) {
; CHECK-LABEL: @ashr_ult_3(
; CHECK-NEXT:    [[R:%.*]] = icmp ult i4 [[X:%.*]], 6
; CHECK-NEXT:    ret i1 [[R]]
;
  %s = ashr i4 %x, 1
  %r = icmp ult i4 %s, 3 ; 0b0011
  ret i1 %r
}

define i1 @ashr_ult_4(i4 %x) {
; CHECK-LABEL: @ashr_ult_4(
; CHECK-NEXT:    [[R:%.*]] = icmp sgt i4 [[X:%.*]], -1
; CHECK-NEXT:    ret i1 [[R]]
;
  %s = ashr i4 %x, 1
  %r = icmp ult i4 %s, 4 ; 0b0100
  ret i1 %r
}

define i1 @ashr_ult_5(i4 %x) {
; CHECK-LABEL: @ashr_ult_5(
; CHECK-NEXT:    [[R:%.*]] = icmp sgt i4 [[X:%.*]], -1
; CHECK-NEXT:    ret i1 [[R]]
;
  %s = ashr i4 %x, 1
  %r = icmp ult i4 %s, 5 ; 0b0101
  ret i1 %r
}

define i1 @ashr_ult_6(i4 %x) {
; CHECK-LABEL: @ashr_ult_6(
; CHECK-NEXT:    [[R:%.*]] = icmp sgt i4 [[X:%.*]], -1
; CHECK-NEXT:    ret i1 [[R]]
;
  %s = ashr i4 %x, 1
  %r = icmp ult i4 %s, 6 ; 0b0110
  ret i1 %r
}

define i1 @ashr_ult_7(i4 %x) {
; CHECK-LABEL: @ashr_ult_7(
; CHECK-NEXT:    [[R:%.*]] = icmp sgt i4 [[X:%.*]], -1
; CHECK-NEXT:    ret i1 [[R]]
;
  %s = ashr i4 %x, 1
  %r = icmp ult i4 %s, 7 ; 0b0111
  ret i1 %r
}

define i1 @ashr_ult_8(i4 %x) {
; CHECK-LABEL: @ashr_ult_8(
; CHECK-NEXT:    [[R:%.*]] = icmp sgt i4 [[X:%.*]], -1
; CHECK-NEXT:    ret i1 [[R]]
;
  %s = ashr i4 %x, 1
  %r = icmp ult i4 %s, 8 ; 0b1000
  ret i1 %r
}

define i1 @ashr_ult_9(i4 %x) {
; CHECK-LABEL: @ashr_ult_9(
; CHECK-NEXT:    [[R:%.*]] = icmp sgt i4 [[X:%.*]], -1
; CHECK-NEXT:    ret i1 [[R]]
;
  %s = ashr i4 %x, 1
  %r = icmp ult i4 %s, 9 ; 0b1001
  ret i1 %r
}

define i1 @ashr_ult_10(i4 %x) {
; CHECK-LABEL: @ashr_ult_10(
; CHECK-NEXT:    [[R:%.*]] = icmp sgt i4 [[X:%.*]], -1
; CHECK-NEXT:    ret i1 [[R]]
;
  %s = ashr i4 %x, 1
  %r = icmp ult i4 %s, 10 ; 0b1010
  ret i1 %r
}

define i1 @ashr_ult_11(i4 %x) {
; CHECK-LABEL: @ashr_ult_11(
; CHECK-NEXT:    [[R:%.*]] = icmp sgt i4 [[X:%.*]], -1
; CHECK-NEXT:    ret i1 [[R]]
;
  %s = ashr i4 %x, 1
  %r = icmp ult i4 %s, 11 ; 0b1011
  ret i1 %r
}

; negative test

define i1 @ashr_ult_12(i4 %x) {
; CHECK-LABEL: @ashr_ult_12(
; CHECK-NEXT:    [[R:%.*]] = icmp sgt i4 [[X:%.*]], -1
; CHECK-NEXT:    ret i1 [[R]]
;
  %s = ashr i4 %x, 1
  %r = icmp ult i4 %s, 12 ; 0b1100
  ret i1 %r
}

; negative test

define i1 @ashr_ult_13(i4 %x) {
; CHECK-LABEL: @ashr_ult_13(
; CHECK-NEXT:    [[R:%.*]] = icmp ult i4 [[X:%.*]], -6
; CHECK-NEXT:    ret i1 [[R]]
;
  %s = ashr i4 %x, 1
  %r = icmp ult i4 %s, 13 ; 0b1101
  ret i1 %r
}

; negative test

define i1 @ashr_ult_14(i4 %x) {
; CHECK-LABEL: @ashr_ult_14(
; CHECK-NEXT:    [[R:%.*]] = icmp ult i4 [[X:%.*]], -4
; CHECK-NEXT:    ret i1 [[R]]
;
  %s = ashr i4 %x, 1
  %r = icmp ult i4 %s, 14 ; 0b1110
  ret i1 %r
}

; negative test, but different transform possible

define i1 @ashr_ult_15(i4 %x) {
; CHECK-LABEL: @ashr_ult_15(
; CHECK-NEXT:    [[R:%.*]] = icmp ult i4 [[X:%.*]], -2
; CHECK-NEXT:    ret i1 [[R]]
;
  %s = ashr i4 %x, 1
  %r = icmp ult i4 %s, 15 ; 0b1111
  ret i1 %r
}

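; (x >> 2) is zero exactly when x u< 4, so the eq/ne-with-zero compares become
; unsigned compares of x even though the shift has another use. With 'exact',
; no set bits are shifted out, so the result is zero iff x is zero.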
define i1 @lshr_eq_0_multiuse(i8 %x) {
; CHECK-LABEL: @lshr_eq_0_multiuse(
; CHECK-NEXT:    [[S:%.*]] = lshr i8 [[X:%.*]], 2
; CHECK-NEXT:    call void @use(i8 [[S]])
; CHECK-NEXT:    [[C:%.*]] = icmp ult i8 [[X]], 4
; CHECK-NEXT:    ret i1 [[C]]
;
  %s = lshr i8 %x, 2
  call void @use(i8 %s)
  %c = icmp eq i8 %s, 0
  ret i1 %c
}

define i1 @lshr_ne_0_multiuse(i8 %x) {
; CHECK-LABEL: @lshr_ne_0_multiuse(
; CHECK-NEXT:    [[S:%.*]] = lshr i8 [[X:%.*]], 2
; CHECK-NEXT:    call void @use(i8 [[S]])
; CHECK-NEXT:    [[C:%.*]] = icmp ugt i8 [[X]], 3
; CHECK-NEXT:    ret i1 [[C]]
;
  %s = lshr i8 %x, 2
  call void @use(i8 %s)
  %c = icmp ne i8 %s, 0
  ret i1 %c
}

define i1 @ashr_eq_0_multiuse(i8 %x) {
; CHECK-LABEL: @ashr_eq_0_multiuse(
; CHECK-NEXT:    [[S:%.*]] = ashr i8 [[X:%.*]], 2
; CHECK-NEXT:    call void @use(i8 [[S]])
; CHECK-NEXT:    [[C:%.*]] = icmp ult i8 [[X]], 4
; CHECK-NEXT:    ret i1 [[C]]
;
  %s = ashr i8 %x, 2
  call void @use(i8 %s)
  %c = icmp eq i8 %s, 0
  ret i1 %c
}

define i1 @ashr_ne_0_multiuse(i8 %x) {
; CHECK-LABEL: @ashr_ne_0_multiuse(
; CHECK-NEXT:    [[S:%.*]] = ashr i8 [[X:%.*]], 2
; CHECK-NEXT:    call void @use(i8 [[S]])
; CHECK-NEXT:    [[C:%.*]] = icmp ugt i8 [[X]], 3
; CHECK-NEXT:    ret i1 [[C]]
;
  %s = ashr i8 %x, 2
  call void @use(i8 %s)
  %c = icmp ne i8 %s, 0
  ret i1 %c
}

define i1 @lshr_exact_eq_0_multiuse(i8 %x) {
; CHECK-LABEL: @lshr_exact_eq_0_multiuse(
; CHECK-NEXT:    [[S:%.*]] = lshr exact i8 [[X:%.*]], 2
; CHECK-NEXT:    call void @use(i8 [[S]])
; CHECK-NEXT:    [[C:%.*]] = icmp eq i8 [[X]], 0
; CHECK-NEXT:    ret i1 [[C]]
;
  %s = lshr exact i8 %x, 2
  call void @use(i8 %s)
  %c = icmp eq i8 %s, 0
  ret i1 %c
}

define i1 @lshr_exact_ne_0_multiuse(i8 %x) {
; CHECK-LABEL: @lshr_exact_ne_0_multiuse(
; CHECK-NEXT:    [[S:%.*]] = lshr exact i8 [[X:%.*]], 2
; CHECK-NEXT:    call void @use(i8 [[S]])
; CHECK-NEXT:    [[C:%.*]] = icmp ne i8 [[X]], 0
; CHECK-NEXT:    ret i1 [[C]]
;
  %s = lshr exact i8 %x, 2
  call void @use(i8 %s)
  %c = icmp ne i8 %s, 0
  ret i1 %c
}

define i1 @ashr_exact_eq_0_multiuse(i8 %x) {
; CHECK-LABEL: @ashr_exact_eq_0_multiuse(
; CHECK-NEXT:    [[S:%.*]] = ashr exact i8 [[X:%.*]], 2
; CHECK-NEXT:    call void @use(i8 [[S]])
; CHECK-NEXT:    [[C:%.*]] = icmp eq i8 [[X]], 0
; CHECK-NEXT:    ret i1 [[C]]
;
  %s = ashr exact i8 %x, 2
  call void @use(i8 %s)
  %c = icmp eq i8 %s, 0
  ret i1 %c
}

define i1 @ashr_exact_ne_0_multiuse(i8 %x) {
; CHECK-LABEL: @ashr_exact_ne_0_multiuse(
; CHECK-NEXT:    [[S:%.*]] = ashr exact i8 [[X:%.*]], 2
; CHECK-NEXT:    call void @use(i8 [[S]])
; CHECK-NEXT:    [[C:%.*]] = icmp ne i8 [[X]], 0
; CHECK-NEXT:    ret i1 [[C]]
;
  %s = ashr exact i8 %x, 2
  call void @use(i8 %s)
  %c = icmp ne i8 %s, 0
  ret i1 %c
}

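; Right-shifting a power of two gives a smaller power of two or zero, so these
; ugt compares translate directly into compares of the shift amount.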
define i1 @lshr_pow2_ugt(i8 %x) {
; CHECK-LABEL: @lshr_pow2_ugt(
; CHECK-NEXT:    [[R:%.*]] = icmp eq i8 [[X:%.*]], 0
; CHECK-NEXT:    ret i1 [[R]]
;
  %s = lshr i8 2, %x
  %r = icmp ugt i8 %s, 1
  ret i1 %r
}

define i1 @lshr_pow2_ugt_use(i8 %x) {
; CHECK-LABEL: @lshr_pow2_ugt_use(
; CHECK-NEXT:    [[S:%.*]] = lshr exact i8 -128, [[X:%.*]]
; CHECK-NEXT:    call void @use(i8 [[S]])
; CHECK-NEXT:    [[R:%.*]] = icmp ult i8 [[X]], 5
; CHECK-NEXT:    ret i1 [[R]]
;
  %s = lshr i8 128, %x
  call void @use(i8 %s)
  %r = icmp ugt i8 %s, 5
  ret i1 %r
}

define <2 x i1> @lshr_pow2_ugt_vec(<2 x i8> %x) {
; CHECK-LABEL: @lshr_pow2_ugt_vec(
; CHECK-NEXT:    [[R:%.*]] = icmp eq <2 x i8> [[X:%.*]], zeroinitializer
; CHECK-NEXT:    ret <2 x i1> [[R]]
;
  %s = lshr <2 x i8> <i8 8, i8 8>, %x
  %r = icmp ugt <2 x i8> %s, <i8 6, i8 6>
  ret <2 x i1> %r
}

; negative test - need power-of-2

define i1 @lshr_not_pow2_ugt(i8 %x) {
; CHECK-LABEL: @lshr_not_pow2_ugt(
; CHECK-NEXT:    [[S:%.*]] = lshr i8 3, [[X:%.*]]
; CHECK-NEXT:    [[R:%.*]] = icmp samesign ugt i8 [[S]], 1
; CHECK-NEXT:    ret i1 [[R]]
;
  %s = lshr i8 3, %x
  %r = icmp ugt i8 %s, 1
  ret i1 %r
}

define i1 @lshr_pow2_ugt1(i8 %x) {
; CHECK-LABEL: @lshr_pow2_ugt1(
; CHECK-NEXT:    [[R:%.*]] = icmp ult i8 [[X:%.*]], 7
; CHECK-NEXT:    ret i1 [[R]]
;
  %s = lshr i8 128, %x
  %r = icmp ugt i8 %s, 1
  ret i1 %r
}

; negative test - need logical shift

define i1 @ashr_pow2_ugt(i8 %x) {
; CHECK-LABEL: @ashr_pow2_ugt(
; CHECK-NEXT:    [[S:%.*]] = ashr exact i8 -128, [[X:%.*]]
; CHECK-NEXT:    [[R:%.*]] = icmp samesign ugt i8 [[S]], -96
; CHECK-NEXT:    ret i1 [[R]]
;
  %s = ashr i8 128, %x
  %r = icmp ugt i8 %s, 160
  ret i1 %r
}

; negative test - need unsigned pred

define i1 @lshr_pow2_sgt(i8 %x) {
; CHECK-LABEL: @lshr_pow2_sgt(
; CHECK-NEXT:    [[S:%.*]] = lshr exact i8 -128, [[X:%.*]]
; CHECK-NEXT:    [[R:%.*]] = icmp sgt i8 [[S]], 3
; CHECK-NEXT:    ret i1 [[R]]
;
  %s = lshr i8 128, %x
  %r = icmp sgt i8 %s, 3
  ret i1 %r
}

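; Same idea with ult: (4 >> x) is 4, 2, 1 or 0, so it drops below 2 exactly
; when x is 2 or more (x ugt 1).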
define i1 @lshr_pow2_ult(i8 %x) {
; CHECK-LABEL: @lshr_pow2_ult(
; CHECK-NEXT:    [[R:%.*]] = icmp ugt i8 [[X:%.*]], 1
; CHECK-NEXT:    ret i1 [[R]]
;
  %s = lshr i8 4, %x
  %r = icmp ult i8 %s, 2
  ret i1 %r
}

define i1 @lshr_pow2_ult_use(i8 %x) {
; CHECK-LABEL: @lshr_pow2_ult_use(
; CHECK-NEXT:    [[S:%.*]] = lshr exact i8 -128, [[X:%.*]]
; CHECK-NEXT:    call void @use(i8 [[S]])
; CHECK-NEXT:    [[R:%.*]] = icmp ugt i8 [[X]], 4
; CHECK-NEXT:    ret i1 [[R]]
;
  %s = lshr i8 128, %x
  call void @use(i8 %s)
  %r = icmp ult i8 %s, 5
  ret i1 %r
}

define <2 x i1> @lshr_pow2_ult_vec(<2 x i8> %x) {
; CHECK-LABEL: @lshr_pow2_ult_vec(
; CHECK-NEXT:    [[R:%.*]] = icmp ne <2 x i8> [[X:%.*]], zeroinitializer
; CHECK-NEXT:    ret <2 x i1> [[R]]
;
  %s = lshr <2 x i8> <i8 8, i8 8>, %x
  %r = icmp ult <2 x i8> %s, <i8 6, i8 6>
  ret <2 x i1> %r
}

; negative test - need power-of-2

define i1 @lshr_not_pow2_ult(i8 %x) {
; CHECK-LABEL: @lshr_not_pow2_ult(
; CHECK-NEXT:    [[S:%.*]] = lshr i8 3, [[X:%.*]]
; CHECK-NEXT:    [[R:%.*]] = icmp samesign ult i8 [[S]], 2
; CHECK-NEXT:    ret i1 [[R]]
;
  %s = lshr i8 3, %x
  %r = icmp ult i8 %s, 2
  ret i1 %r
}

define i1 @lshr_pow2_ult_equal_constants(i32 %x) {
; CHECK-LABEL: @lshr_pow2_ult_equal_constants(
; CHECK-NEXT:    [[R:%.*]] = icmp ne i32 [[X:%.*]], 0
; CHECK-NEXT:    ret i1 [[R]]
;
  %shr = lshr i32 16, %x
  %r = icmp ult i32 %shr, 16
  ret i1 %r
}

define i1 @lshr_pow2_ult_smin(i8 %x) {
; CHECK-LABEL: @lshr_pow2_ult_smin(
; CHECK-NEXT:    [[R:%.*]] = icmp ne i8 [[X:%.*]], 0
; CHECK-NEXT:    ret i1 [[R]]
;
  %s = lshr i8 128, %x
  %r = icmp ult i8 %s, 128
  ret i1 %r
}

; negative test - need logical shift

define i1 @ashr_pow2_ult(i8 %x) {
; CHECK-LABEL: @ashr_pow2_ult(
; CHECK-NEXT:    [[S:%.*]] = ashr exact i8 -128, [[X:%.*]]
; CHECK-NEXT:    [[R:%.*]] = icmp samesign ult i8 [[S]], -96
; CHECK-NEXT:    ret i1 [[R]]
;
  %s = ashr i8 128, %x
  %r = icmp ult i8 %s, 160
  ret i1 %r
}

; negative test - need unsigned pred

define i1 @lshr_pow2_slt(i8 %x) {
; CHECK-LABEL: @lshr_pow2_slt(
; CHECK-NEXT:    [[S:%.*]] = lshr exact i8 -128, [[X:%.*]]
; CHECK-NEXT:    [[R:%.*]] = icmp slt i8 [[S]], 3
; CHECK-NEXT:    ret i1 [[R]]
;
  %s = lshr i8 128, %x
  %r = icmp slt i8 %s, 3
  ret i1 %r
}

; (ShiftValC >> X) >s -1 --> X != 0 with ShiftValC < 0
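; A logical shift of the negative constant by any non-zero amount clears the
; sign bit and yields a non-negative value; only X == 0 keeps it negative.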

define i1 @lshr_neg_sgt_minus_1(i8 %x) {
; CHECK-LABEL: @lshr_neg_sgt_minus_1(
; CHECK-NEXT:    [[R:%.*]] = icmp ne i8 [[X:%.*]], 0
; CHECK-NEXT:    ret i1 [[R]]
;
  %s = lshr i8 -17, %x
  %r = icmp sgt i8 %s, -1
  ret i1 %r
}

define <2 x i1> @lshr_neg_sgt_minus_1_vector(<2 x i8> %x) {
; CHECK-LABEL: @lshr_neg_sgt_minus_1_vector(
; CHECK-NEXT:    [[R:%.*]] = icmp ne <2 x i8> [[X:%.*]], zeroinitializer
; CHECK-NEXT:    ret <2 x i1> [[R]]
;
  %s = lshr <2 x i8> <i8 -17, i8 -17>, %x
  %r = icmp sgt <2 x i8> %s, <i8 -1, i8 -1>
  ret <2 x i1> %r
}

define i1 @lshr_neg_sgt_minus_1_extra_use(i8 %x) {
; CHECK-LABEL: @lshr_neg_sgt_minus_1_extra_use(
; CHECK-NEXT:    [[S:%.*]] = lshr i8 -17, [[X:%.*]]
; CHECK-NEXT:    call void @use(i8 [[S]])
; CHECK-NEXT:    [[R:%.*]] = icmp ne i8 [[X]], 0
; CHECK-NEXT:    ret i1 [[R]]
;
  %s = lshr i8 -17, %x
  call void @use(i8 %s)
  %r = icmp sgt i8 %s, -1
  ret i1 %r
}

; Negative tests

define i1 @lshr_neg_sgt_minus_2(i8 %x) {
; CHECK-LABEL: @lshr_neg_sgt_minus_2(
; CHECK-NEXT:    [[S:%.*]] = lshr i8 -17, [[X:%.*]]
; CHECK-NEXT:    [[R:%.*]] = icmp sgt i8 [[S]], -2
; CHECK-NEXT:    ret i1 [[R]]
;
  %s = lshr i8 -17, %x
  %r = icmp sgt i8 %s, -2
  ret i1 %r
}

define i1 @lshr_neg_slt_minus_1(i8 %x) {
; CHECK-LABEL: @lshr_neg_slt_minus_1(
; CHECK-NEXT:    [[S:%.*]] = lshr i8 -17, [[X:%.*]]
; CHECK-NEXT:    [[R:%.*]] = icmp slt i8 [[S]], -1
; CHECK-NEXT:    ret i1 [[R]]
;
  %s = lshr i8 -17, %x
  %r = icmp slt i8 %s, -1
  ret i1 %r
}

; (ShiftValC >> X) <s 0 --> X == 0 with ShiftValC < 0

define i1 @lshr_neg_slt_zero(i8 %x) {
; CHECK-LABEL: @lshr_neg_slt_zero(
; CHECK-NEXT:    [[R:%.*]] = icmp eq i8 [[X:%.*]], 0
; CHECK-NEXT:    ret i1 [[R]]
;
  %s = lshr i8 -17, %x
  %r = icmp slt i8 %s, 0
  ret i1 %r
}

define <2 x i1> @lshr_neg_slt_zero_vector(<2 x i8> %x) {
; CHECK-LABEL: @lshr_neg_slt_zero_vector(
; CHECK-NEXT:    [[R:%.*]] = icmp eq <2 x i8> [[X:%.*]], zeroinitializer
; CHECK-NEXT:    ret <2 x i1> [[R]]
;
  %s = lshr <2 x i8> <i8 -17, i8 -17>, %x
  %r = icmp slt <2 x i8> %s, <i8 0, i8 0>
  ret <2 x i1> %r
}

define i1 @lshr_neg_slt_zero_extra_use(i8 %x) {
; CHECK-LABEL: @lshr_neg_slt_zero_extra_use(
; CHECK-NEXT:    [[S:%.*]] = lshr i8 -17, [[X:%.*]]
; CHECK-NEXT:    call void @use(i8 [[S]])
; CHECK-NEXT:    [[R:%.*]] = icmp eq i8 [[X]], 0
; CHECK-NEXT:    ret i1 [[R]]
;
  %s = lshr i8 -17, %x
  call void @use(i8 %s)
  %r = icmp slt i8 %s, 0
  ret i1 %r
}

; Negative tests

define i1 @lshr_neg_slt_non-zero(i8 %x) {
; CHECK-LABEL: @lshr_neg_slt_non-zero(
; CHECK-NEXT:    [[S:%.*]] = lshr i8 -17, [[X:%.*]]
; CHECK-NEXT:    [[R:%.*]] = icmp slt i8 [[S]], 2
; CHECK-NEXT:    ret i1 [[R]]
;
  %s = lshr i8 -17, %x
  %r = icmp slt i8 %s, 2
  ret i1 %r
}

define i1 @lshr_neg_sgt_zero(i8 %x) {
; CHECK-LABEL: @lshr_neg_sgt_zero(
; CHECK-NEXT:    [[S:%.*]] = lshr i8 -17, [[X:%.*]]
; CHECK-NEXT:    [[R:%.*]] = icmp sgt i8 [[S]], 0
; CHECK-NEXT:    ret i1 [[R]]
;
  %s = lshr i8 -17, %x
  %r = icmp sgt i8 %s, 0
  ret i1 %r
}

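; (x lshr 7) is the sign bit of x, and zext(y sgt -1) is 1 exactly when the
; sign bit of y is clear, so the two are equal iff the sign bits differ,
; i.e. (x ^ y) is negative.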
define i1 @exactly_one_set_signbit(i8 %x, i8 %y) {
; CHECK-LABEL: @exactly_one_set_signbit(
; CHECK-NEXT:    [[TMP1:%.*]] = xor i8 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    [[TMP2:%.*]] = icmp slt i8 [[TMP1]], 0
; CHECK-NEXT:    ret i1 [[TMP2]]
;
  %xsign = lshr i8 %x, 7
  %ypos = icmp sgt i8 %y, -1
  %yposz = zext i1 %ypos to i8
  %r = icmp eq i8 %xsign, %yposz
  ret i1 %r
}

define i1 @exactly_one_set_signbit_use1(i8 %x, i8 %y) {
; CHECK-LABEL: @exactly_one_set_signbit_use1(
; CHECK-NEXT:    [[XSIGN:%.*]] = lshr i8 [[X:%.*]], 7
; CHECK-NEXT:    call void @use(i8 [[XSIGN]])
; CHECK-NEXT:    [[TMP1:%.*]] = xor i8 [[X]], [[Y:%.*]]
; CHECK-NEXT:    [[TMP2:%.*]] = icmp slt i8 [[TMP1]], 0
; CHECK-NEXT:    ret i1 [[TMP2]]
;
  %xsign = lshr i8 %x, 7
  call void @use(i8 %xsign)
  %ypos = icmp sgt i8 %y, -1
  %yposz = zext i1 %ypos to i8
  %r = icmp eq i8 %xsign, %yposz
  ret i1 %r
}

define <2 x i1> @same_signbit(<2 x i8> %x, <2 x i8> %y) {
; CHECK-LABEL: @same_signbit(
; CHECK-NEXT:    [[TMP1:%.*]] = xor <2 x i8> [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    [[R1:%.*]] = icmp sgt <2 x i8> [[TMP1]], splat (i8 -1)
; CHECK-NEXT:    ret <2 x i1> [[R1]]
;
  %xsign = lshr <2 x i8> %x, <i8 7, i8 7>
  %ypos = icmp sgt <2 x i8> %y, <i8 -1, i8 -1>
  %yposz = zext <2 x i1> %ypos to <2 x i8>
  %r = icmp ne <2 x i8> %xsign, %yposz
  ret <2 x i1> %r
}

define i1 @same_signbit_use2(i8 %x, i8 %y) {
; CHECK-LABEL: @same_signbit_use2(
; CHECK-NEXT:    [[YPOS:%.*]] = icmp sgt i8 [[Y:%.*]], -1
; CHECK-NEXT:    [[YPOSZ:%.*]] = zext i1 [[YPOS]] to i8
; CHECK-NEXT:    call void @use(i8 [[YPOSZ]])
; CHECK-NEXT:    [[TMP1:%.*]] = xor i8 [[X:%.*]], [[Y]]
; CHECK-NEXT:    [[R1:%.*]] = icmp sgt i8 [[TMP1]], -1
; CHECK-NEXT:    ret i1 [[R1]]
;
  %xsign = lshr i8 %x, 7
  %ypos = icmp sgt i8 %y, -1
  %yposz = zext i1 %ypos to i8
  call void @use(i8 %yposz)
  %r = icmp ne i8 %xsign, %yposz
  ret i1 %r
}

; negative test

define i1 @same_signbit_use3(i8 %x, i8 %y) {
; CHECK-LABEL: @same_signbit_use3(
; CHECK-NEXT:    [[XSIGN:%.*]] = lshr i8 [[X:%.*]], 7
; CHECK-NEXT:    call void @use(i8 [[XSIGN]])
; CHECK-NEXT:    [[YPOS:%.*]] = icmp sgt i8 [[Y:%.*]], -1
; CHECK-NEXT:    [[YPOSZ:%.*]] = zext i1 [[YPOS]] to i8
; CHECK-NEXT:    call void @use(i8 [[YPOSZ]])
; CHECK-NEXT:    [[R:%.*]] = icmp ne i8 [[XSIGN]], [[YPOSZ]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %xsign = lshr i8 %x, 7
  call void @use(i8 %xsign)
  %ypos = icmp sgt i8 %y, -1
  %yposz = zext i1 %ypos to i8
  call void @use(i8 %yposz)
  %r = icmp ne i8 %xsign, %yposz
  ret i1 %r
}

define <2 x i1> @same_signbit_poison_elts(<2 x i8> %x, <2 x i8> %y) {
; CHECK-LABEL: @same_signbit_poison_elts(
; CHECK-NEXT:    [[YPOS:%.*]] = icmp sgt <2 x i8> [[Y:%.*]], <i8 -1, i8 poison>
; CHECK-NEXT:    [[TMP1:%.*]] = icmp slt <2 x i8> [[X:%.*]], zeroinitializer
; CHECK-NEXT:    [[R1:%.*]] = xor <2 x i1> [[TMP1]], [[YPOS]]
; CHECK-NEXT:    ret <2 x i1> [[R1]]
;
  %xsign = lshr <2 x i8> %x, <i8 7, i8 poison>
  %ypos = icmp sgt <2 x i8> %y, <i8 -1, i8 poison>
  %yposz = zext <2 x i1> %ypos to <2 x i8>
  %r = icmp ne <2 x i8> %xsign, %yposz
  ret <2 x i1> %r
}

; negative test

define i1 @same_signbit_wrong_type(i8 %x, i32 %y) {
; CHECK-LABEL: @same_signbit_wrong_type(
; CHECK-NEXT:    [[YPOS:%.*]] = icmp sgt i32 [[Y:%.*]], -1
; CHECK-NEXT:    [[TMP1:%.*]] = icmp slt i8 [[X:%.*]], 0
; CHECK-NEXT:    [[R1:%.*]] = xor i1 [[TMP1]], [[YPOS]]
; CHECK-NEXT:    ret i1 [[R1]]
;
  %xsign = lshr i8 %x, 7
  %ypos = icmp sgt i32 %y, -1
  %yposz = zext i1 %ypos to i8
  %r = icmp ne i8 %xsign, %yposz
  ret i1 %r
}

; negative test

define i1 @exactly_one_set_signbit_wrong_shamt(i8 %x, i8 %y) {
; CHECK-LABEL: @exactly_one_set_signbit_wrong_shamt(
; CHECK-NEXT:    [[XSIGN:%.*]] = lshr i8 [[X:%.*]], 6
; CHECK-NEXT:    [[YPOS:%.*]] = icmp sgt i8 [[Y:%.*]], -1
; CHECK-NEXT:    [[YPOSZ:%.*]] = zext i1 [[YPOS]] to i8
; CHECK-NEXT:    [[R:%.*]] = icmp eq i8 [[XSIGN]], [[YPOSZ]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %xsign = lshr i8 %x, 6
  %ypos = icmp sgt i8 %y, -1
  %yposz = zext i1 %ypos to i8
  %r = icmp eq i8 %xsign, %yposz
  ret i1 %r
}

; negative test
; TODO: This could reduce.

define i1 @exactly_one_set_signbit_wrong_shr(i8 %x, i8 %y) {
; CHECK-LABEL: @exactly_one_set_signbit_wrong_shr(
; CHECK-NEXT:    [[XSIGN:%.*]] = ashr i8 [[X:%.*]], 7
; CHECK-NEXT:    [[YPOS:%.*]] = icmp sgt i8 [[Y:%.*]], -1
; CHECK-NEXT:    [[YPOSZ:%.*]] = zext i1 [[YPOS]] to i8
; CHECK-NEXT:    [[R:%.*]] = icmp eq i8 [[XSIGN]], [[YPOSZ]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %xsign = ashr i8 %x, 7
  %ypos = icmp sgt i8 %y, -1
  %yposz = zext i1 %ypos to i8
  %r = icmp eq i8 %xsign, %yposz
  ret i1 %r
}

; negative test
; TODO: This could reduce.

define i1 @exactly_one_set_signbit_wrong_pred(i8 %x, i8 %y) {
; CHECK-LABEL: @exactly_one_set_signbit_wrong_pred(
; CHECK-NEXT:    [[TMP1:%.*]] = and i8 [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT:    [[R1:%.*]] = icmp slt i8 [[TMP1]], 0
; CHECK-NEXT:    ret i1 [[R1]]
;
  %xsign = lshr i8 %x, 7
  %ypos = icmp sgt i8 %y, -1
  %yposz = zext i1 %ypos to i8
  %r = icmp sgt i8 %xsign, %yposz
  ret i1 %r
}

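; The signed variants use ashr and sext: (x ashr 7) and sext(y sgt -1) are
; each 0 or -1, and they are equal exactly when the sign bits of x and y
; differ.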
define i1 @exactly_one_set_signbit_signed(i8 %x, i8 %y) {
; CHECK-LABEL: @exactly_one_set_signbit_signed(
; CHECK-NEXT:    [[TMP1:%.*]] = xor i8 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    [[TMP2:%.*]] = icmp slt i8 [[TMP1]], 0
; CHECK-NEXT:    ret i1 [[TMP2]]
;
  %xsign = ashr i8 %x, 7
  %ypos = icmp sgt i8 %y, -1
  %yposz = sext i1 %ypos to i8
  %r = icmp eq i8 %xsign, %yposz
  ret i1 %r
}

define i1 @exactly_one_set_signbit_use1_signed(i8 %x, i8 %y) {
; CHECK-LABEL: @exactly_one_set_signbit_use1_signed(
; CHECK-NEXT:    [[XSIGN:%.*]] = ashr i8 [[X:%.*]], 7
; CHECK-NEXT:    call void @use(i8 [[XSIGN]])
; CHECK-NEXT:    [[TMP1:%.*]] = xor i8 [[X]], [[Y:%.*]]
; CHECK-NEXT:    [[TMP2:%.*]] = icmp slt i8 [[TMP1]], 0
; CHECK-NEXT:    ret i1 [[TMP2]]
;
  %xsign = ashr i8 %x, 7
  call void @use(i8 %xsign)
  %ypos = icmp sgt i8 %y, -1
  %yposz = sext i1 %ypos to i8
  %r = icmp eq i8 %xsign, %yposz
  ret i1 %r
}

define <2 x i1> @same_signbit_signed(<2 x i8> %x, <2 x i8> %y) {
; CHECK-LABEL: @same_signbit_signed(
; CHECK-NEXT:    [[TMP1:%.*]] = xor <2 x i8> [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    [[R1:%.*]] = icmp sgt <2 x i8> [[TMP1]], splat (i8 -1)
; CHECK-NEXT:    ret <2 x i1> [[R1]]
;
  %xsign = ashr <2 x i8> %x, <i8 7, i8 7>
  %ypos = icmp sgt <2 x i8> %y, <i8 -1, i8 -1>
  %yposz = sext <2 x i1> %ypos to <2 x i8>
  %r = icmp ne <2 x i8> %xsign, %yposz
  ret <2 x i1> %r
}

define i1 @same_signbit_use2_signed(i8 %x, i8 %y) {
; CHECK-LABEL: @same_signbit_use2_signed(
; CHECK-NEXT:    [[YPOS:%.*]] = icmp sgt i8 [[Y:%.*]], -1
; CHECK-NEXT:    [[YPOSZ:%.*]] = sext i1 [[YPOS]] to i8
; CHECK-NEXT:    call void @use(i8 [[YPOSZ]])
; CHECK-NEXT:    [[TMP1:%.*]] = xor i8 [[X:%.*]], [[Y]]
; CHECK-NEXT:    [[R1:%.*]] = icmp sgt i8 [[TMP1]], -1
; CHECK-NEXT:    ret i1 [[R1]]
;
  %xsign = ashr i8 %x, 7
  %ypos = icmp sgt i8 %y, -1
  %yposz = sext i1 %ypos to i8
  call void @use(i8 %yposz)
  %r = icmp ne i8 %xsign, %yposz
  ret i1 %r
}

; negative test

define i1 @same_signbit_use3_signed(i8 %x, i8 %y) {
; CHECK-LABEL: @same_signbit_use3_signed(
; CHECK-NEXT:    [[XSIGN:%.*]] = ashr i8 [[X:%.*]], 7
; CHECK-NEXT:    call void @use(i8 [[XSIGN]])
; CHECK-NEXT:    [[YPOS:%.*]] = icmp sgt i8 [[Y:%.*]], -1
; CHECK-NEXT:    [[YPOSZ:%.*]] = sext i1 [[YPOS]] to i8
; CHECK-NEXT:    call void @use(i8 [[YPOSZ]])
; CHECK-NEXT:    [[R:%.*]] = icmp ne i8 [[XSIGN]], [[YPOSZ]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %xsign = ashr i8 %x, 7
  call void @use(i8 %xsign)
  %ypos = icmp sgt i8 %y, -1
  %yposz = sext i1 %ypos to i8
  call void @use(i8 %yposz)
  %r = icmp ne i8 %xsign, %yposz
  ret i1 %r
}

define <2 x i1> @same_signbit_poison_elts_signed(<2 x i8> %x, <2 x i8> %y) {
; CHECK-LABEL: @same_signbit_poison_elts_signed(
; CHECK-NEXT:    [[YPOS:%.*]] = icmp sgt <2 x i8> [[Y:%.*]], <i8 -1, i8 poison>
; CHECK-NEXT:    [[TMP1:%.*]] = icmp slt <2 x i8> [[X:%.*]], zeroinitializer
; CHECK-NEXT:    [[R1:%.*]] = xor <2 x i1> [[TMP1]], [[YPOS]]
; CHECK-NEXT:    ret <2 x i1> [[R1]]
;
  %xsign = ashr <2 x i8> %x, <i8 7, i8 poison>
  %ypos = icmp sgt <2 x i8> %y, <i8 -1, i8 poison>
  %yposz = sext <2 x i1> %ypos to <2 x i8>
  %r = icmp ne <2 x i8> %xsign, %yposz
  ret <2 x i1> %r
}

; negative test

define i1 @same_signbit_wrong_type_signed(i8 %x, i32 %y) {
; CHECK-LABEL: @same_signbit_wrong_type_signed(
; CHECK-NEXT:    [[YPOS:%.*]] = icmp sgt i32 [[Y:%.*]], -1
; CHECK-NEXT:    [[TMP1:%.*]] = icmp slt i8 [[X:%.*]], 0
; CHECK-NEXT:    [[R1:%.*]] = xor i1 [[TMP1]], [[YPOS]]
; CHECK-NEXT:    ret i1 [[R1]]
;
  %xsign = ashr i8 %x, 7
  %ypos = icmp sgt i32 %y, -1
  %yposz = sext i1 %ypos to i8
  %r = icmp ne i8 %xsign, %yposz
  ret i1 %r
}

; negative test

define i1 @exactly_one_set_signbit_wrong_shamt_signed(i8 %x, i8 %y) {
; CHECK-LABEL: @exactly_one_set_signbit_wrong_shamt_signed(
; CHECK-NEXT:    [[XSIGN:%.*]] = ashr i8 [[X:%.*]], 6
; CHECK-NEXT:    [[YPOS:%.*]] = icmp sgt i8 [[Y:%.*]], -1
; CHECK-NEXT:    [[YPOSZ:%.*]] = sext i1 [[YPOS]] to i8
; CHECK-NEXT:    [[R:%.*]] = icmp eq i8 [[XSIGN]], [[YPOSZ]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %xsign = ashr i8 %x, 6
  %ypos = icmp sgt i8 %y, -1
  %yposz = sext i1 %ypos to i8
  %r = icmp eq i8 %xsign, %yposz
  ret i1 %r
}

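; (a lshr 31) is the sign bit of a, so (zext i1 b) u< (a lshr 31) holds
; exactly when a is negative and b is false.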
define i1 @slt_zero_ult_i1(i32 %a, i1 %b) {
; CHECK-LABEL: @slt_zero_ult_i1(
; CHECK-NEXT:    [[TMP1:%.*]] = icmp slt i32 [[A:%.*]], 0
; CHECK-NEXT:    [[TMP2:%.*]] = xor i1 [[B:%.*]], true
; CHECK-NEXT:    [[CMP21:%.*]] = and i1 [[TMP1]], [[TMP2]]
; CHECK-NEXT:    ret i1 [[CMP21]]
;
  %conv = zext i1 %b to i32
  %cmp1 = lshr i32 %a, 31
  %cmp2 = icmp ult i32 %conv, %cmp1
  ret i1 %cmp2
}

define i1 @slt_zero_ult_i1_fail1(i32 %a, i1 %b) {
; CHECK-LABEL: @slt_zero_ult_i1_fail1(
; CHECK-NEXT:    [[CONV:%.*]] = zext i1 [[B:%.*]] to i32
; CHECK-NEXT:    [[CMP1:%.*]] = lshr i32 [[A:%.*]], 30
; CHECK-NEXT:    [[CMP2:%.*]] = icmp samesign ugt i32 [[CMP1]], [[CONV]]
; CHECK-NEXT:    ret i1 [[CMP2]]
;
  %conv = zext i1 %b to i32
  %cmp1 = lshr i32 %a, 30
  %cmp2 = icmp ult i32 %conv, %cmp1
  ret i1 %cmp2
}

define i1 @slt_zero_ult_i1_fail2(i32 %a, i1 %b) {
; CHECK-LABEL: @slt_zero_ult_i1_fail2(
; CHECK-NEXT:    [[CONV:%.*]] = zext i1 [[B:%.*]] to i32
; CHECK-NEXT:    [[CMP1:%.*]] = ashr i32 [[A:%.*]], 31
; CHECK-NEXT:    [[CMP2:%.*]] = icmp ugt i32 [[CMP1]], [[CONV]]
; CHECK-NEXT:    ret i1 [[CMP2]]
;
  %conv = zext i1 %b to i32
  %cmp1 = ashr i32 %a, 31
  %cmp2 = icmp ult i32 %conv, %cmp1
  ret i1 %cmp2
}

define i1 @slt_zero_slt_i1_fail(i32 %a, i1 %b) {
; CHECK-LABEL: @slt_zero_slt_i1_fail(
; CHECK-NEXT:    [[TMP1:%.*]] = icmp slt i32 [[A:%.*]], 0
; CHECK-NEXT:    [[TMP2:%.*]] = xor i1 [[B:%.*]], true
; CHECK-NEXT:    [[CMP21:%.*]] = and i1 [[TMP1]], [[TMP2]]
; CHECK-NEXT:    ret i1 [[CMP21]]
;
  %conv = zext i1 %b to i32
  %cmp1 = lshr i32 %a, 31
  %cmp2 = icmp slt i32 %conv, %cmp1
  ret i1 %cmp2
}

define i1 @slt_zero_eq_i1_signed(i32 %a, i1 %b) {
; CHECK-LABEL: @slt_zero_eq_i1_signed(
; CHECK-NEXT:    [[TMP1:%.*]] = icmp sgt i32 [[A:%.*]], -1
; CHECK-NEXT:    [[CMP21:%.*]] = xor i1 [[TMP1]], [[B:%.*]]
; CHECK-NEXT:    ret i1 [[CMP21]]
;
  %conv = sext i1 %b to i32
  %cmp1 = ashr i32 %a, 31
  %cmp2 = icmp eq i32 %conv, %cmp1
  ret i1 %cmp2
}

define i1 @slt_zero_eq_i1_fail_signed(i32 %a, i1 %b) {
; CHECK-LABEL: @slt_zero_eq_i1_fail_signed(
; CHECK-NEXT:    [[CONV:%.*]] = sext i1 [[B:%.*]] to i32
; CHECK-NEXT:    [[CMP1:%.*]] = lshr i32 [[A:%.*]], 31
; CHECK-NEXT:    [[CMP2:%.*]] = icmp eq i32 [[CMP1]], [[CONV]]
; CHECK-NEXT:    ret i1 [[CMP2]]
;
  %conv = sext i1 %b to i32
  %cmp1 = lshr i32 %a, 31
  %cmp2 = icmp eq i32 %conv, %cmp1
  ret i1 %cmp2
}