; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -passes=instcombine -S < %s | FileCheck %s

declare { i8, i1 } @llvm.uadd.with.overflow.i8(i8, i8) nounwind readnone
declare { i8, i1 } @llvm.sadd.with.overflow.i8(i8, i8) nounwind readnone
declare { i8, i1 } @llvm.usub.with.overflow.i8(i8, i8) nounwind readnone
declare { i8, i1 } @llvm.ssub.with.overflow.i8(i8, i8) nounwind readnone
declare { i8, i1 } @llvm.umul.with.overflow.i8(i8, i8) nounwind readnone
declare { i8, i1 } @llvm.smul.with.overflow.i8(i8, i8) nounwind readnone
declare { i32, i1 } @llvm.sadd.with.overflow.i32(i32, i32) nounwind readnone
declare { i32, i1 } @llvm.uadd.with.overflow.i32(i32, i32) nounwind readnone
declare { i32, i1 } @llvm.ssub.with.overflow.i32(i32, i32) nounwind readnone
declare { i32, i1 } @llvm.usub.with.overflow.i32(i32, i32) nounwind readnone
declare { i32, i1 } @llvm.smul.with.overflow.i32(i32, i32) nounwind readnone
declare { i32, i1 } @llvm.umul.with.overflow.i32(i32, i32) nounwind readnone

define i8 @uaddtest1(i8 %A, i8 %B) {
; CHECK-LABEL: @uaddtest1(
; CHECK-NEXT:    [[Y:%.*]] = add i8 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    ret i8 [[Y]]
;
  %x = call { i8, i1 } @llvm.uadd.with.overflow.i8(i8 %A, i8 %B)
  %y = extractvalue { i8, i1 } %x, 0
  ret i8 %y
}

define i8 @uaddtest2(i8 %A, i8 %B, i1* %overflowPtr) {
; CHECK-LABEL: @uaddtest2(
; CHECK-NEXT:    [[AND_A:%.*]] = and i8 [[A:%.*]], 127
; CHECK-NEXT:    [[AND_B:%.*]] = and i8 [[B:%.*]], 127
; CHECK-NEXT:    [[X:%.*]] = add nuw i8 [[AND_A]], [[AND_B]]
; CHECK-NEXT:    store i1 false, i1* [[OVERFLOWPTR:%.*]], align 1
; CHECK-NEXT:    ret i8 [[X]]
;
  %and.A = and i8 %A, 127
  %and.B = and i8 %B, 127
  %x = call { i8, i1 } @llvm.uadd.with.overflow.i8(i8 %and.A, i8 %and.B)
  %y = extractvalue { i8, i1 } %x, 0
  %z = extractvalue { i8, i1 } %x, 1
  store i1 %z, i1* %overflowPtr
  ret i8 %y
}

define i8 @uaddtest3(i8 %A, i8 %B, i1* %overflowPtr) {
; CHECK-LABEL: @uaddtest3(
; CHECK-NEXT:    [[OR_A:%.*]] = or i8 [[A:%.*]], -128
; CHECK-NEXT:    [[OR_B:%.*]] = or i8 [[B:%.*]], -128
; CHECK-NEXT:    [[X:%.*]] = add i8 [[OR_A]], [[OR_B]]
; CHECK-NEXT:    store i1 true, i1* [[OVERFLOWPTR:%.*]], align 1
; CHECK-NEXT:    ret i8 [[X]]
;
  %or.A = or i8 %A, -128
  %or.B = or i8 %B, -128
  %x = call { i8, i1 } @llvm.uadd.with.overflow.i8(i8 %or.A, i8 %or.B)
  %y = extractvalue { i8, i1 } %x, 0
  %z = extractvalue { i8, i1 } %x, 1
  store i1 %z, i1* %overflowPtr
  ret i8 %y
}

define i8 @uaddtest4(i8 %A, i1* %overflowPtr) {
; CHECK-LABEL: @uaddtest4(
; CHECK-NEXT:    store i1 false, i1* [[OVERFLOWPTR:%.*]], align 1
; CHECK-NEXT:    ret i8 -1
;
  %x = call { i8, i1 } @llvm.uadd.with.overflow.i8(i8 undef, i8 %A)
  %y = extractvalue { i8, i1 } %x, 0
  %z = extractvalue { i8, i1 } %x, 1
  store i1 %z, i1* %overflowPtr
  ret i8 %y
}

define i8 @uaddtest5(i8 %A, i1* %overflowPtr) {
; CHECK-LABEL: @uaddtest5(
; CHECK-NEXT:    store i1 false, i1* [[OVERFLOWPTR:%.*]], align 1
; CHECK-NEXT:    ret i8 [[A:%.*]]
;
  %x = call { i8, i1 } @llvm.uadd.with.overflow.i8(i8 0, i8 %A)
  %y = extractvalue { i8, i1 } %x, 0
  %z = extractvalue { i8, i1 } %x, 1
  store i1 %z, i1* %overflowPtr
  ret i8 %y
}

define i1 @uaddtest6(i8 %A, i8 %B) {
; CHECK-LABEL: @uaddtest6(
; CHECK-NEXT:    [[Z:%.*]] = icmp ugt i8 [[A:%.*]], 3
; CHECK-NEXT:    ret i1 [[Z]]
;
  %x = call { i8, i1 } @llvm.uadd.with.overflow.i8(i8 %A, i8 -4)
  %z = extractvalue { i8, i1 } %x, 1
  ret i1 %z
}

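; Only the result (element 0) of the intrinsic is used, so the call simplifies to a plain add.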
define i8 @uaddtest7(i8 %A, i8 %B) {
; CHECK-LABEL: @uaddtest7(
; CHECK-NEXT:    [[Z:%.*]] = add i8 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    ret i8 [[Z]]
;
  %x = call { i8, i1 } @llvm.uadd.with.overflow.i8(i8 %A, i8 %B)
  %z = extractvalue { i8, i1 } %x, 0
  ret i8 %z
}

; PR20194
define { i32, i1 } @saddtest_nsw(i8 %a, i8 %b) {
; CHECK-LABEL: @saddtest_nsw(
; CHECK-NEXT:    [[AA:%.*]] = sext i8 [[A:%.*]] to i32
; CHECK-NEXT:    [[BB:%.*]] = sext i8 [[B:%.*]] to i32
; CHECK-NEXT:    [[X:%.*]] = add nsw i32 [[AA]], [[BB]]
; CHECK-NEXT:    [[TMP1:%.*]] = insertvalue { i32, i1 } { i32 undef, i1 false }, i32 [[X]], 0
; CHECK-NEXT:    ret { i32, i1 } [[TMP1]]
;
  %aa = sext i8 %a to i32
  %bb = sext i8 %b to i32
  %x = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %aa, i32 %bb)
  ret { i32, i1 } %x
}

define { i32, i1 } @uaddtest_nuw(i32 %a, i32 %b) {
; CHECK-LABEL: @uaddtest_nuw(
; CHECK-NEXT:    [[AA:%.*]] = and i32 [[A:%.*]], 2147483647
; CHECK-NEXT:    [[BB:%.*]] = and i32 [[B:%.*]], 2147483647
; CHECK-NEXT:    [[X:%.*]] = add nuw i32 [[AA]], [[BB]]
; CHECK-NEXT:    [[TMP1:%.*]] = insertvalue { i32, i1 } { i32 undef, i1 false }, i32 [[X]], 0
; CHECK-NEXT:    ret { i32, i1 } [[TMP1]]
;
  %aa = and i32 %a, 2147483647
  %bb = and i32 %b, 2147483647
  %x = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %aa, i32 %bb)
  ret { i32, i1 } %x
}

define { i32, i1 } @ssubtest_nsw(i8 %a, i8 %b) {
; CHECK-LABEL: @ssubtest_nsw(
; CHECK-NEXT:    [[AA:%.*]] = sext i8 [[A:%.*]] to i32
; CHECK-NEXT:    [[BB:%.*]] = sext i8 [[B:%.*]] to i32
; CHECK-NEXT:    [[X:%.*]] = sub nsw i32 [[AA]], [[BB]]
; CHECK-NEXT:    [[TMP1:%.*]] = insertvalue { i32, i1 } { i32 undef, i1 false }, i32 [[X]], 0
; CHECK-NEXT:    ret { i32, i1 } [[TMP1]]
;
  %aa = sext i8 %a to i32
  %bb = sext i8 %b to i32
  %x = call { i32, i1 } @llvm.ssub.with.overflow.i32(i32 %aa, i32 %bb)
  ret { i32, i1 } %x
}

define { i32, i1 } @usubtest_nuw(i32 %a, i32 %b) {
; CHECK-LABEL: @usubtest_nuw(
; CHECK-NEXT:    [[AA:%.*]] = or i32 [[A:%.*]], -2147483648
; CHECK-NEXT:    [[BB:%.*]] = and i32 [[B:%.*]], 2147483647
; CHECK-NEXT:    [[X:%.*]] = sub nuw i32 [[AA]], [[BB]]
; CHECK-NEXT:    [[TMP1:%.*]] = insertvalue { i32, i1 } { i32 undef, i1 false }, i32 [[X]], 0
; CHECK-NEXT:    ret { i32, i1 } [[TMP1]]
;
  %aa = or i32 %a, 2147483648
  %bb = and i32 %b, 2147483647
  %x = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %aa, i32 %bb)
  ret { i32, i1 } %x
}

define { i32, i1 } @smultest1_nsw(i32 %a, i32 %b) {
; CHECK-LABEL: @smultest1_nsw(
; CHECK-NEXT:    [[AA:%.*]] = and i32 [[A:%.*]], 4095
; CHECK-NEXT:    [[BB:%.*]] = and i32 [[B:%.*]], 524287
; CHECK-NEXT:    [[X:%.*]] = mul nuw nsw i32 [[AA]], [[BB]]
; CHECK-NEXT:    [[TMP1:%.*]] = insertvalue { i32, i1 } { i32 undef, i1 false }, i32 [[X]], 0
; CHECK-NEXT:    ret { i32, i1 } [[TMP1]]
;
  %aa = and i32 %a, 4095 ; 0xfff
  %bb = and i32 %b, 524287; 0x7ffff
  %x = call { i32, i1 } @llvm.smul.with.overflow.i32(i32 %aa, i32 %bb)
  ret { i32, i1 } %x
}

define { i32, i1 } @smultest2_nsw(i32 %a, i32 %b) {
; CHECK-LABEL: @smultest2_nsw(
; CHECK-NEXT:    [[AA:%.*]] = ashr i32 [[A:%.*]], 16
; CHECK-NEXT:    [[BB:%.*]] = ashr i32 [[B:%.*]], 16
; CHECK-NEXT:    [[X:%.*]] = mul nsw i32 [[AA]], [[BB]]
; CHECK-NEXT:    [[TMP1:%.*]] = insertvalue { i32, i1 } { i32 undef, i1 false }, i32 [[X]], 0
; CHECK-NEXT:    ret { i32, i1 } [[TMP1]]
;
  %aa = ashr i32 %a, 16
  %bb = ashr i32 %b, 16
  %x = call { i32, i1 } @llvm.smul.with.overflow.i32(i32 %aa, i32 %bb)
  ret { i32, i1 } %x
}

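; With shifts of 16 and 15 only 17 + 16 = 33 sign bits are known; e.g. -32768 * -65536 = 2^31
; does not fit in a signed i32, so the possible overflow keeps the intrinsic call in place.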
define { i32, i1 } @smultest3_sw(i32 %a, i32 %b) {
; CHECK-LABEL: @smultest3_sw(
; CHECK-NEXT:    [[AA:%.*]] = ashr i32 [[A:%.*]], 16
; CHECK-NEXT:    [[BB:%.*]] = ashr i32 [[B:%.*]], 15
; CHECK-NEXT:    [[X:%.*]] = call { i32, i1 } @llvm.smul.with.overflow.i32(i32 [[AA]], i32 [[BB]])
; CHECK-NEXT:    ret { i32, i1 } [[X]]
;
  %aa = ashr i32 %a, 16
  %bb = ashr i32 %b, 15
  %x = call { i32, i1 } @llvm.smul.with.overflow.i32(i32 %aa, i32 %bb)
  ret { i32, i1 } %x
}

define { i32, i1 } @umultest_nuw(i32 %a, i32 %b) {
; CHECK-LABEL: @umultest_nuw(
; CHECK-NEXT:    [[AA:%.*]] = and i32 [[A:%.*]], 65535
; CHECK-NEXT:    [[BB:%.*]] = and i32 [[B:%.*]], 65535
; CHECK-NEXT:    [[X:%.*]] = mul nuw i32 [[AA]], [[BB]]
; CHECK-NEXT:    [[TMP1:%.*]] = insertvalue { i32, i1 } { i32 undef, i1 false }, i32 [[X]], 0
; CHECK-NEXT:    ret { i32, i1 } [[TMP1]]
;
  %aa = and i32 %a, 65535 ; 0xffff
  %bb = and i32 %b, 65535 ; 0xffff
  %x = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %aa, i32 %bb)
  ret { i32, i1 } %x
}

define i8 @umultest1(i8 %A, i1* %overflowPtr) {
; CHECK-LABEL: @umultest1(
; CHECK-NEXT:    store i1 false, i1* [[OVERFLOWPTR:%.*]], align 1
; CHECK-NEXT:    ret i8 0
;
  %x = call { i8, i1 } @llvm.umul.with.overflow.i8(i8 0, i8 %A)
  %y = extractvalue { i8, i1 } %x, 0
  %z = extractvalue { i8, i1 } %x, 1
  store i1 %z, i1* %overflowPtr
  ret i8 %y
}

define i8 @umultest2(i8 %A, i1* %overflowPtr) {
; CHECK-LABEL: @umultest2(
; CHECK-NEXT:    store i1 false, i1* [[OVERFLOWPTR:%.*]], align 1
; CHECK-NEXT:    ret i8 [[A:%.*]]
;
  %x = call { i8, i1 } @llvm.umul.with.overflow.i8(i8 1, i8 %A)
  %y = extractvalue { i8, i1 } %x, 0
  %z = extractvalue { i8, i1 } %x, 1
  store i1 %z, i1* %overflowPtr
  ret i8 %y
}

define i32 @umultest3(i32 %n) nounwind {
; CHECK-LABEL: @umultest3(
; CHECK-NEXT:    [[SHR:%.*]] = lshr i32 [[N:%.*]], 2
; CHECK-NEXT:    [[MUL:%.*]] = mul nuw i32 [[SHR]], 3
; CHECK-NEXT:    ret i32 [[MUL]]
;
  %shr = lshr i32 %n, 2
  %mul = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %shr, i32 3)
  %ov = extractvalue { i32, i1 } %mul, 1
  %res = extractvalue { i32, i1 } %mul, 0
  %ret = select i1 %ov, i32 -1, i32 %res
  ret i32 %ret
}

define i32 @umultest4(i32 %n) nounwind {
; CHECK-LABEL: @umultest4(
; CHECK-NEXT:    [[SHR:%.*]] = lshr i32 [[N:%.*]], 1
; CHECK-NEXT:    [[MUL:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 [[SHR]], i32 4)
; CHECK-NEXT:    [[OV:%.*]] = extractvalue { i32, i1 } [[MUL]], 1
; CHECK-NEXT:    [[RES:%.*]] = extractvalue { i32, i1 } [[MUL]], 0
; CHECK-NEXT:    [[RET:%.*]] = select i1 [[OV]], i32 -1, i32 [[RES]]
; CHECK-NEXT:    ret i32 [[RET]]
;
  %shr = lshr i32 %n, 1
  %mul = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %shr, i32 4)
  %ov = extractvalue { i32, i1 } %mul, 1
  %res = extractvalue { i32, i1 } %mul, 0
  %ret = select i1 %ov, i32 -1, i32 %res
  ret i32 %ret
}

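; Both operands have the sign bit forced on, so the unsigned product is at least 2^62 and the
; overflow bit folds to true; only the truncated multiply remains.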
define { i32, i1 } @umultest5(i32 %x, i32 %y) nounwind {
; CHECK-LABEL: @umultest5(
; CHECK-NEXT:    [[OR_X:%.*]] = or i32 [[X:%.*]], -2147483648
; CHECK-NEXT:    [[OR_Y:%.*]] = or i32 [[Y:%.*]], -2147483648
; CHECK-NEXT:    [[MUL:%.*]] = mul i32 [[OR_X]], [[OR_Y]]
; CHECK-NEXT:    [[TMP1:%.*]] = insertvalue { i32, i1 } { i32 undef, i1 true }, i32 [[MUL]], 0
; CHECK-NEXT:    ret { i32, i1 } [[TMP1]]
;
  %or_x = or i32 %x, 2147483648
  %or_y = or i32 %y, 2147483648
  %mul = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %or_x, i32 %or_y)
  ret { i32, i1 } %mul
}

define i1 @overflow_div_add(i32 %v1, i32 %v2) nounwind {
; CHECK-LABEL: @overflow_div_add(
; CHECK-NEXT:    ret i1 false
;
  %div = sdiv i32 %v1, 2
  %t = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %div, i32 1)
  %obit = extractvalue { i32, i1 } %t, 1
  ret i1 %obit
}

define i1 @overflow_div_sub(i32 %v1, i32 %v2) nounwind {
  ; Check cases where the number of known sign bits is larger than the word size.
; CHECK-LABEL: @overflow_div_sub(
; CHECK-NEXT:    ret i1 false
;
  %a = ashr i32 %v1, 18
  %div = sdiv i32 %a, 65536
  %t = call { i32, i1 } @llvm.ssub.with.overflow.i32(i32 %div, i32 1)
  %obit = extractvalue { i32, i1 } %t, 1
  ret i1 %obit
}

define i1 @overflow_mod_mul(i32 %v1, i32 %v2) nounwind {
; CHECK-LABEL: @overflow_mod_mul(
; CHECK-NEXT:    ret i1 false
;
  %rem = srem i32 %v1, 1000
  %t = call { i32, i1 } @llvm.smul.with.overflow.i32(i32 %rem, i32 %rem)
  %obit = extractvalue { i32, i1 } %t, 1
  ret i1 %obit
}

define i1 @overflow_mod_overflow_mul(i32 %v1, i32 %v2) nounwind {
; CHECK-LABEL: @overflow_mod_overflow_mul(
; CHECK-NEXT:    [[REM:%.*]] = srem i32 [[V1:%.*]], 65537
; CHECK-NEXT:    [[T:%.*]] = call { i32, i1 } @llvm.smul.with.overflow.i32(i32 [[REM]], i32 [[REM]])
; CHECK-NEXT:    [[OBIT:%.*]] = extractvalue { i32, i1 } [[T]], 1
; CHECK-NEXT:    ret i1 [[OBIT]]
;
  %rem = srem i32 %v1, 65537
  ; This may overflow because each mul operand may need more than 16 bits,
  ; so the product may not fit in 32 bits.
  %t = call { i32, i1 } @llvm.smul.with.overflow.i32(i32 %rem, i32 %rem)
  %obit = extractvalue { i32, i1 } %t, 1
  ret i1 %obit
}

define i1 @overflow_mod_mul2(i16 %v1, i32 %v2) nounwind {
; CHECK-LABEL: @overflow_mod_mul2(
; CHECK-NEXT:    ret i1 false
;
  %a = sext i16 %v1 to i32
  %rem = srem i32 %a, %v2
  %t = call { i32, i1 } @llvm.smul.with.overflow.i32(i32 %rem, i32 %rem)
  %obit = extractvalue { i32, i1 } %t, 1
  ret i1 %obit
}

define { i32, i1 } @ssubtest_reorder(i8 %a) {
; CHECK-LABEL: @ssubtest_reorder(
; CHECK-NEXT:    [[AA:%.*]] = sext i8 [[A:%.*]] to i32
; CHECK-NEXT:    [[X:%.*]] = sub nsw i32 0, [[AA]]
; CHECK-NEXT:    [[TMP1:%.*]] = insertvalue { i32, i1 } { i32 undef, i1 false }, i32 [[X]], 0
; CHECK-NEXT:    ret { i32, i1 } [[TMP1]]
;
  %aa = sext i8 %a to i32
  %x = call { i32, i1 } @llvm.ssub.with.overflow.i32(i32 0, i32 %aa)
  ret { i32, i1 } %x
}

define { i32, i1 } @never_overflows_ssub_test0(i32 %a) {
; CHECK-LABEL: @never_overflows_ssub_test0(
; CHECK-NEXT:    [[X:%.*]] = insertvalue { i32, i1 } { i32 undef, i1 false }, i32 [[A:%.*]], 0
; CHECK-NEXT:    ret { i32, i1 } [[X]]
;
  %x = call { i32, i1 } @llvm.ssub.with.overflow.i32(i32 %a, i32 0)
  ret { i32, i1 } %x
}

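; An unsigned add overflows exactly when the wrapped result is smaller than either operand, so
; these compares of the result against an operand fold to the intrinsic's overflow bit.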
define i1 @uadd_res_ult_x(i32 %x, i32 %y, i1* %p) nounwind {
; CHECK-LABEL: @uadd_res_ult_x(
; CHECK-NEXT:    [[A:%.*]] = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 [[X:%.*]], i32 [[Y:%.*]])
; CHECK-NEXT:    [[B:%.*]] = extractvalue { i32, i1 } [[A]], 1
; CHECK-NEXT:    store i1 [[B]], i1* [[P:%.*]], align 1
; CHECK-NEXT:    [[D:%.*]] = extractvalue { i32, i1 } [[A]], 1
; CHECK-NEXT:    ret i1 [[D]]
;
  %a = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %x, i32 %y)
  %b = extractvalue { i32, i1 } %a, 1
  store i1 %b, i1* %p
  %c = extractvalue { i32, i1 } %a, 0
  %d = icmp ult i32 %c, %x
  ret i1 %d
}

define i1 @uadd_res_ult_y(i32 %x, i32 %y, i1* %p) nounwind {
; CHECK-LABEL: @uadd_res_ult_y(
; CHECK-NEXT:    [[A:%.*]] = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 [[X:%.*]], i32 [[Y:%.*]])
; CHECK-NEXT:    [[B:%.*]] = extractvalue { i32, i1 } [[A]], 1
; CHECK-NEXT:    store i1 [[B]], i1* [[P:%.*]], align 1
; CHECK-NEXT:    [[D:%.*]] = extractvalue { i32, i1 } [[A]], 1
; CHECK-NEXT:    ret i1 [[D]]
;
  %a = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %x, i32 %y)
  %b = extractvalue { i32, i1 } %a, 1
  store i1 %b, i1* %p
  %c = extractvalue { i32, i1 } %a, 0
  %d = icmp ult i32 %c, %y
  ret i1 %d
}

define i1 @uadd_res_ugt_x(i32 %xx, i32 %y, i1* %p) nounwind {
; CHECK-LABEL: @uadd_res_ugt_x(
; CHECK-NEXT:    [[X:%.*]] = urem i32 42, [[XX:%.*]]
; CHECK-NEXT:    [[A:%.*]] = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 [[X]], i32 [[Y:%.*]])
; CHECK-NEXT:    [[B:%.*]] = extractvalue { i32, i1 } [[A]], 1
; CHECK-NEXT:    store i1 [[B]], i1* [[P:%.*]], align 1
; CHECK-NEXT:    [[D:%.*]] = extractvalue { i32, i1 } [[A]], 1
; CHECK-NEXT:    ret i1 [[D]]
;
  %x = urem i32 42, %xx ; Thwart complexity-based canonicalization
  %a = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %x, i32 %y)
  %b = extractvalue { i32, i1 } %a, 1
  store i1 %b, i1* %p
  %c = extractvalue { i32, i1 } %a, 0
  %d = icmp ugt i32 %x, %c
  ret i1 %d
}

define i1 @uadd_res_ugt_y(i32 %x, i32 %yy, i1* %p) nounwind {
; CHECK-LABEL: @uadd_res_ugt_y(
; CHECK-NEXT:    [[Y:%.*]] = urem i32 42, [[YY:%.*]]
; CHECK-NEXT:    [[A:%.*]] = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 [[X:%.*]], i32 [[Y]])
; CHECK-NEXT:    [[B:%.*]] = extractvalue { i32, i1 } [[A]], 1
; CHECK-NEXT:    store i1 [[B]], i1* [[P:%.*]], align 1
; CHECK-NEXT:    [[D:%.*]] = extractvalue { i32, i1 } [[A]], 1
; CHECK-NEXT:    ret i1 [[D]]
;
  %y = urem i32 42, %yy ; Thwart complexity-based canonicalization
  %a = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %x, i32 %y)
  %b = extractvalue { i32, i1 } %a, 1
  store i1 %b, i1* %p
  %c = extractvalue { i32, i1 } %a, 0
  %d = icmp ugt i32 %y, %c
  ret i1 %d
}

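; The same fold applies when the other add operand is a constant, including the edge cases 1 and -1.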
define i1 @uadd_res_ult_const(i32 %x, i1* %p) nounwind {
; CHECK-LABEL: @uadd_res_ult_const(
; CHECK-NEXT:    [[A:%.*]] = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 [[X:%.*]], i32 42)
; CHECK-NEXT:    [[B:%.*]] = extractvalue { i32, i1 } [[A]], 1
; CHECK-NEXT:    store i1 [[B]], i1* [[P:%.*]], align 1
; CHECK-NEXT:    [[D:%.*]] = extractvalue { i32, i1 } [[A]], 1
; CHECK-NEXT:    ret i1 [[D]]
;
  %a = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %x, i32 42)
  %b = extractvalue { i32, i1 } %a, 1
  store i1 %b, i1* %p
  %c = extractvalue { i32, i1 } %a, 0
  %d = icmp ult i32 %c, 42
  ret i1 %d
}

define i1 @uadd_res_ult_const_one(i32 %x, i1* %p) nounwind {
; CHECK-LABEL: @uadd_res_ult_const_one(
; CHECK-NEXT:    [[A:%.*]] = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 [[X:%.*]], i32 1)
; CHECK-NEXT:    [[B:%.*]] = extractvalue { i32, i1 } [[A]], 1
; CHECK-NEXT:    store i1 [[B]], i1* [[P:%.*]], align 1
; CHECK-NEXT:    [[D:%.*]] = extractvalue { i32, i1 } [[A]], 1
; CHECK-NEXT:    ret i1 [[D]]
;
  %a = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %x, i32 1)
  %b = extractvalue { i32, i1 } %a, 1
  store i1 %b, i1* %p
  %c = extractvalue { i32, i1 } %a, 0
  %d = icmp ult i32 %c, 1
  ret i1 %d
}

define i1 @uadd_res_ult_const_minus_one(i32 %x, i1* %p) nounwind {
; CHECK-LABEL: @uadd_res_ult_const_minus_one(
; CHECK-NEXT:    [[A:%.*]] = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 [[X:%.*]], i32 -1)
; CHECK-NEXT:    [[B:%.*]] = extractvalue { i32, i1 } [[A]], 1
; CHECK-NEXT:    store i1 [[B]], i1* [[P:%.*]], align 1
; CHECK-NEXT:    [[D:%.*]] = extractvalue { i32, i1 } [[A]], 1
; CHECK-NEXT:    ret i1 [[D]]
;
  %a = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %x, i32 -1)
  %b = extractvalue { i32, i1 } %a, 1
  store i1 %b, i1* %p
  %c = extractvalue { i32, i1 } %a, 0
  %d = icmp ult i32 %c, -1
  ret i1 %d
}

define { i32, i1 } @sadd_canonicalize_constant_arg0(i32 %x) nounwind {
; CHECK-LABEL: @sadd_canonicalize_constant_arg0(
; CHECK-NEXT:    [[A:%.*]] = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 [[X:%.*]], i32 42)
; CHECK-NEXT:    ret { i32, i1 } [[A]]
;
  %a = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 42, i32 %x)
  ret { i32, i1 } %a
}

define { i32, i1 } @uadd_canonicalize_constant_arg0(i32 %x) nounwind {
; CHECK-LABEL: @uadd_canonicalize_constant_arg0(
; CHECK-NEXT:    [[A:%.*]] = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 [[X:%.*]], i32 42)
; CHECK-NEXT:    ret { i32, i1 } [[A]]
;
  %a = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 42, i32 %x)
  ret { i32, i1 } %a
}

define { i32, i1 } @ssub_no_canonicalize_constant_arg0(i32 %x) nounwind {
; CHECK-LABEL: @ssub_no_canonicalize_constant_arg0(
; CHECK-NEXT:    [[A:%.*]] = call { i32, i1 } @llvm.ssub.with.overflow.i32(i32 42, i32 [[X:%.*]])
; CHECK-NEXT:    ret { i32, i1 } [[A]]
;
  %a = call { i32, i1 } @llvm.ssub.with.overflow.i32(i32 42, i32 %x)
  ret { i32, i1 } %a
}

define { i32, i1 } @usub_no_canonicalize_constant_arg0(i32 %x) nounwind {
; CHECK-LABEL: @usub_no_canonicalize_constant_arg0(
; CHECK-NEXT:    [[A:%.*]] = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 42, i32 [[X:%.*]])
; CHECK-NEXT:    ret { i32, i1 } [[A]]
;
  %a = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 42, i32 %x)
  ret { i32, i1 } %a
}

define { i32, i1 } @smul_canonicalize_constant_arg0(i32 %x) nounwind {
; CHECK-LABEL: @smul_canonicalize_constant_arg0(
; CHECK-NEXT:    [[A:%.*]] = call { i32, i1 } @llvm.smul.with.overflow.i32(i32 [[X:%.*]], i32 42)
; CHECK-NEXT:    ret { i32, i1 } [[A]]
;
  %a = call { i32, i1 } @llvm.smul.with.overflow.i32(i32 42, i32 %x)
  ret { i32, i1 } %a
}

define { i32, i1 } @umul_canonicalize_constant_arg0(i32 %x) nounwind {
; CHECK-LABEL: @umul_canonicalize_constant_arg0(
; CHECK-NEXT:    [[A:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 [[X:%.*]], i32 42)
; CHECK-NEXT:    ret { i32, i1 } [[A]]
;
  %a = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 42, i32 %x)
  ret { i32, i1 } %a
}

; Always overflow tests

define { i8, i1 } @uadd_always_overflow(i8 %x) nounwind {
; CHECK-LABEL: @uadd_always_overflow(
; CHECK-NEXT:    [[TMP1:%.*]] = and i8 [[X:%.*]], 63
; CHECK-NEXT:    [[TMP2:%.*]] = insertvalue { i8, i1 } { i8 undef, i1 true }, i8 [[TMP1]], 0
; CHECK-NEXT:    ret { i8, i1 } [[TMP2]]
;
  %y = or i8 %x, 192
  %a = call { i8, i1 } @llvm.uadd.with.overflow.i8(i8 %y, i8 64)
  ret { i8, i1 } %a
}

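; 63 - (x | 64) always borrows because x | 64 is at least 64, so the overflow bit is known true.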
define { i8, i1 } @usub_always_overflow(i8 %x) nounwind {
; CHECK-LABEL: @usub_always_overflow(
; CHECK-NEXT:    [[Y:%.*]] = or i8 [[X:%.*]], 64
; CHECK-NEXT:    [[A:%.*]] = sub nsw i8 63, [[Y]]
; CHECK-NEXT:    [[TMP1:%.*]] = insertvalue { i8, i1 } { i8 undef, i1 true }, i8 [[A]], 0
; CHECK-NEXT:    ret { i8, i1 } [[TMP1]]
;
  %y = or i8 %x, 64
  %a = call { i8, i1 } @llvm.usub.with.overflow.i8(i8 63, i8 %y)
  ret { i8, i1 } %a
}

define { i8, i1 } @umul_always_overflow(i8 %x) nounwind {
; CHECK-LABEL: @umul_always_overflow(
; CHECK-NEXT:    [[A:%.*]] = shl i8 [[X:%.*]], 1
; CHECK-NEXT:    [[TMP1:%.*]] = insertvalue { i8, i1 } { i8 undef, i1 true }, i8 [[A]], 0
; CHECK-NEXT:    ret { i8, i1 } [[TMP1]]
;
  %y = or i8 %x, 128
  %a = call { i8, i1 } @llvm.umul.with.overflow.i8(i8 %y, i8 2)
  ret { i8, i1 } %a
}

define { i8, i1 } @sadd_always_overflow(i8 %x) nounwind {
; CHECK-LABEL: @sadd_always_overflow(
; CHECK-NEXT:    [[C:%.*]] = icmp sgt i8 [[X:%.*]], 100
; CHECK-NEXT:    [[Y:%.*]] = select i1 [[C]], i8 [[X]], i8 100
; CHECK-NEXT:    [[A:%.*]] = add nuw i8 [[Y]], 28
; CHECK-NEXT:    [[TMP1:%.*]] = insertvalue { i8, i1 } { i8 undef, i1 true }, i8 [[A]], 0
; CHECK-NEXT:    ret { i8, i1 } [[TMP1]]
;
  %c = icmp sgt i8 %x, 100
  %y = select i1 %c, i8 %x, i8 100
  %a = call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 %y, i8 28)
  ret { i8, i1 } %a
}

define { i8, i1 } @ssub_always_overflow(i8 %x) nounwind {
; CHECK-LABEL: @ssub_always_overflow(
; CHECK-NEXT:    [[C:%.*]] = icmp sgt i8 [[X:%.*]], 29
; CHECK-NEXT:    [[Y:%.*]] = select i1 [[C]], i8 [[X]], i8 29
; CHECK-NEXT:    [[A:%.*]] = sub nuw i8 -100, [[Y]]
; CHECK-NEXT:    [[TMP1:%.*]] = insertvalue { i8, i1 } { i8 undef, i1 true }, i8 [[A]], 0
; CHECK-NEXT:    ret { i8, i1 } [[TMP1]]
;
  %c = icmp sgt i8 %x, 29
  %y = select i1 %c, i8 %x, i8 29
  %a = call { i8, i1 } @llvm.ssub.with.overflow.i8(i8 -100, i8 %y)
  ret { i8, i1 } %a
}

define { i8, i1 } @smul_always_overflow(i8 %x) nounwind {
; CHECK-LABEL: @smul_always_overflow(
; CHECK-NEXT:    [[C:%.*]] = icmp sgt i8 [[X:%.*]], 100
; CHECK-NEXT:    [[Y:%.*]] = select i1 [[C]], i8 [[X]], i8 100
; CHECK-NEXT:    [[A:%.*]] = call { i8, i1 } @llvm.smul.with.overflow.i8(i8 [[Y]], i8 2)
; CHECK-NEXT:    ret { i8, i1 } [[A]]
;
  %c = icmp sgt i8 %x, 100
  %y = select i1 %c, i8 %x, i8 100
  %a = call { i8, i1 } @llvm.smul.with.overflow.i8(i8 %y, i8 2)
  ret { i8, i1 } %a
}

declare { <4 x i8>, <4 x i1> } @llvm.sadd.with.overflow.v4i8(<4 x i8>, <4 x i8>)
declare { <4 x i8>, <4 x i1> } @llvm.uadd.with.overflow.v4i8(<4 x i8>, <4 x i8>)
declare { <4 x i8>, <4 x i1> } @llvm.ssub.with.overflow.v4i8(<4 x i8>, <4 x i8>)
declare { <4 x i8>, <4 x i1> } @llvm.usub.with.overflow.v4i8(<4 x i8>, <4 x i8>)
declare { <4 x i8>, <4 x i1> } @llvm.smul.with.overflow.v4i8(<4 x i8>, <4 x i8>)
declare { <4 x i8>, <4 x i1> } @llvm.umul.with.overflow.v4i8(<4 x i8>, <4 x i8>)

; Always overflow

define { <4 x i8>, <4 x i1> } @always_sadd_const_vector() nounwind {
; CHECK-LABEL: @always_sadd_const_vector(
; CHECK-NEXT:    ret { <4 x i8>, <4 x i1> } { <4 x i8> <i8 -128, i8 -128, i8 -128, i8 -128>, <4 x i1> <i1 true, i1 true, i1 true, i1 true> }
;
  %x = call { <4 x i8>, <4 x i1> } @llvm.sadd.with.overflow.v4i8(<4 x i8> <i8 127, i8 127, i8 127, i8 127>, <4 x i8> <i8 1, i8 1, i8 1, i8 1>)
  ret { <4 x i8>, <4 x i1> } %x
}

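; Unsigned 255 + 1 wraps to 0 in every lane, so both the result and the overflow bit constant-fold.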
define { <4 x i8>, <4 x i1> } @always_uadd_const_vector() nounwind {
; CHECK-LABEL: @always_uadd_const_vector(
; CHECK-NEXT:    ret { <4 x i8>, <4 x i1> } { <4 x i8> zeroinitializer, <4 x i1> <i1 true, i1 true, i1 true, i1 true> }
;
  %x = call { <4 x i8>, <4 x i1> } @llvm.uadd.with.overflow.v4i8(<4 x i8> <i8 255, i8 255, i8 255, i8 255>, <4 x i8> <i8 1, i8 1, i8 1, i8 1>)
  ret { <4 x i8>, <4 x i1> } %x
}

define { <4 x i8>, <4 x i1> } @always_ssub_const_vector() nounwind {
; CHECK-LABEL: @always_ssub_const_vector(
; CHECK-NEXT:    ret { <4 x i8>, <4 x i1> } { <4 x i8> <i8 127, i8 127, i8 127, i8 127>, <4 x i1> <i1 true, i1 true, i1 true, i1 true> }
;
  %x = call { <4 x i8>, <4 x i1> } @llvm.ssub.with.overflow.v4i8(<4 x i8> <i8 -128, i8 -128, i8 -128, i8 -128>, <4 x i8> <i8 1, i8 1, i8 1, i8 1>)
  ret { <4 x i8>, <4 x i1> } %x
}

define { <4 x i8>, <4 x i1> } @always_usub_const_vector() nounwind {
; CHECK-LABEL: @always_usub_const_vector(
; CHECK-NEXT:    ret { <4 x i8>, <4 x i1> } { <4 x i8> <i8 -1, i8 -1, i8 -1, i8 -1>, <4 x i1> <i1 true, i1 true, i1 true, i1 true> }
;
  %x = call { <4 x i8>, <4 x i1> } @llvm.usub.with.overflow.v4i8(<4 x i8> <i8 0, i8 0, i8 0, i8 0>, <4 x i8> <i8 1, i8 1, i8 1, i8 1>)
  ret { <4 x i8>, <4 x i1> } %x
}

; NOTE: LLVM doesn't (yet) detect that the multiplication always results in an overflow
define { <4 x i8>, <4 x i1> } @always_smul_const_vector() nounwind {
; CHECK-LABEL: @always_smul_const_vector(
; CHECK-NEXT:    [[X:%.*]] = call { <4 x i8>, <4 x i1> } @llvm.smul.with.overflow.v4i8(<4 x i8> <i8 127, i8 127, i8 127, i8 127>, <4 x i8> <i8 3, i8 3, i8 3, i8 3>)
; CHECK-NEXT:    ret { <4 x i8>, <4 x i1> } [[X]]
;
  %x = call { <4 x i8>, <4 x i1> } @llvm.smul.with.overflow.v4i8(<4 x i8> <i8 127, i8 127, i8 127, i8 127>, <4 x i8> <i8 3, i8 3, i8 3, i8 3>)
  ret { <4 x i8>, <4 x i1> } %x
}

define { <4 x i8>, <4 x i1> } @always_umul_const_vector() nounwind {
; CHECK-LABEL: @always_umul_const_vector(
; CHECK-NEXT:    ret { <4 x i8>, <4 x i1> } { <4 x i8> <i8 -3, i8 -3, i8 -3, i8 -3>, <4 x i1> <i1 true, i1 true, i1 true, i1 true> }
;
  %x = call { <4 x i8>, <4 x i1> } @llvm.umul.with.overflow.v4i8(<4 x i8> <i8 255, i8 255, i8 255, i8 255>, <4 x i8> <i8 3, i8 3, i8 3, i8 3>)
  ret { <4 x i8>, <4 x i1> } %x
}

; Never overflow

define { <4 x i8>, <4 x i1> } @never_sadd_const_vector() nounwind {
; CHECK-LABEL: @never_sadd_const_vector(
; CHECK-NEXT:    ret { <4 x i8>, <4 x i1> } { <4 x i8> <i8 -50, i8 -10, i8 0, i8 60>, <4 x i1> zeroinitializer }
;
  %x = call { <4 x i8>, <4 x i1> } @llvm.sadd.with.overflow.v4i8(<4 x i8> <i8 -10, i8 -20, i8 30, i8 40>, <4 x i8> <i8 -40, i8 10, i8 -30, i8 20>)
  ret { <4 x i8>, <4 x i1> } %x
}

define { <4 x i8>, <4 x i1> } @never_uadd_const_vector() nounwind {
; CHECK-LABEL: @never_uadd_const_vector(
; CHECK-NEXT:    ret { <4 x i8>, <4 x i1> } { <4 x i8> <i8 32, i8 64, i8 96, i8 48>, <4 x i1> zeroinitializer }
;
  %x = call { <4 x i8>, <4 x i1> } @llvm.uadd.with.overflow.v4i8(<4 x i8> <i8 0, i8 32, i8 64, i8 16>, <4 x i8> <i8 32, i8 32, i8 32, i8 32>)
  ret { <4 x i8>, <4 x i1> } %x
}

define { <4 x i8>, <4 x i1> } @never_ssub_const_vector() nounwind {
; CHECK-LABEL: @never_ssub_const_vector(
; CHECK-NEXT:    ret { <4 x i8>, <4 x i1> } { <4 x i8> <i8 0, i8 10, i8 20, i8 30>, <4 x i1> zeroinitializer }
;
  %x = call { <4 x i8>, <4 x i1> } @llvm.ssub.with.overflow.v4i8(<4 x i8> <i8 -10, i8 -10, i8 -10, i8 -10>, <4 x i8> <i8 -10, i8 -20, i8 -30, i8 -40>)
  ret { <4 x i8>, <4 x i1> } %x
}

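; 255 - {128, 0, 255, 1} never borrows, so every lane folds to its exact difference with a false overflow bit.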
define { <4 x i8>, <4 x i1> } @never_usub_const_vector() nounwind {
; CHECK-LABEL: @never_usub_const_vector(
; CHECK-NEXT:    ret { <4 x i8>, <4 x i1> } { <4 x i8> <i8 127, i8 -1, i8 0, i8 -2>, <4 x i1> zeroinitializer }
;
  %x = call { <4 x i8>, <4 x i1> } @llvm.usub.with.overflow.v4i8(<4 x i8> <i8 255, i8 255, i8 255, i8 255>, <4 x i8> <i8 128, i8 0, i8 255, i8 1>)
  ret { <4 x i8>, <4 x i1> } %x
}

define { <4 x i8>, <4 x i1> } @never_smul_const_vector() nounwind {
; CHECK-LABEL: @never_smul_const_vector(
; CHECK-NEXT:    ret { <4 x i8>, <4 x i1> } { <4 x i8> <i8 -54, i8 -18, i8 -60, i8 -90>, <4 x i1> zeroinitializer }
;
  %x = call { <4 x i8>, <4 x i1> } @llvm.smul.with.overflow.v4i8(<4 x i8> <i8 -6, i8 -6, i8 -6, i8 -6>, <4 x i8> <i8 9, i8 3, i8 10, i8 15>)
  ret { <4 x i8>, <4 x i1> } %x
}

define { <4 x i8>, <4 x i1> } @never_umul_const_vector() nounwind {
; CHECK-LABEL: @never_umul_const_vector(
; CHECK-NEXT:    ret { <4 x i8>, <4 x i1> } { <4 x i8> <i8 -31, i8 120, i8 60, i8 30>, <4 x i1> zeroinitializer }
;
  %x = call { <4 x i8>, <4 x i1> } @llvm.umul.with.overflow.v4i8(<4 x i8> <i8 15, i8 15, i8 15, i8 15>, <4 x i8> <i8 15, i8 8, i8 4, i8 2>)
  ret { <4 x i8>, <4 x i1> } %x
}

; Neutral value

define { <4 x i8>, <4 x i1> } @neutral_sadd_const_vector() nounwind {
; CHECK-LABEL: @neutral_sadd_const_vector(
; CHECK-NEXT:    ret { <4 x i8>, <4 x i1> } { <4 x i8> <i8 1, i8 2, i8 3, i8 4>, <4 x i1> zeroinitializer }
;
  %x = call { <4 x i8>, <4 x i1> } @llvm.sadd.with.overflow.v4i8(<4 x i8> <i8 1, i8 2, i8 3, i8 4>, <4 x i8> <i8 0, i8 0, i8 0, i8 0>)
  ret { <4 x i8>, <4 x i1> } %x
}

define { <4 x i8>, <4 x i1> } @neutral_uadd_const_vector() nounwind {
; CHECK-LABEL: @neutral_uadd_const_vector(
; CHECK-NEXT:    ret { <4 x i8>, <4 x i1> } { <4 x i8> <i8 1, i8 2, i8 3, i8 4>, <4 x i1> zeroinitializer }
;
  %x = call { <4 x i8>, <4 x i1> } @llvm.uadd.with.overflow.v4i8(<4 x i8> <i8 1, i8 2, i8 3, i8 4>, <4 x i8> <i8 0, i8 0, i8 0, i8 0>)
  ret { <4 x i8>, <4 x i1> } %x
}

define { <4 x i8>, <4 x i1> } @neutral_ssub_const_vector() nounwind {
; CHECK-LABEL: @neutral_ssub_const_vector(
; CHECK-NEXT:    ret { <4 x i8>, <4 x i1> } { <4 x i8> <i8 1, i8 2, i8 3, i8 4>, <4 x i1> zeroinitializer }
;
  %x = call { <4 x i8>, <4 x i1> } @llvm.ssub.with.overflow.v4i8(<4 x i8> <i8 1, i8 2, i8 3, i8 4>, <4 x i8> <i8 0, i8 0, i8 0, i8 0>)
  ret { <4 x i8>, <4 x i1> } %x
}

define { <4 x i8>, <4 x i1> } @neutral_usub_const_vector() nounwind {
; CHECK-LABEL: @neutral_usub_const_vector(
; CHECK-NEXT:    ret { <4 x i8>, <4 x i1> } { <4 x i8> <i8 1, i8 2, i8 3, i8 4>, <4 x i1> zeroinitializer }
;
  %x = call { <4 x i8>, <4 x i1> } @llvm.usub.with.overflow.v4i8(<4 x i8> <i8 1, i8 2, i8 3, i8 4>, <4 x i8> <i8 0, i8 0, i8 0, i8 0>)
  ret { <4 x i8>, <4 x i1> } %x
}

define { <4 x i8>, <4 x i1> } @neutral_smul_const_vector() nounwind {
; CHECK-LABEL: @neutral_smul_const_vector(
; CHECK-NEXT:    ret { <4 x i8>, <4 x i1> } { <4 x i8> <i8 1, i8 2, i8 3, i8 4>, <4 x i1> zeroinitializer }
;
  %x = call { <4 x i8>, <4 x i1> } @llvm.smul.with.overflow.v4i8(<4 x i8> <i8 1, i8 2, i8 3, i8 4>, <4 x i8> <i8 1, i8 1, i8 1, i8 1>)
  ret { <4 x i8>, <4 x i1> } %x
}

define { <4 x i8>, <4 x i1> } @neutral_umul_const_vector() nounwind {
; CHECK-LABEL: @neutral_umul_const_vector(
; CHECK-NEXT:    ret { <4 x i8>, <4 x i1> } { <4 x i8> <i8 1, i8 2, i8 3, i8 4>, <4 x i1> zeroinitializer }
;
  %x = call { <4 x i8>, <4 x i1> } @llvm.umul.with.overflow.v4i8(<4 x i8> <i8 1, i8 2, i8 3, i8 4>, <4 x i8> <i8 1, i8 1, i8 1, i8 1>)
  ret { <4 x i8>, <4 x i1> } %x
}