// Test CodeGen for Security Check Overflow Builtins.

// RUN: %clang_cc1 -triple "i686-unknown-unknown" -emit-llvm -x c %s -o - | FileCheck -DLONG_TYPE=i32 -DLONG_MAX=2147483647 %s
// RUN: %clang_cc1 -triple "x86_64-unknown-unknown" -emit-llvm -x c %s -o - | FileCheck -DLONG_TYPE=i64 -DLONG_MAX=9223372036854775807 %s
// RUN: %clang_cc1 -triple "x86_64-mingw32" -emit-llvm -x c %s -o - | FileCheck -DLONG_TYPE=i32 -DLONG_MAX=2147483647 %s

extern unsigned UnsignedErrorCode;
extern unsigned long UnsignedLongErrorCode;
extern unsigned long long UnsignedLongLongErrorCode;
extern int IntErrorCode;
extern long LongErrorCode;
extern long long LongLongErrorCode;
void overflowed(void);

unsigned test_add_overflow_uint_uint_uint(unsigned x, unsigned y) {
  // CHECK-LABEL: define {{(dso_local )?}}i32 @test_add_overflow_uint_uint_uint
  // CHECK-NOT: ext
  // CHECK: [[S:%.+]] = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %{{.+}}, i32 %{{.+}})
  // CHECK-DAG: [[Q:%.+]] = extractvalue { i32, i1 } [[S]], 0
  // CHECK-DAG: [[C:%.+]] = extractvalue { i32, i1 } [[S]], 1
  // CHECK: store i32 [[Q]], ptr
  // CHECK: br i1 [[C]]
  unsigned r;
  if (__builtin_add_overflow(x, y, &r))
    overflowed();
  return r;
}

int test_add_overflow_int_int_int(int x, int y) {
  // CHECK-LABEL: define {{(dso_local )?}}i32 @test_add_overflow_int_int_int
  // CHECK-NOT: ext
  // CHECK: [[S:%.+]] = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %{{.+}}, i32 %{{.+}})
  // CHECK-DAG: [[C:%.+]] = extractvalue { i32, i1 } [[S]], 1
  // CHECK-DAG: [[Q:%.+]] = extractvalue { i32, i1 } [[S]], 0
  // CHECK: store i32 [[Q]], ptr
  // CHECK: br i1 [[C]]
  int r;
  if (__builtin_add_overflow(x, y, &r))
    overflowed();
  return r;
}

int test_add_overflow_xint31_xint31_xint31(_BitInt(31) x, _BitInt(31) y) {
  // CHECK-LABEL: define {{(dso_local )?}}i32 @test_add_overflow_xint31_xint31_xint31({{.+}})
  // CHECK: %loadedv = trunc i32 %{{.*}} to i31
  // CHECK-NOT: ext
  // CHECK: [[S:%.+]] = call { i31, i1 } @llvm.sadd.with.overflow.i31(i31 %{{.+}}, i31 %{{.+}})
  // CHECK-DAG: [[C:%.+]] = extractvalue { i31, i1 } [[S]], 1
  // CHECK-DAG: [[Q:%.+]] = extractvalue { i31, i1 } [[S]], 0
  // CHECK: [[STOREDV:%.+]] = sext i31 [[Q]] to i32
  // CHECK: store i32 [[STOREDV]], ptr
  // CHECK: br i1 [[C]]
  _BitInt(31) r;
  if (__builtin_add_overflow(x, y, &r))
    overflowed();
  return r;
}

unsigned test_sub_overflow_uint_uint_uint(unsigned x, unsigned y) {
  // CHECK-LABEL: define {{(dso_local )?}}i32 @test_sub_overflow_uint_uint_uint
  // CHECK-NOT: ext
  // CHECK: [[S:%.+]] = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %{{.+}}, i32 %{{.+}})
  // CHECK-DAG: [[Q:%.+]] = extractvalue { i32, i1 } [[S]], 0
  // CHECK-DAG: [[C:%.+]] = extractvalue { i32, i1 } [[S]], 1
  // CHECK: store i32 [[Q]], ptr
  // CHECK: br i1 [[C]]
  unsigned r;
  if (__builtin_sub_overflow(x, y, &r))
    overflowed();
  return r;
}

int test_sub_overflow_int_int_int(int x, int y) {
  // CHECK-LABEL: define {{(dso_local )?}}i32 @test_sub_overflow_int_int_int
  // CHECK-NOT: ext
  // CHECK: [[S:%.+]] = call { i32, i1 } @llvm.ssub.with.overflow.i32(i32 %{{.+}}, i32 %{{.+}})
  // CHECK-DAG: [[C:%.+]] = extractvalue { i32, i1 } [[S]], 1
  // CHECK-DAG: [[Q:%.+]] = extractvalue { i32, i1 } [[S]], 0
  // CHECK: store i32 [[Q]], ptr
  // CHECK: br i1 [[C]]
  int r;
  if (__builtin_sub_overflow(x, y, &r))
    overflowed();
  return r;
}

int test_sub_overflow_xint31_xint31_xint31(_BitInt(31) x, _BitInt(31) y) {
  // CHECK-LABEL: define {{(dso_local )?}}i32 @test_sub_overflow_xint31_xint31_xint31({{.+}})
  // CHECK: %loadedv = trunc i32 %{{.*}} to i31
  // CHECK-NOT: ext
  // CHECK: [[S:%.+]] = call { i31, i1 } @llvm.ssub.with.overflow.i31(i31 %{{.+}}, i31 %{{.+}})
  // CHECK-DAG: [[C:%.+]] = extractvalue { i31, i1 } [[S]], 1
  // CHECK-DAG: [[Q:%.+]] = extractvalue { i31, i1 } [[S]], 0
  // CHECK: [[STOREDV:%.+]] = sext i31 [[Q]] to i32
  // CHECK: store i32 [[STOREDV]], ptr
  // CHECK: br i1 [[C]]
  _BitInt(31) r;
  if (__builtin_sub_overflow(x, y, &r))
    overflowed();
  return r;
}

unsigned test_mul_overflow_uint_uint_uint(unsigned x, unsigned y) {
  // CHECK-LABEL: define {{(dso_local )?}}i32 @test_mul_overflow_uint_uint_uint
  // CHECK-NOT: ext
  // CHECK: [[S:%.+]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %{{.+}}, i32 %{{.+}})
  // CHECK-DAG: [[Q:%.+]] = extractvalue { i32, i1 } [[S]], 0
  // CHECK-DAG: [[C:%.+]] = extractvalue { i32, i1 } [[S]], 1
  // CHECK: store i32 [[Q]], ptr
  // CHECK: br i1 [[C]]
  unsigned r;
  if (__builtin_mul_overflow(x, y, &r))
    overflowed();
  return r;
}

int test_mul_overflow_uint_uint_int(unsigned x, unsigned y) {
  // CHECK-LABEL: define {{(dso_local )?}}i32 @test_mul_overflow_uint_uint_int
  // CHECK: [[S:%.+]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %{{.+}}, i32 %{{.+}})
  // CHECK-DAG: [[Q:%.+]] = extractvalue { i32, i1 } [[S]], 0
  // CHECK-DAG: [[C:%.+]] = extractvalue { i32, i1 } [[S]], 1
  // CHECK: [[C1:%.+]] = icmp ugt i32 [[Q]], 2147483647
  // CHECK: [[C2:%.+]] = or i1 [[C]], [[C1]]
  // CHECK: store i32 [[Q]], ptr
  // CHECK: br i1 [[C2]]
  int r;
  if (__builtin_mul_overflow(x, y, &r))
    overflowed();
  return r;
}

int test_mul_overflow_uint_uint_int_volatile(unsigned x, unsigned y) {
  // CHECK-LABEL: define {{(dso_local )?}}i32 @test_mul_overflow_uint_uint_int_volatile
  // CHECK: [[S:%.+]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %{{.+}}, i32 %{{.+}})
  // CHECK-DAG: [[Q:%.+]] = extractvalue { i32, i1 } [[S]], 0
  // CHECK-DAG: [[C:%.+]] = extractvalue { i32, i1 } [[S]], 1
  // CHECK: [[C1:%.+]] = icmp ugt i32 [[Q]], 2147483647
  // CHECK: [[C2:%.+]] = or i1 [[C]], [[C1]]
  // CHECK: store volatile i32 [[Q]], ptr
  // CHECK: br i1 [[C2]]
  volatile int r;
  if (__builtin_mul_overflow(x, y, &r))
    overflowed();
  return r;
}

long test_mul_overflow_ulong_ulong_long(unsigned long x, unsigned long y) {
  // CHECK-LABEL: @test_mul_overflow_ulong_ulong_long
  // CHECK: [[S:%.+]] = call { [[LONG_TYPE]], i1 } @llvm.umul.with.overflow.[[LONG_TYPE]]([[LONG_TYPE]] %{{.+}}, [[LONG_TYPE]] %{{.+}})
  // CHECK-DAG: [[Q:%.+]] = extractvalue { [[LONG_TYPE]], i1 } [[S]], 0
  // CHECK-DAG: [[C:%.+]] = extractvalue { [[LONG_TYPE]], i1 } [[S]], 1
  // CHECK: [[C1:%.+]] = icmp ugt [[LONG_TYPE]] [[Q]], [[LONG_MAX]]
  // CHECK: [[C2:%.+]] = or i1 [[C]], [[C1]]
  // CHECK: store [[LONG_TYPE]] [[Q]], ptr
  // CHECK: br i1 [[C2]]
  long r;
  if (__builtin_mul_overflow(x, y, &r))
    overflowed();
  return r;
}

int test_mul_overflow_int_int_int(int x, int y) {
  // CHECK-LABEL: define {{(dso_local )?}}i32 @test_mul_overflow_int_int_int
  // CHECK-NOT: ext
  // CHECK: [[S:%.+]] = call { i32, i1 } @llvm.smul.with.overflow.i32(i32 %{{.+}}, i32 %{{.+}})
  // CHECK-DAG: [[C:%.+]] = extractvalue { i32, i1 } [[S]], 1
  // CHECK-DAG: [[Q:%.+]] = extractvalue { i32, i1 } [[S]], 0
  // CHECK: store i32 [[Q]], ptr
  // CHECK: br i1 [[C]]
  int r;
  if (__builtin_mul_overflow(x, y, &r))
    overflowed();
  return r;
}

int test_mul_overflow_xint31_xint31_xint31(_BitInt(31) x, _BitInt(31) y) {
  // CHECK-LABEL: define {{(dso_local )?}}i32 @test_mul_overflow_xint31_xint31_xint31({{.+}})
  // CHECK: %loadedv = trunc i32 %{{.*}} to i31
  // CHECK-NOT: ext
  // CHECK: [[S:%.+]] = call { i31, i1 } @llvm.smul.with.overflow.i31(i31 %{{.+}}, i31 %{{.+}})
  // CHECK-DAG: [[C:%.+]] = extractvalue { i31, i1 } [[S]], 1
  // CHECK-DAG: [[Q:%.+]] = extractvalue { i31, i1 } [[S]], 0
  // CHECK: [[STOREDV:%.+]] = sext i31 [[Q]] to i32
  // CHECK: store i32 [[STOREDV]], ptr
  // CHECK: br i1 [[C]]
  _BitInt(31) r;
  if (__builtin_mul_overflow(x, y, &r))
    overflowed();
  return r;
}

int test_mul_overflow_xint127_xint127_xint127(_BitInt(127) x, _BitInt(127) y) {
  // CHECK-LABEL: define {{(dso_local )?}}i32 @test_mul_overflow_xint127_xint127_xint127({{.+}})
  // CHECK: %loadedv = trunc i128 %{{.*}} to i127
  // CHECK-NOT: ext
  // CHECK: [[S:%.+]] = call { i127, i1 } @llvm.smul.with.overflow.i127(i127 %{{.+}}, i127 %{{.+}})
  // CHECK-DAG: [[C:%.+]] = extractvalue { i127, i1 } [[S]], 1
  // CHECK-DAG: [[Q:%.+]] = extractvalue { i127, i1 } [[S]], 0
  // CHECK: [[STOREDV:%.+]] = sext i127 [[Q]] to i128
  // CHECK: store i128 [[STOREDV]], ptr
  // CHECK: br i1 [[C]]
  _BitInt(127) r;
  if (__builtin_mul_overflow(x, y, &r))
    overflowed();
  return r;
}

int test_mul_overflow_xint128_xint128_xint128(_BitInt(128) x, _BitInt(128) y) {
  // CHECK-LABEL: define {{(dso_local )?}}i32 @test_mul_overflow_xint128_xint128_xint128({{.+}})
  // CHECK-NOT: ext
  // CHECK: [[S:%.+]] = call { i128, i1 } @llvm.smul.with.overflow.i128(i128 %{{.+}}, i128 %{{.+}})
  // CHECK-DAG: [[C:%.+]] = extractvalue { i128, i1 } [[S]], 1
  // CHECK-DAG: [[Q:%.+]] = extractvalue { i128, i1 } [[S]], 0
  // CHECK: store i128 [[Q]], ptr
  // CHECK: br i1 [[C]]
  _BitInt(128) r;
  if (__builtin_mul_overflow(x, y, &r))
    overflowed();
  return r;
}

int test_add_overflow_uint_int_int(unsigned x, int y) {
  // CHECK-LABEL: define {{(dso_local )?}}i32 @test_add_overflow_uint_int_int
  // CHECK: [[XE:%.+]] = zext i32 %{{.+}} to i33
  // CHECK: [[YE:%.+]] = sext i32 %{{.+}} to i33
  // CHECK: [[S:%.+]] = call { i33, i1 } @llvm.sadd.with.overflow.i33(i33 [[XE]], i33 [[YE]])
  // CHECK-DAG: [[Q:%.+]] = extractvalue { i33, i1 } [[S]], 0
  // CHECK-DAG: [[C1:%.+]] = extractvalue { i33, i1 } [[S]], 1
  // CHECK: [[QT:%.+]] = trunc i33 [[Q]] to i32
  // CHECK: [[QTE:%.+]] = sext i32 [[QT]] to i33
  // CHECK: [[C2:%.+]] = icmp ne i33 [[Q]], [[QTE]]
  // CHECK: [[C3:%.+]] = or i1 [[C1]], [[C2]]
  // CHECK: store i32 [[QT]], ptr
  // CHECK: br i1 [[C3]]
  int r;
  if (__builtin_add_overflow(x, y, &r))
    overflowed();
  return r;
}

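// Note on the test above: with an 'unsigned' and an 'int' operand and an 'int'
// result, the checks show the addition being performed in i33, which is wide
// enough to hold any value of either operand type. The i33 sum is truncated to
// the i32 result, and overflow is reported if either the i33 add overflowed or
// the truncation changed the value (the sext/icmp ne pair).
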
_Bool test_add_overflow_uint_uint_bool(unsigned x, unsigned y) {
  // CHECK-LABEL: define {{.*}} i1 @test_add_overflow_uint_uint_bool
  // CHECK-NOT: ext
  // CHECK: [[S:%.+]] = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %{{.+}}, i32 %{{.+}})
  // CHECK-DAG: [[Q:%.+]] = extractvalue { i32, i1 } [[S]], 0
  // CHECK-DAG: [[C1:%.+]] = extractvalue { i32, i1 } [[S]], 1
  // CHECK: [[QT:%.+]] = trunc i32 [[Q]] to i1
  // CHECK: [[QTE:%.+]] = zext i1 [[QT]] to i32
  // CHECK: [[C2:%.+]] = icmp ne i32 [[Q]], [[QTE]]
  // CHECK: [[C3:%.+]] = or i1 [[C1]], [[C2]]
  // CHECK: [[QT2:%.+]] = zext i1 [[QT]] to i8
  // CHECK: store i8 [[QT2]], ptr
  // CHECK: br i1 [[C3]]
  _Bool r;
  if (__builtin_add_overflow(x, y, &r))
    overflowed();
  return r;
}

unsigned test_add_overflow_bool_bool_uint(_Bool x, _Bool y) {
  // CHECK-LABEL: define {{(dso_local )?}}i32 @test_add_overflow_bool_bool_uint
  // CHECK: [[XE:%.+]] = zext i1 %{{.+}} to i32
  // CHECK: [[YE:%.+]] = zext i1 %{{.+}} to i32
  // CHECK: [[S:%.+]] = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 [[XE]], i32 [[YE]])
  // CHECK-DAG: [[Q:%.+]] = extractvalue { i32, i1 } [[S]], 0
  // CHECK-DAG: [[C:%.+]] = extractvalue { i32, i1 } [[S]], 1
  // CHECK: store i32 [[Q]], ptr
  // CHECK: br i1 [[C]]
  unsigned r;
  if (__builtin_add_overflow(x, y, &r))
    overflowed();
  return r;
}

_Bool test_add_overflow_bool_bool_bool(_Bool x, _Bool y) {
  // CHECK-LABEL: define {{.*}} i1 @test_add_overflow_bool_bool_bool
  // CHECK: [[S:%.+]] = call { i1, i1 } @llvm.uadd.with.overflow.i1(i1 %{{.+}}, i1 %{{.+}})
  // CHECK-DAG: [[Q:%.+]] = extractvalue { i1, i1 } [[S]], 0
  // CHECK-DAG: [[C:%.+]] = extractvalue { i1, i1 } [[S]], 1
  // CHECK: [[QT2:%.+]] = zext i1 [[Q]] to i8
  // CHECK: store i8 [[QT2]], ptr
  // CHECK: br i1 [[C]]
  _Bool r;
  if (__builtin_add_overflow(x, y, &r))
    overflowed();
  return r;
}

int test_add_overflow_volatile(int x, int y) {
  // CHECK-LABEL: define {{(dso_local )?}}i32 @test_add_overflow_volatile
  // CHECK: [[S:%.+]] = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %{{.+}}, i32 %{{.+}})
  // CHECK-DAG: [[Q:%.+]] = extractvalue { i32, i1 } [[S]], 0
  // CHECK-DAG: [[C:%.+]] = extractvalue { i32, i1 } [[S]], 1
  // CHECK: store volatile i32 [[Q]], ptr
  // CHECK: br i1 [[C]]
  volatile int result;
  if (__builtin_add_overflow(x, y, &result))
    overflowed();
  return result;
}

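// The tests below cover the type-specific forms (__builtin_uadd_overflow,
// __builtin_saddl_overflow, and so on). Each one takes operands and a result
// pointer of a single fixed type, so the checks only need to match the call to
// the corresponding llvm.*.with.overflow intrinsic. The [[UL]] capture accepts
// either i32 or i64 so that the 'long' variants pass on both the 32-bit and
// 64-bit RUN lines above.
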
unsigned test_uadd_overflow(unsigned x, unsigned y) {
  // CHECK: @test_uadd_overflow
  // CHECK: %{{.+}} = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %{{.+}}, i32 %{{.+}})
  unsigned result;
  if (__builtin_uadd_overflow(x, y, &result))
    return UnsignedErrorCode;
  return result;
}

unsigned long test_uaddl_overflow(unsigned long x, unsigned long y) {
  // CHECK: @test_uaddl_overflow([[UL:i32|i64]] noundef %x
  // CHECK: %{{.+}} = call { [[UL]], i1 } @llvm.uadd.with.overflow.[[UL]]([[UL]] %{{.+}}, [[UL]] %{{.+}})
  unsigned long result;
  if (__builtin_uaddl_overflow(x, y, &result))
    return UnsignedLongErrorCode;
  return result;
}

unsigned long long test_uaddll_overflow(unsigned long long x, unsigned long long y) {
  // CHECK: @test_uaddll_overflow
  // CHECK: %{{.+}} = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 %{{.+}}, i64 %{{.+}})
  unsigned long long result;
  if (__builtin_uaddll_overflow(x, y, &result))
    return UnsignedLongLongErrorCode;
  return result;
}

unsigned test_usub_overflow(unsigned x, unsigned y) {
  // CHECK: @test_usub_overflow
  // CHECK: %{{.+}} = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %{{.+}}, i32 %{{.+}})
  unsigned result;
  if (__builtin_usub_overflow(x, y, &result))
    return UnsignedErrorCode;
  return result;
}

unsigned long test_usubl_overflow(unsigned long x, unsigned long y) {
  // CHECK: @test_usubl_overflow([[UL:i32|i64]] noundef %x
  // CHECK: %{{.+}} = call { [[UL]], i1 } @llvm.usub.with.overflow.[[UL]]([[UL]] %{{.+}}, [[UL]] %{{.+}})
  unsigned long result;
  if (__builtin_usubl_overflow(x, y, &result))
    return UnsignedLongErrorCode;
  return result;
}

unsigned long long test_usubll_overflow(unsigned long long x, unsigned long long y) {
  // CHECK: @test_usubll_overflow
  // CHECK: %{{.+}} = call { i64, i1 } @llvm.usub.with.overflow.i64(i64 %{{.+}}, i64 %{{.+}})
  unsigned long long result;
  if (__builtin_usubll_overflow(x, y, &result))
    return UnsignedLongLongErrorCode;
  return result;
}

unsigned test_umul_overflow(unsigned x, unsigned y) {
  // CHECK: @test_umul_overflow
  // CHECK: %{{.+}} = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %{{.+}}, i32 %{{.+}})
  unsigned result;
  if (__builtin_umul_overflow(x, y, &result))
    return UnsignedErrorCode;
  return result;
}

unsigned long test_umull_overflow(unsigned long x, unsigned long y) {
  // CHECK: @test_umull_overflow([[UL:i32|i64]] noundef %x
  // CHECK: %{{.+}} = call { [[UL]], i1 } @llvm.umul.with.overflow.[[UL]]([[UL]] %{{.+}}, [[UL]] %{{.+}})
  unsigned long result;
  if (__builtin_umull_overflow(x, y, &result))
    return UnsignedLongErrorCode;
  return result;
}

unsigned long long test_umulll_overflow(unsigned long long x, unsigned long long y) {
  // CHECK: @test_umulll_overflow
  // CHECK: %{{.+}} = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 %{{.+}}, i64 %{{.+}})
  unsigned long long result;
  if (__builtin_umulll_overflow(x, y, &result))
    return UnsignedLongLongErrorCode;
  return result;
}

int test_sadd_overflow(int x, int y) {
  // CHECK: @test_sadd_overflow
  // CHECK: %{{.+}} = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %{{.+}}, i32 %{{.+}})
  int result;
  if (__builtin_sadd_overflow(x, y, &result))
    return IntErrorCode;
  return result;
}

long test_saddl_overflow(long x, long y) {
  // CHECK: @test_saddl_overflow([[UL:i32|i64]] noundef %x
  // CHECK: %{{.+}} = call { [[UL]], i1 } @llvm.sadd.with.overflow.[[UL]]([[UL]] %{{.+}}, [[UL]] %{{.+}})
  long result;
  if (__builtin_saddl_overflow(x, y, &result))
    return LongErrorCode;
  return result;
}

long long test_saddll_overflow(long long x, long long y) {
  // CHECK: @test_saddll_overflow
  // CHECK: %{{.+}} = call { i64, i1 } @llvm.sadd.with.overflow.i64(i64 %{{.+}}, i64 %{{.+}})
  long long result;
  if (__builtin_saddll_overflow(x, y, &result))
    return LongLongErrorCode;
  return result;
}

int test_ssub_overflow(int x, int y) {
  // CHECK: @test_ssub_overflow
  // CHECK: %{{.+}} = call { i32, i1 } @llvm.ssub.with.overflow.i32(i32 %{{.+}}, i32 %{{.+}})
  int result;
  if (__builtin_ssub_overflow(x, y, &result))
    return IntErrorCode;
  return result;
}

long test_ssubl_overflow(long x, long y) {
  // CHECK: @test_ssubl_overflow([[UL:i32|i64]] noundef %x
  // CHECK: %{{.+}} = call { [[UL]], i1 } @llvm.ssub.with.overflow.[[UL]]([[UL]] %{{.+}}, [[UL]] %{{.+}})
  long result;
  if (__builtin_ssubl_overflow(x, y, &result))
    return LongErrorCode;
  return result;
}

long long test_ssubll_overflow(long long x, long long y) {
  // CHECK: @test_ssubll_overflow
  // CHECK: %{{.+}} = call { i64, i1 } @llvm.ssub.with.overflow.i64(i64 %{{.+}}, i64 %{{.+}})
  long long result;
  if (__builtin_ssubll_overflow(x, y, &result))
    return LongLongErrorCode;
  return result;
}

int test_smul_overflow(int x, int y) {
  // CHECK: @test_smul_overflow
  // CHECK: %{{.+}} = call { i32, i1 } @llvm.smul.with.overflow.i32(i32 %{{.+}}, i32 %{{.+}})
  int result;
  if (__builtin_smul_overflow(x, y, &result))
    return IntErrorCode;
  return result;
}

long test_smull_overflow(long x, long y) {
  // CHECK: @test_smull_overflow([[UL:i32|i64]] noundef %x
  // CHECK: %{{.+}} = call { [[UL]], i1 } @llvm.smul.with.overflow.[[UL]]([[UL]] %{{.+}}, [[UL]] %{{.+}})
  long result;
  if (__builtin_smull_overflow(x, y, &result))
    return LongErrorCode;
  return result;
}

long long test_smulll_overflow(long long x, long long y) {
  // CHECK: @test_smulll_overflow
  // CHECK: %{{.+}} = call { i64, i1 } @llvm.smul.with.overflow.i64(i64 %{{.+}}, i64 %{{.+}})
  long long result;
  if (__builtin_smulll_overflow(x, y, &result))
    return LongLongErrorCode;
  return result;
}

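// The mixed-sign tests below exercise __builtin_mul_overflow with one signed
// and one unsigned operand. As the checks show, when both operands have the
// same width the signed operand is conditionally negated to obtain its
// absolute value, the multiplication is done with llvm.umul.with.overflow, and
// the result is negated again if the signed operand was negative; overflow is
// the intrinsic's flag OR'd with a range check against the destination type.
// When one operand is wider, both are instead extended to a common signed
// width (i64 or i65 in the last two tests) and llvm.smul.with.overflow is used.
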
int test_mixed_sign_mul_overflow_sext_signed_op(int x, unsigned long long y) {
  // CHECK: @test_mixed_sign_mul_overflow_sext_signed_op
  // CHECK: [[SignedOp:%.*]] = sext i32 %0 to i64
  // CHECK: [[IsNeg:%.*]] = icmp slt i64 [[SignedOp]], 0
  int result;
  if (__builtin_mul_overflow(x, y, &result))
    return LongErrorCode;
  return result;
}

int test_mixed_sign_mul_overflow_zext_unsigned_op(long long x, unsigned y) {
  // CHECK: @test_mixed_sign_mul_overflow_zext_unsigned_op
  // CHECK: [[UnsignedOp:%.*]] = zext i32 %1 to i64
  // CHECK: [[IsNeg:%.*]] = icmp slt i64 %0, 0
  // CHECK: @llvm.umul.with.overflow.i64({{.*}}, i64 [[UnsignedOp]])
  int result;
  if (__builtin_mul_overflow(x, y, &result))
    return LongErrorCode;
  return result;
}

int test_mixed_sign_mull_overflow(int x, unsigned y) {
  // CHECK: @test_mixed_sign_mull_overflow
  // CHECK: [[IsNeg:%.*]] = icmp slt i32 [[Op1:%.*]], 0
  // CHECK-NEXT: [[Signed:%.*]] = sub i32 0, [[Op1]]
  // CHECK-NEXT: [[AbsSigned:%.*]] = select i1 [[IsNeg]], i32 [[Signed]], i32 [[Op1]]
  // CHECK-NEXT: call { i32, i1 } @llvm.umul.with.overflow.i32(i32 [[AbsSigned]], i32 %{{.*}})
  // CHECK-NEXT: [[UnsignedOFlow:%.*]] = extractvalue { i32, i1 } %{{.*}}, 1
  // CHECK-NEXT: [[UnsignedResult:%.*]] = extractvalue { i32, i1 } %{{.*}}, 0
  // CHECK-NEXT: [[IsNegZext:%.*]] = zext i1 [[IsNeg]] to i32
  // CHECK-NEXT: [[MaxResult:%.*]] = add i32 2147483647, [[IsNegZext]]
  // CHECK-NEXT: [[SignedOFlow:%.*]] = icmp ugt i32 [[UnsignedResult]], [[MaxResult]]
  // CHECK-NEXT: [[OFlow:%.*]] = or i1 [[UnsignedOFlow]], [[SignedOFlow]]
  // CHECK-NEXT: [[NegativeResult:%.*]] = sub i32 0, [[UnsignedResult]]
  // CHECK-NEXT: [[Result:%.*]] = select i1 [[IsNeg]], i32 [[NegativeResult]], i32 [[UnsignedResult]]
  // CHECK-NEXT: store i32 [[Result]], ptr %{{.*}}, align 4
  // CHECK: br i1 [[OFlow]]

  int result;
  if (__builtin_mul_overflow(x, y, &result))
    return LongErrorCode;
  return result;
}

int test_mixed_sign_mull_overflow_unsigned(int x, unsigned y) {
  // CHECK: @test_mixed_sign_mull_overflow_unsigned
  // CHECK: [[IsNeg:%.*]] = icmp slt i32 [[Op1:%.*]], 0
  // CHECK-NEXT: [[Signed:%.*]] = sub i32 0, [[Op1]]
  // CHECK-NEXT: [[AbsSigned:%.*]] = select i1 [[IsNeg]], i32 [[Signed]], i32 [[Op1]]
  // CHECK-NEXT: call { i32, i1 } @llvm.umul.with.overflow.i32(i32 [[AbsSigned]], i32 %{{.*}})
  // CHECK-NEXT: [[UnsignedOFlow:%.*]] = extractvalue { i32, i1 } %{{.*}}, 1
  // CHECK-NEXT: [[UnsignedResult:%.*]] = extractvalue { i32, i1 } %{{.*}}, 0
  // CHECK-NEXT: [[NotNull:%.*]] = icmp ne i32 [[UnsignedResult]], 0
  // CHECK-NEXT: [[Underflow:%.*]] = and i1 [[IsNeg]], [[NotNull]]
  // CHECK-NEXT: [[OFlow:%.*]] = or i1 [[UnsignedOFlow]], [[Underflow]]
  // CHECK-NEXT: [[NegatedResult:%.*]] = sub i32 0, [[UnsignedResult]]
  // CHECK-NEXT: [[Result:%.*]] = select i1 [[IsNeg]], i32 [[NegatedResult]], i32 [[UnsignedResult]]
  // CHECK-NEXT: store i32 [[Result]], ptr %{{.*}}, align 4
  // CHECK: br i1 [[OFlow]]

  unsigned result;
  if (__builtin_mul_overflow(x, y, &result))
    return LongErrorCode;
  return result;
}

int test_mixed_sign_mull_overflow_swapped(int x, unsigned y) {
  // CHECK: @test_mixed_sign_mull_overflow_swapped
  // CHECK: call { i32, i1 } @llvm.umul.with.overflow.i32
  // CHECK: add i32 2147483647
  int result;
  if (__builtin_mul_overflow(y, x, &result))
    return LongErrorCode;
  return result;
}

long long test_mixed_sign_mulll_overflow(long long x, unsigned long long y) {
  // CHECK: @test_mixed_sign_mulll_overflow
  // CHECK: call { i64, i1 } @llvm.umul.with.overflow.i64
  // CHECK: add i64 92233720368547
  long long result;
  if (__builtin_mul_overflow(x, y, &result))
    return LongLongErrorCode;
  return result;
}

long long test_mixed_sign_mulll_overflow_swapped(long long x, unsigned long long y) {
  // CHECK: @test_mixed_sign_mulll_overflow_swapped
  // CHECK: call { i64, i1 } @llvm.umul.with.overflow.i64
  // CHECK: add i64 92233720368547
  long long result;
  if (__builtin_mul_overflow(y, x, &result))
    return LongLongErrorCode;
  return result;
}

long long test_mixed_sign_mulll_overflow_trunc_signed(long long x, unsigned long long y) {
  // CHECK: @test_mixed_sign_mulll_overflow_trunc_signed
  // CHECK: call { i64, i1 } @llvm.umul.with.overflow.i64
  // CHECK: add i64 2147483647
  // CHECK: trunc
  // CHECK: store
  int result;
  if (__builtin_mul_overflow(y, x, &result))
    return LongLongErrorCode;
  return result;
}

long long test_mixed_sign_mulll_overflow_trunc_unsigned(long long x, unsigned long long y) {
  // CHECK: @test_mixed_sign_mulll_overflow_trunc_unsigned
  // CHECK: call { i64, i1 } @llvm.umul.with.overflow.i64
  // CHECK: [[NON_ZERO:%.*]] = icmp ne i64 [[UNSIGNED_RESULT:%.*]], 0
  // CHECK-NEXT: [[UNDERFLOW:%.*]] = and i1 {{.*}}, [[NON_ZERO]]
  // CHECK-NEXT: [[OVERFLOW_PRE_TRUNC:%.*]] = or i1 {{.*}}, [[UNDERFLOW]]
  // CHECK-NEXT: [[TRUNC_OVERFLOW:%.*]] = icmp ugt i64 [[UNSIGNED_RESULT]], 4294967295
  // CHECK-NEXT: [[OVERFLOW:%.*]] = or i1 [[OVERFLOW_PRE_TRUNC]], [[TRUNC_OVERFLOW]]
  // CHECK-NEXT: [[NEGATED:%.*]] = sub i64 0, [[UNSIGNED_RESULT]]
  // CHECK-NEXT: [[RESULT:%.*]] = select i1 {{.*}}, i64 [[NEGATED]], i64 [[UNSIGNED_RESULT]]
  // CHECK-NEXT: trunc i64 [[RESULT]] to i32
  // CHECK-NEXT: store
  unsigned result;
  if (__builtin_mul_overflow(y, x, &result))
    return LongLongErrorCode;
  return result;
}

long long test_mixed_sign_mul_overflow_extend_signed(int x, unsigned y) {
  // CHECK: @test_mixed_sign_mul_overflow_extend_signed
  // CHECK: call { i64, i1 } @llvm.smul.with.overflow.i64
  long long result;
  if (__builtin_mul_overflow(y, x, &result))
    return LongLongErrorCode;
  return result;
}

long long test_mixed_sign_mul_overflow_extend_unsigned(int x, unsigned y) {
  // CHECK: @test_mixed_sign_mul_overflow_extend_unsigned
  // CHECK: call { i65, i1 } @llvm.smul.with.overflow.i65
  unsigned long long result;
  if (__builtin_mul_overflow(y, x, &result))
    return LongLongErrorCode;
  return result;
}
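
// A minimal usage sketch, not part of the original test and not exercised by
// any CHECK lines: the generic builtins accept operands of differing types and
// signedness, and report whether the mathematically exact result fits in the
// destination. The helper name below is purely illustrative.
long long example_checked_scale(int count, unsigned elem_size) {
  long long total;
  // Overflow is judged against 'long long', the type of 'total'.
  if (__builtin_mul_overflow(count, elem_size, &total))
    overflowed();
  return total;
}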