//===- MemoryBuiltins.cpp - Identify calls to memory builtins -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This family of functions identifies calls to builtin functions that allocate
// or free memory.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/TargetFolder.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/Utils/Local.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cstdint>
#include <iterator>
#include <numeric>
#include <optional>
#include <type_traits>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "memory-builtins"

enum AllocType : uint8_t {
  OpNewLike         = 1<<0, // allocates; never returns null
  MallocLike        = 1<<1, // allocates; may return null
  StrDupLike        = 1<<2,
  MallocOrOpNewLike = MallocLike | OpNewLike,
  AllocLike         = MallocOrOpNewLike | StrDupLike,
  AnyAlloc          = AllocLike
};

enum class MallocFamily {
  Malloc,
  CPPNew,             // new(unsigned int)
  CPPNewAligned,      // new(unsigned int, align_val_t)
  CPPNewArray,        // new[](unsigned int)
  CPPNewArrayAligned, // new[](unsigned long, align_val_t)
  MSVCNew,            // new(unsigned int)
  MSVCArrayNew,       // new[](unsigned int)
  VecMalloc,
  KmpcAllocShared,
};

StringRef mangledNameForMallocFamily(const MallocFamily &Family) {
  switch (Family) {
  case MallocFamily::Malloc:
    return "malloc";
  case MallocFamily::CPPNew:
    return "_Znwm";
  case MallocFamily::CPPNewAligned:
    return "_ZnwmSt11align_val_t";
  case MallocFamily::CPPNewArray:
    return "_Znam";
  case MallocFamily::CPPNewArrayAligned:
    return "_ZnamSt11align_val_t";
  case MallocFamily::MSVCNew:
    return "??2@YAPAXI@Z";
  case MallocFamily::MSVCArrayNew:
    return "??_U@YAPAXI@Z";
  case MallocFamily::VecMalloc:
    return "vec_malloc";
  case MallocFamily::KmpcAllocShared:
    return "__kmpc_alloc_shared";
  }
  llvm_unreachable("missing an alloc family");
}

struct AllocFnsTy {
  AllocType AllocTy;
  unsigned NumParams;
  // First and Second size parameters (or -1 if unused)
  int FstParam, SndParam;
  // Alignment parameter for aligned_alloc and aligned new
  int AlignParam;
  // Name of default allocator function to group malloc/free calls by family
  MallocFamily Family;
};

// clang-format off
// FIXME: certain users need more information. E.g., SimplifyLibCalls needs to
// know which functions are nounwind, noalias, nocapture parameters, etc.
static const std::pair<LibFunc, AllocFnsTy> AllocationFnData[] = {
    {LibFunc_Znwj, {OpNewLike, 1, 0, -1, -1, MallocFamily::CPPNew}}, // new(unsigned int)
    {LibFunc_ZnwjRKSt9nothrow_t, {MallocLike, 2, 0, -1, -1, MallocFamily::CPPNew}}, // new(unsigned int, nothrow)
    {LibFunc_ZnwjSt11align_val_t, {OpNewLike, 2, 0, -1, 1, MallocFamily::CPPNewAligned}}, // new(unsigned int, align_val_t)
    {LibFunc_ZnwjSt11align_val_tRKSt9nothrow_t, {MallocLike, 3, 0, -1, 1, MallocFamily::CPPNewAligned}}, // new(unsigned int, align_val_t, nothrow)
    {LibFunc_Znwm, {OpNewLike, 1, 0, -1, -1, MallocFamily::CPPNew}}, // new(unsigned long)
    {LibFunc_Znwm12__hot_cold_t, {OpNewLike, 2, 0, -1, -1, MallocFamily::CPPNew}}, // new(unsigned long, __hot_cold_t)
    {LibFunc_ZnwmRKSt9nothrow_t, {MallocLike, 2, 0, -1, -1, MallocFamily::CPPNew}}, // new(unsigned long, nothrow)
    {LibFunc_ZnwmRKSt9nothrow_t12__hot_cold_t, {MallocLike, 3, 0, -1, -1, MallocFamily::CPPNew}}, // new(unsigned long, nothrow, __hot_cold_t)
    {LibFunc_ZnwmSt11align_val_t, {OpNewLike, 2, 0, -1, 1, MallocFamily::CPPNewAligned}}, // new(unsigned long, align_val_t)
    {LibFunc_ZnwmSt11align_val_t12__hot_cold_t, {OpNewLike, 3, 0, -1, 1, MallocFamily::CPPNewAligned}}, // new(unsigned long, align_val_t, __hot_cold_t)
    {LibFunc_ZnwmSt11align_val_tRKSt9nothrow_t, {MallocLike, 3, 0, -1, 1, MallocFamily::CPPNewAligned}}, // new(unsigned long, align_val_t, nothrow)
    {LibFunc_ZnwmSt11align_val_tRKSt9nothrow_t12__hot_cold_t, {MallocLike, 4, 0, -1, 1, MallocFamily::CPPNewAligned}}, // new(unsigned long, align_val_t, nothrow, __hot_cold_t)
    {LibFunc_Znaj, {OpNewLike, 1, 0, -1, -1, MallocFamily::CPPNewArray}}, // new[](unsigned int)
    {LibFunc_ZnajRKSt9nothrow_t, {MallocLike, 2, 0, -1, -1, MallocFamily::CPPNewArray}}, // new[](unsigned int, nothrow)
    {LibFunc_ZnajSt11align_val_t, {OpNewLike, 2, 0, -1, 1, MallocFamily::CPPNewArrayAligned}}, // new[](unsigned int, align_val_t)
    {LibFunc_ZnajSt11align_val_tRKSt9nothrow_t, {MallocLike, 3, 0, -1, 1, MallocFamily::CPPNewArrayAligned}}, // new[](unsigned int, align_val_t, nothrow)
    {LibFunc_Znam, {OpNewLike, 1, 0, -1, -1, MallocFamily::CPPNewArray}}, // new[](unsigned long)
    {LibFunc_Znam12__hot_cold_t, {OpNewLike, 2, 0, -1, -1, MallocFamily::CPPNewArray}}, // new[](unsigned long, __hot_cold_t)
    {LibFunc_ZnamRKSt9nothrow_t, {MallocLike, 2, 0, -1, -1, MallocFamily::CPPNewArray}}, // new[](unsigned long, nothrow)
    {LibFunc_ZnamRKSt9nothrow_t12__hot_cold_t, {MallocLike, 3, 0, -1, -1, MallocFamily::CPPNewArray}}, // new[](unsigned long, nothrow, __hot_cold_t)
    {LibFunc_ZnamSt11align_val_t, {OpNewLike, 2, 0, -1, 1, MallocFamily::CPPNewArrayAligned}}, // new[](unsigned long, align_val_t)
    {LibFunc_ZnamSt11align_val_t12__hot_cold_t, {OpNewLike, 3, 0, -1, 1, MallocFamily::CPPNewArrayAligned}}, // new[](unsigned long, align_val_t, __hot_cold_t)
    {LibFunc_ZnamSt11align_val_tRKSt9nothrow_t, {MallocLike, 3, 0, -1, 1, MallocFamily::CPPNewArrayAligned}}, // new[](unsigned long, align_val_t, nothrow)
    {LibFunc_ZnamSt11align_val_tRKSt9nothrow_t12__hot_cold_t, {MallocLike, 4, 0, -1, 1, MallocFamily::CPPNewArrayAligned}}, // new[](unsigned long, align_val_t, nothrow, __hot_cold_t)
    {LibFunc_msvc_new_int, {OpNewLike, 1, 0, -1, -1, MallocFamily::MSVCNew}}, // new(unsigned int)
    {LibFunc_msvc_new_int_nothrow, {MallocLike, 2, 0, -1, -1, MallocFamily::MSVCNew}}, // new(unsigned int, nothrow)
    {LibFunc_msvc_new_longlong, {OpNewLike, 1, 0, -1, -1, MallocFamily::MSVCNew}}, // new(unsigned long long)
    {LibFunc_msvc_new_longlong_nothrow, {MallocLike, 2, 0, -1, -1, MallocFamily::MSVCNew}}, // new(unsigned long long, nothrow)
    {LibFunc_msvc_new_array_int, {OpNewLike, 1, 0, -1, -1, MallocFamily::MSVCArrayNew}}, // new[](unsigned int)
    {LibFunc_msvc_new_array_int_nothrow, {MallocLike, 2, 0, -1, -1, MallocFamily::MSVCArrayNew}}, // new[](unsigned int, nothrow)
    {LibFunc_msvc_new_array_longlong, {OpNewLike, 1, 0, -1, -1, MallocFamily::MSVCArrayNew}}, // new[](unsigned long long)
    {LibFunc_msvc_new_array_longlong_nothrow, {MallocLike, 2, 0, -1, -1, MallocFamily::MSVCArrayNew}}, // new[](unsigned long long, nothrow)
    {LibFunc_strdup, {StrDupLike, 1, -1, -1, -1, MallocFamily::Malloc}},
    {LibFunc_dunder_strdup, {StrDupLike, 1, -1, -1, -1, MallocFamily::Malloc}},
    {LibFunc_strndup, {StrDupLike, 2, 1, -1, -1, MallocFamily::Malloc}},
    {LibFunc_dunder_strndup, {StrDupLike, 2, 1, -1, -1, MallocFamily::Malloc}},
    {LibFunc___kmpc_alloc_shared, {MallocLike, 1, 0, -1, -1, MallocFamily::KmpcAllocShared}},
};
// clang-format on

static const Function *getCalledFunction(const Value *V, bool &IsNoBuiltin) {
  // Don't care about intrinsics in this case.
  if (isa<IntrinsicInst>(V))
    return nullptr;

  const auto *CB = dyn_cast<CallBase>(V);
  if (!CB)
    return nullptr;

  IsNoBuiltin = CB->isNoBuiltin();

  if (const Function *Callee = CB->getCalledFunction())
    return Callee;
  return nullptr;
}

/// Returns the allocation data for the given value if it's a call to a known
/// allocation function.
static std::optional<AllocFnsTy>
getAllocationDataForFunction(const Function *Callee, AllocType AllocTy,
                             const TargetLibraryInfo *TLI) {
  // Don't perform a slow TLI lookup if this function doesn't return a pointer
  // and thus can't be an allocation function.
  if (!Callee->getReturnType()->isPointerTy())
    return std::nullopt;

  // Make sure that the function is available.
  LibFunc TLIFn;
  if (!TLI || !TLI->getLibFunc(*Callee, TLIFn) || !TLI->has(TLIFn))
    return std::nullopt;

  const auto *Iter = find_if(
      AllocationFnData, [TLIFn](const std::pair<LibFunc, AllocFnsTy> &P) {
        return P.first == TLIFn;
      });

  if (Iter == std::end(AllocationFnData))
    return std::nullopt;

  const AllocFnsTy *FnData = &Iter->second;
  if ((FnData->AllocTy & AllocTy) != FnData->AllocTy)
    return std::nullopt;

  // Check function prototype.
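  // Any size parameters recorded in the table above must be 32- or 64-bit
  // integers for the declaration to be accepted. For example, _Znwm
  // (operator new(unsigned long)) is expected to take a single integer size
  // argument at index 0; a declaration with a mismatched signature is not
  // treated as an allocation function.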
  int FstParam = FnData->FstParam;
  int SndParam = FnData->SndParam;
  FunctionType *FTy = Callee->getFunctionType();

  if (FTy->getReturnType()->isPointerTy() &&
      FTy->getNumParams() == FnData->NumParams &&
      (FstParam < 0 ||
       (FTy->getParamType(FstParam)->isIntegerTy(32) ||
        FTy->getParamType(FstParam)->isIntegerTy(64))) &&
      (SndParam < 0 ||
       FTy->getParamType(SndParam)->isIntegerTy(32) ||
       FTy->getParamType(SndParam)->isIntegerTy(64)))
    return *FnData;
  return std::nullopt;
}

static std::optional<AllocFnsTy>
getAllocationData(const Value *V, AllocType AllocTy,
                  const TargetLibraryInfo *TLI) {
  bool IsNoBuiltinCall;
  if (const Function *Callee = getCalledFunction(V, IsNoBuiltinCall))
    if (!IsNoBuiltinCall)
      return getAllocationDataForFunction(Callee, AllocTy, TLI);
  return std::nullopt;
}

static std::optional<AllocFnsTy>
getAllocationData(const Value *V, AllocType AllocTy,
                  function_ref<const TargetLibraryInfo &(Function &)> GetTLI) {
  bool IsNoBuiltinCall;
  if (const Function *Callee = getCalledFunction(V, IsNoBuiltinCall))
    if (!IsNoBuiltinCall)
      return getAllocationDataForFunction(
          Callee, AllocTy, &GetTLI(const_cast<Function &>(*Callee)));
  return std::nullopt;
}

static std::optional<AllocFnsTy>
getAllocationSize(const Value *V, const TargetLibraryInfo *TLI) {
  bool IsNoBuiltinCall;
  const Function *Callee = getCalledFunction(V, IsNoBuiltinCall);
  if (!Callee)
    return std::nullopt;

  // Prefer to use existing information over allocsize. This will give us an
  // accurate AllocTy.
  if (!IsNoBuiltinCall)
    if (std::optional<AllocFnsTy> Data =
            getAllocationDataForFunction(Callee, AnyAlloc, TLI))
      return Data;

  Attribute Attr = Callee->getFnAttribute(Attribute::AllocSize);
  if (Attr == Attribute())
    return std::nullopt;

  std::pair<unsigned, std::optional<unsigned>> Args = Attr.getAllocSizeArgs();

  AllocFnsTy Result;
  // Because allocsize only tells us how many bytes are allocated, we're not
  // really allowed to assume anything, so we use MallocLike.
  Result.AllocTy = MallocLike;
  Result.NumParams = Callee->getNumOperands();
  Result.FstParam = Args.first;
  Result.SndParam = Args.second.value_or(-1);
  // Allocsize has no way to specify an alignment argument
  Result.AlignParam = -1;
  return Result;
}

static AllocFnKind getAllocFnKind(const Value *V) {
  if (const auto *CB = dyn_cast<CallBase>(V)) {
    Attribute Attr = CB->getFnAttr(Attribute::AllocKind);
    if (Attr.isValid())
      return AllocFnKind(Attr.getValueAsInt());
  }
  return AllocFnKind::Unknown;
}

static AllocFnKind getAllocFnKind(const Function *F) {
  Attribute Attr = F->getFnAttribute(Attribute::AllocKind);
  if (Attr.isValid())
    return AllocFnKind(Attr.getValueAsInt());
  return AllocFnKind::Unknown;
}

static bool checkFnAllocKind(const Value *V, AllocFnKind Wanted) {
  return (getAllocFnKind(V) & Wanted) != AllocFnKind::Unknown;
}

static bool checkFnAllocKind(const Function *F, AllocFnKind Wanted) {
  return (getAllocFnKind(F) & Wanted) != AllocFnKind::Unknown;
}

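// Illustrative usage (a sketch, not part of this file): a transform that wants
// to fold away a known allocation size typically combines the public queries
// defined below, e.g.
//
//   if (isAllocationFn(CB, TLI))
//     if (std::optional<APInt> Size = getAllocSize(CB, TLI))
//       ... use *Size as the size of the object returned by CB ...
//
// getAllocSize also accepts a value mapper for callers that want to look
// through copies before inspecting the size arguments; passing the identity
// mapping is sufficient when no such mapping is needed.
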
/// Tests if a value is a call or invoke to a library function that
/// allocates or reallocates memory (either malloc, calloc, realloc, or strdup
/// like).
bool llvm::isAllocationFn(const Value *V, const TargetLibraryInfo *TLI) {
  return getAllocationData(V, AnyAlloc, TLI).has_value() ||
         checkFnAllocKind(V, AllocFnKind::Alloc | AllocFnKind::Realloc);
}
bool llvm::isAllocationFn(
    const Value *V,
    function_ref<const TargetLibraryInfo &(Function &)> GetTLI) {
  return getAllocationData(V, AnyAlloc, GetTLI).has_value() ||
         checkFnAllocKind(V, AllocFnKind::Alloc | AllocFnKind::Realloc);
}

/// Tests if a value is a call or invoke to a library function that
/// allocates memory via new.
bool llvm::isNewLikeFn(const Value *V, const TargetLibraryInfo *TLI) {
  return getAllocationData(V, OpNewLike, TLI).has_value();
}

/// Tests if a value is a call or invoke to a library function that
/// allocates memory similar to malloc or calloc.
bool llvm::isMallocOrCallocLikeFn(const Value *V,
                                  const TargetLibraryInfo *TLI) {
  // TODO: Function behavior does not match name.
  return getAllocationData(V, MallocOrOpNewLike, TLI).has_value();
}

/// Tests if a value is a call or invoke to a library function that
/// allocates memory (either malloc, calloc, or strdup like).
bool llvm::isAllocLikeFn(const Value *V, const TargetLibraryInfo *TLI) {
  return getAllocationData(V, AllocLike, TLI).has_value() ||
         checkFnAllocKind(V, AllocFnKind::Alloc);
}

/// Tests if a function is a library function that reallocates memory (e.g.,
/// realloc).
bool llvm::isReallocLikeFn(const Function *F) {
  return checkFnAllocKind(F, AllocFnKind::Realloc);
}

Value *llvm::getReallocatedOperand(const CallBase *CB) {
  if (checkFnAllocKind(CB, AllocFnKind::Realloc))
    return CB->getArgOperandWithAttribute(Attribute::AllocatedPointer);
  return nullptr;
}

bool llvm::isRemovableAlloc(const CallBase *CB, const TargetLibraryInfo *TLI) {
  // Note: Removability is highly dependent on the source language. For
  // example, recent C++ requires direct calls to the global allocation
  // function [basic.stc.dynamic.allocation] to be observable unless they are
  // part of a new expression [expr.new paragraph 13].

  // Historically we've treated the C family allocation routines and operator
  // new as removable.
  return isAllocLikeFn(CB, TLI);
}

Value *llvm::getAllocAlignment(const CallBase *V,
                               const TargetLibraryInfo *TLI) {
  const std::optional<AllocFnsTy> FnData = getAllocationData(V, AnyAlloc, TLI);
  if (FnData && FnData->AlignParam >= 0) {
    return V->getOperand(FnData->AlignParam);
  }
  return V->getArgOperandWithAttribute(Attribute::AllocAlign);
}

/// When we're compiling N-bit code, and the user uses parameters that are
/// greater than N bits (e.g. uint64_t on a 32-bit build), we can run into
/// trouble with APInt size issues. This function handles resizing + overflow
/// checks for us. Check and zext or trunc \p I depending on IntTyBits and
/// I's value.
static bool CheckedZextOrTrunc(APInt &I, unsigned IntTyBits) {
  // More bits than we can handle. Checking the bit width isn't necessary, but
  // it's faster than checking active bits, and should give `false` in the
  // vast majority of cases.
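  // For example, with a 32-bit index type a constant like (uint64_t)1 << 32
  // has 33 active bits and cannot be represented, so we report failure rather
  // than silently truncating it to zero.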
  if (I.getBitWidth() > IntTyBits && I.getActiveBits() > IntTyBits)
    return false;
  if (I.getBitWidth() != IntTyBits)
    I = I.zextOrTrunc(IntTyBits);
  return true;
}

std::optional<APInt>
llvm::getAllocSize(const CallBase *CB, const TargetLibraryInfo *TLI,
                   function_ref<const Value *(const Value *)> Mapper) {
  // Note: This handles both explicitly listed allocation functions and
  // allocsize. The code structure could stand to be cleaned up a bit.
  std::optional<AllocFnsTy> FnData = getAllocationSize(CB, TLI);
  if (!FnData)
    return std::nullopt;

  // Get the index type for this address space; results and intermediate
  // computations are performed at that width.
  auto &DL = CB->getModule()->getDataLayout();
  const unsigned IntTyBits = DL.getIndexTypeSizeInBits(CB->getType());

  // Handle strdup-like functions separately.
  if (FnData->AllocTy == StrDupLike) {
    APInt Size(IntTyBits, GetStringLength(Mapper(CB->getArgOperand(0))));
    if (!Size)
      return std::nullopt;

    // Strndup limits strlen.
    if (FnData->FstParam > 0) {
      const ConstantInt *Arg =
          dyn_cast<ConstantInt>(Mapper(CB->getArgOperand(FnData->FstParam)));
      if (!Arg)
        return std::nullopt;

      APInt MaxSize = Arg->getValue().zext(IntTyBits);
      if (Size.ugt(MaxSize))
        Size = MaxSize + 1;
    }
    return Size;
  }

  const ConstantInt *Arg =
      dyn_cast<ConstantInt>(Mapper(CB->getArgOperand(FnData->FstParam)));
  if (!Arg)
    return std::nullopt;

  APInt Size = Arg->getValue();
  if (!CheckedZextOrTrunc(Size, IntTyBits))
    return std::nullopt;

  // Size is determined by just 1 parameter.
  if (FnData->SndParam < 0)
    return Size;

  Arg = dyn_cast<ConstantInt>(Mapper(CB->getArgOperand(FnData->SndParam)));
  if (!Arg)
    return std::nullopt;

  APInt NumElems = Arg->getValue();
  if (!CheckedZextOrTrunc(NumElems, IntTyBits))
    return std::nullopt;

  bool Overflow;
  Size = Size.umul_ov(NumElems, Overflow);
  if (Overflow)
    return std::nullopt;
  return Size;
}

Constant *llvm::getInitialValueOfAllocation(const Value *V,
                                            const TargetLibraryInfo *TLI,
                                            Type *Ty) {
  auto *Alloc = dyn_cast<CallBase>(V);
  if (!Alloc)
    return nullptr;

  // Memory returned by malloc-like and new-like allocators is uninitialized
  // (undef).
  if (getAllocationData(Alloc, MallocOrOpNewLike, TLI).has_value())
    return UndefValue::get(Ty);

  AllocFnKind AK = getAllocFnKind(Alloc);
  if ((AK & AllocFnKind::Uninitialized) != AllocFnKind::Unknown)
    return UndefValue::get(Ty);
  if ((AK & AllocFnKind::Zeroed) != AllocFnKind::Unknown)
    return Constant::getNullValue(Ty);

  return nullptr;
}

struct FreeFnsTy {
  unsigned NumParams;
  // Name of default allocator function to group malloc/free calls by family
  MallocFamily Family;
};

// clang-format off
static const std::pair<LibFunc, FreeFnsTy> FreeFnData[] = {
    {LibFunc_ZdlPv, {1, MallocFamily::CPPNew}}, // operator delete(void*)
    {LibFunc_ZdaPv, {1, MallocFamily::CPPNewArray}}, // operator delete[](void*)
    {LibFunc_msvc_delete_ptr32, {1, MallocFamily::MSVCNew}}, // operator delete(void*)
    {LibFunc_msvc_delete_ptr64, {1, MallocFamily::MSVCNew}}, // operator delete(void*)
    {LibFunc_msvc_delete_array_ptr32, {1, MallocFamily::MSVCArrayNew}}, // operator delete[](void*)
    {LibFunc_msvc_delete_array_ptr64, {1, MallocFamily::MSVCArrayNew}}, // operator delete[](void*)
    {LibFunc_ZdlPvj, {2, MallocFamily::CPPNew}}, // delete(void*, uint)
    {LibFunc_ZdlPvm, {2, MallocFamily::CPPNew}}, // delete(void*, ulong)
    {LibFunc_ZdlPvRKSt9nothrow_t, {2, MallocFamily::CPPNew}}, // delete(void*, nothrow)
    {LibFunc_ZdlPvSt11align_val_t, {2, MallocFamily::CPPNewAligned}}, // delete(void*, align_val_t)
    {LibFunc_ZdaPvj, {2, MallocFamily::CPPNewArray}}, // delete[](void*, uint)
    {LibFunc_ZdaPvm, {2, MallocFamily::CPPNewArray}}, // delete[](void*, ulong)
    {LibFunc_ZdaPvRKSt9nothrow_t, {2, MallocFamily::CPPNewArray}}, // delete[](void*, nothrow)
    {LibFunc_ZdaPvSt11align_val_t, {2, MallocFamily::CPPNewArrayAligned}}, // delete[](void*, align_val_t)
    {LibFunc_msvc_delete_ptr32_int, {2, MallocFamily::MSVCNew}}, // delete(void*, uint)
    {LibFunc_msvc_delete_ptr64_longlong, {2, MallocFamily::MSVCNew}}, // delete(void*, ulonglong)
    {LibFunc_msvc_delete_ptr32_nothrow, {2, MallocFamily::MSVCNew}}, // delete(void*, nothrow)
    {LibFunc_msvc_delete_ptr64_nothrow, {2, MallocFamily::MSVCNew}}, // delete(void*, nothrow)
    {LibFunc_msvc_delete_array_ptr32_int, {2, MallocFamily::MSVCArrayNew}}, // delete[](void*, uint)
    {LibFunc_msvc_delete_array_ptr64_longlong, {2, MallocFamily::MSVCArrayNew}}, // delete[](void*, ulonglong)
    {LibFunc_msvc_delete_array_ptr32_nothrow, {2, MallocFamily::MSVCArrayNew}}, // delete[](void*, nothrow)
    {LibFunc_msvc_delete_array_ptr64_nothrow, {2, MallocFamily::MSVCArrayNew}}, // delete[](void*, nothrow)
    {LibFunc___kmpc_free_shared, {2, MallocFamily::KmpcAllocShared}}, // OpenMP Offloading RTL free
    {LibFunc_ZdlPvSt11align_val_tRKSt9nothrow_t, {3, MallocFamily::CPPNewAligned}}, // delete(void*, align_val_t, nothrow)
    {LibFunc_ZdaPvSt11align_val_tRKSt9nothrow_t, {3, MallocFamily::CPPNewArrayAligned}}, // delete[](void*, align_val_t, nothrow)
    {LibFunc_ZdlPvjSt11align_val_t, {3, MallocFamily::CPPNewAligned}}, // delete(void*, unsigned int, align_val_t)
    {LibFunc_ZdlPvmSt11align_val_t, {3, MallocFamily::CPPNewAligned}}, // delete(void*, unsigned long, align_val_t)
    {LibFunc_ZdaPvjSt11align_val_t, {3, MallocFamily::CPPNewArrayAligned}}, // delete[](void*, unsigned int, align_val_t)
    {LibFunc_ZdaPvmSt11align_val_t, {3, MallocFamily::CPPNewArrayAligned}}, // delete[](void*, unsigned long, align_val_t)
};
// clang-format on

std::optional<FreeFnsTy> getFreeFunctionDataForFunction(const Function *Callee,
                                                        const LibFunc TLIFn) {
  const auto *Iter =
      find_if(FreeFnData, [TLIFn](const std::pair<LibFunc, FreeFnsTy> &P) {
        return P.first == TLIFn;
      });
  if (Iter == std::end(FreeFnData))
    return std::nullopt;
  return Iter->second;
}

std::optional<StringRef>
llvm::getAllocationFamily(const Value *I, const TargetLibraryInfo *TLI) {
  bool IsNoBuiltin;
  const Function *Callee = getCalledFunction(I, IsNoBuiltin);
  if (Callee == nullptr || IsNoBuiltin)
    return std::nullopt;
  LibFunc TLIFn;

  if (TLI && TLI->getLibFunc(*Callee, TLIFn) && TLI->has(TLIFn)) {
    // Callee is some known library function.
    const auto AllocData = getAllocationDataForFunction(Callee, AnyAlloc, TLI);
    if (AllocData)
      return mangledNameForMallocFamily(AllocData->Family);
    const auto FreeData = getFreeFunctionDataForFunction(Callee, TLIFn);
    if (FreeData)
      return mangledNameForMallocFamily(FreeData->Family);
  }
  // Callee isn't a known library function; still check attributes.
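  // Allocators and deallocators that TargetLibraryInfo does not recognize can
  // still carry an explicit "alloc-family" string attribute, typically next to
  // allockind(...). As an illustrative (hypothetical) example, a custom
  // runtime might declare
  //   declare ptr @my_alloc(i64) allockind("alloc,uninitialized")
  //                              "alloc-family"="my_alloc"
  // and its calls would be grouped by that family string here.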
  if (checkFnAllocKind(I, AllocFnKind::Free | AllocFnKind::Alloc |
                              AllocFnKind::Realloc)) {
    Attribute Attr = cast<CallBase>(I)->getFnAttr("alloc-family");
    if (Attr.isValid())
      return Attr.getValueAsString();
  }
  return std::nullopt;
}

/// isLibFreeFunction - Returns true if the function is a builtin free()
bool llvm::isLibFreeFunction(const Function *F, const LibFunc TLIFn) {
  std::optional<FreeFnsTy> FnData = getFreeFunctionDataForFunction(F, TLIFn);
  if (!FnData)
    return checkFnAllocKind(F, AllocFnKind::Free);

  // Check free prototype.
  // FIXME: workaround for PR5130; this will be obsolete once a nobuiltin
  // attribute exists.
  FunctionType *FTy = F->getFunctionType();
  if (!FTy->getReturnType()->isVoidTy())
    return false;
  if (FTy->getNumParams() != FnData->NumParams)
    return false;
  if (!FTy->getParamType(0)->isPointerTy())
    return false;

  return true;
}

Value *llvm::getFreedOperand(const CallBase *CB, const TargetLibraryInfo *TLI) {
  bool IsNoBuiltinCall;
  const Function *Callee = getCalledFunction(CB, IsNoBuiltinCall);
  if (Callee == nullptr || IsNoBuiltinCall)
    return nullptr;

  LibFunc TLIFn;
  if (TLI && TLI->getLibFunc(*Callee, TLIFn) && TLI->has(TLIFn) &&
      isLibFreeFunction(Callee, TLIFn)) {
    // All currently supported free functions free the first argument.
    return CB->getArgOperand(0);
  }

  if (checkFnAllocKind(CB, AllocFnKind::Free))
    return CB->getArgOperandWithAttribute(Attribute::AllocatedPointer);

  return nullptr;
}

//===----------------------------------------------------------------------===//
//  Utility functions to compute size of objects.
//
static APInt getSizeWithOverflow(const SizeOffsetType &Data) {
  if (Data.second.isNegative() || Data.first.ult(Data.second))
    return APInt(Data.first.getBitWidth(), 0);
  return Data.first - Data.second;
}

/// Compute the size of the object pointed to by Ptr. Returns true and the
/// object size in Size if successful, and false otherwise.
/// If RoundToAlign is true, then Size is rounded up to the alignment of
/// allocas, byval arguments, and global variables.
bool llvm::getObjectSize(const Value *Ptr, uint64_t &Size, const DataLayout &DL,
                         const TargetLibraryInfo *TLI, ObjectSizeOpts Opts) {
  ObjectSizeOffsetVisitor Visitor(DL, TLI, Ptr->getContext(), Opts);
  SizeOffsetType Data = Visitor.compute(const_cast<Value *>(Ptr));
  if (!Visitor.bothKnown(Data))
    return false;

  Size = getSizeWithOverflow(Data).getZExtValue();
  return true;
}

Value *llvm::lowerObjectSizeCall(IntrinsicInst *ObjectSize,
                                 const DataLayout &DL,
                                 const TargetLibraryInfo *TLI,
                                 bool MustSucceed) {
  return lowerObjectSizeCall(ObjectSize, DL, TLI, /*AAResults=*/nullptr,
                             MustSucceed);
}

Value *llvm::lowerObjectSizeCall(
    IntrinsicInst *ObjectSize, const DataLayout &DL,
    const TargetLibraryInfo *TLI, AAResults *AA, bool MustSucceed,
    SmallVectorImpl<Instruction *> *InsertedInstructions) {
  assert(ObjectSize->getIntrinsicID() == Intrinsic::objectsize &&
         "ObjectSize must be a call to llvm.objectsize!");

  bool MaxVal = cast<ConstantInt>(ObjectSize->getArgOperand(1))->isZero();
  ObjectSizeOpts EvalOptions;
  EvalOptions.AA = AA;

  // Unless we have to fold this to something, try to be as accurate as
  // possible.
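  // The second argument of llvm.objectsize selects the fallback direction:
  // false requests the maximum size and true the minimum. When we are allowed
  // to fail, prefer the exact mode so an imprecise answer is reported as
  // unknown instead of being folded to a bound.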
  if (MustSucceed)
    EvalOptions.EvalMode =
        MaxVal ? ObjectSizeOpts::Mode::Max : ObjectSizeOpts::Mode::Min;
  else
    EvalOptions.EvalMode = ObjectSizeOpts::Mode::ExactSizeFromOffset;

  EvalOptions.NullIsUnknownSize =
      cast<ConstantInt>(ObjectSize->getArgOperand(2))->isOne();

  auto *ResultType = cast<IntegerType>(ObjectSize->getType());
  bool StaticOnly = cast<ConstantInt>(ObjectSize->getArgOperand(3))->isZero();
  if (StaticOnly) {
    // FIXME: Does it make sense to just return a failure value if the size
    // won't fit in the output and `!MustSucceed`?
    uint64_t Size;
    if (getObjectSize(ObjectSize->getArgOperand(0), Size, DL, TLI,
                      EvalOptions) &&
        isUIntN(ResultType->getBitWidth(), Size))
      return ConstantInt::get(ResultType, Size);
  } else {
    LLVMContext &Ctx = ObjectSize->getFunction()->getContext();
    ObjectSizeOffsetEvaluator Eval(DL, TLI, Ctx, EvalOptions);
    SizeOffsetEvalType SizeOffsetPair =
        Eval.compute(ObjectSize->getArgOperand(0));

    if (SizeOffsetPair != ObjectSizeOffsetEvaluator::unknown()) {
      IRBuilder<TargetFolder, IRBuilderCallbackInserter> Builder(
          Ctx, TargetFolder(DL), IRBuilderCallbackInserter([&](Instruction *I) {
            if (InsertedInstructions)
              InsertedInstructions->push_back(I);
          }));
      Builder.SetInsertPoint(ObjectSize);

      // If we're outside the end of the object, then we can always access
      // exactly 0 bytes.
      Value *ResultSize =
          Builder.CreateSub(SizeOffsetPair.first, SizeOffsetPair.second);
      Value *UseZero =
          Builder.CreateICmpULT(SizeOffsetPair.first, SizeOffsetPair.second);
      ResultSize = Builder.CreateZExtOrTrunc(ResultSize, ResultType);
      Value *Ret = Builder.CreateSelect(
          UseZero, ConstantInt::get(ResultType, 0), ResultSize);

      // The non-constant size expression cannot evaluate to -1.
      if (!isa<Constant>(SizeOffsetPair.first) ||
          !isa<Constant>(SizeOffsetPair.second))
        Builder.CreateAssumption(
            Builder.CreateICmpNE(Ret, ConstantInt::get(ResultType, -1)));

      return Ret;
    }
  }

  if (!MustSucceed)
    return nullptr;

  return ConstantInt::get(ResultType, MaxVal ? -1ULL : 0);
}

STATISTIC(ObjectVisitorArgument,
          "Number of arguments with unsolved size and offset");
STATISTIC(ObjectVisitorLoad,
          "Number of load instructions with unsolved size and offset");

APInt ObjectSizeOffsetVisitor::align(APInt Size, MaybeAlign Alignment) {
  if (Options.RoundToAlign && Alignment)
    return APInt(IntTyBits, alignTo(Size.getZExtValue(), *Alignment));
  return Size;
}

ObjectSizeOffsetVisitor::ObjectSizeOffsetVisitor(const DataLayout &DL,
                                                 const TargetLibraryInfo *TLI,
                                                 LLVMContext &Context,
                                                 ObjectSizeOpts Options)
    : DL(DL), TLI(TLI), Options(Options) {
  // Pointer size must be rechecked for each object visited since it could have
  // a different address space.
}

SizeOffsetType ObjectSizeOffsetVisitor::compute(Value *V) {
  unsigned InitialIntTyBits = DL.getIndexTypeSizeInBits(V->getType());

  // Stripping pointer casts can strip address space casts which can change the
  // index type size. The invariant is that we use the value type to determine
  // the index type size and if we stripped address space casts we have to
  // readjust the APInt as we pass it upwards in order for the APInt to match
  // the type the caller passed in.
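  // For example, if the caller's pointer lives in an address space with a
  // 32-bit index type but stripping an addrspacecast exposes an object in a
  // 64-bit address space, the walk below runs at 64 bits and the result is
  // checked-truncated back to 32 bits before being returned.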
  APInt Offset(InitialIntTyBits, 0);
  V = V->stripAndAccumulateConstantOffsets(
      DL, Offset, /* AllowNonInbounds */ true, /* AllowInvariantGroup */ true);

  // Later we use the index type size and zero, but it will match the type of
  // the value that is passed to computeImpl.
  IntTyBits = DL.getIndexTypeSizeInBits(V->getType());
  Zero = APInt::getZero(IntTyBits);

  bool IndexTypeSizeChanged = InitialIntTyBits != IntTyBits;
  if (!IndexTypeSizeChanged && Offset.isZero())
    return computeImpl(V);

  // We stripped an address space cast that changed the index type size or we
  // accumulated some constant offset (or both). Readjust the bit width to
  // match the argument index type size and apply the offset, as required.
  SizeOffsetType SOT = computeImpl(V);
  if (IndexTypeSizeChanged) {
    if (knownSize(SOT) && !::CheckedZextOrTrunc(SOT.first, InitialIntTyBits))
      SOT.first = APInt();
    if (knownOffset(SOT) && !::CheckedZextOrTrunc(SOT.second, InitialIntTyBits))
      SOT.second = APInt();
  }
  // If the computed offset is "unknown" we cannot add the stripped offset.
  return {SOT.first,
          SOT.second.getBitWidth() > 1 ? SOT.second + Offset : SOT.second};
}

SizeOffsetType ObjectSizeOffsetVisitor::computeImpl(Value *V) {
  if (Instruction *I = dyn_cast<Instruction>(V)) {
    // If we have already seen this instruction, bail out. Cycles can happen in
    // unreachable code after constant propagation.
    auto P = SeenInsts.try_emplace(I, unknown());
    if (!P.second)
      return P.first->second;
    SizeOffsetType Res = visit(*I);
    // Cache the result for later visits. If we happened to visit this during
    // the above recursion, we would consider it unknown until now.
    SeenInsts[I] = Res;
    return Res;
  }
  if (Argument *A = dyn_cast<Argument>(V))
    return visitArgument(*A);
  if (ConstantPointerNull *P = dyn_cast<ConstantPointerNull>(V))
    return visitConstantPointerNull(*P);
  if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V))
    return visitGlobalAlias(*GA);
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
    return visitGlobalVariable(*GV);
  if (UndefValue *UV = dyn_cast<UndefValue>(V))
    return visitUndefValue(*UV);

  LLVM_DEBUG(dbgs() << "ObjectSizeOffsetVisitor::compute() unhandled value: "
                    << *V << '\n');
  return unknown();
}

bool ObjectSizeOffsetVisitor::CheckedZextOrTrunc(APInt &I) {
  return ::CheckedZextOrTrunc(I, IntTyBits);
}

SizeOffsetType ObjectSizeOffsetVisitor::visitAllocaInst(AllocaInst &I) {
  TypeSize ElemSize = DL.getTypeAllocSize(I.getAllocatedType());
  if (ElemSize.isScalable() && Options.EvalMode != ObjectSizeOpts::Mode::Min)
    return unknown();
  APInt Size(IntTyBits, ElemSize.getKnownMinValue());
  if (!I.isArrayAllocation())
    return std::make_pair(align(Size, I.getAlign()), Zero);

  Value *ArraySize = I.getArraySize();
  if (const ConstantInt *C = dyn_cast<ConstantInt>(ArraySize)) {
    APInt NumElems = C->getValue();
    if (!CheckedZextOrTrunc(NumElems))
      return unknown();

    bool Overflow;
    Size = Size.umul_ov(NumElems, Overflow);
    return Overflow ? unknown()
                    : std::make_pair(align(Size, I.getAlign()), Zero);
  }
  return unknown();
}

SizeOffsetType ObjectSizeOffsetVisitor::visitArgument(Argument &A) {
  Type *MemoryTy = A.getPointeeInMemoryValueType();
  // No interprocedural analysis is done at the moment.
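  // The only arguments we can size are those whose in-memory type is known,
  // e.g. a byval(%struct.S) parameter, whose object occupies the alloc size of
  // %struct.S. Plain pointer arguments fall through to the unknown case.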
  if (!MemoryTy || !MemoryTy->isSized()) {
    ++ObjectVisitorArgument;
    return unknown();
  }

  APInt Size(IntTyBits, DL.getTypeAllocSize(MemoryTy));
  return std::make_pair(align(Size, A.getParamAlign()), Zero);
}

SizeOffsetType ObjectSizeOffsetVisitor::visitCallBase(CallBase &CB) {
  if (std::optional<APInt> Size = getAllocSize(&CB, TLI))
    return std::make_pair(*Size, Zero);
  return unknown();
}

SizeOffsetType
ObjectSizeOffsetVisitor::visitConstantPointerNull(ConstantPointerNull &CPN) {
  // If null is unknown, there's nothing we can do. Additionally, non-zero
  // address spaces can make use of null, so we don't presume to know anything
  // about that.
  //
  // TODO: How should this work with address space casts? We currently just
  // drop them on the floor, but it's unclear what we should do when a NULL
  // from addrspace(1) gets cast to addrspace(0) (or vice-versa).
  if (Options.NullIsUnknownSize || CPN.getType()->getAddressSpace())
    return unknown();
  return std::make_pair(Zero, Zero);
}

SizeOffsetType
ObjectSizeOffsetVisitor::visitExtractElementInst(ExtractElementInst &) {
  return unknown();
}

SizeOffsetType
ObjectSizeOffsetVisitor::visitExtractValueInst(ExtractValueInst &) {
  // Easy cases were already folded by previous passes.
  return unknown();
}

SizeOffsetType ObjectSizeOffsetVisitor::visitGlobalAlias(GlobalAlias &GA) {
  if (GA.isInterposable())
    return unknown();
  return compute(GA.getAliasee());
}

SizeOffsetType ObjectSizeOffsetVisitor::visitGlobalVariable(GlobalVariable &GV) {
  if (!GV.getValueType()->isSized() || GV.hasExternalWeakLinkage() ||
      ((!GV.hasInitializer() || GV.isInterposable()) &&
       Options.EvalMode != ObjectSizeOpts::Mode::Min))
    return unknown();

  APInt Size(IntTyBits, DL.getTypeAllocSize(GV.getValueType()));
  return std::make_pair(align(Size, GV.getAlign()), Zero);
}

SizeOffsetType ObjectSizeOffsetVisitor::visitIntToPtrInst(IntToPtrInst &) {
  // clueless
  return unknown();
}

SizeOffsetType ObjectSizeOffsetVisitor::findLoadSizeOffset(
    LoadInst &Load, BasicBlock &BB, BasicBlock::iterator From,
    SmallDenseMap<BasicBlock *, SizeOffsetType, 8> &VisitedBlocks,
    unsigned &ScannedInstCount) {
  constexpr unsigned MaxInstsToScan = 128;

  auto Where = VisitedBlocks.find(&BB);
  if (Where != VisitedBlocks.end())
    return Where->second;

  auto Unknown = [this, &BB, &VisitedBlocks]() {
    return VisitedBlocks[&BB] = unknown();
  };
  auto Known = [&BB, &VisitedBlocks](SizeOffsetType SO) {
    return VisitedBlocks[&BB] = SO;
  };

  do {
    Instruction &I = *From;

    if (I.isDebugOrPseudoInst())
      continue;

    if (++ScannedInstCount > MaxInstsToScan)
      return Unknown();

    if (!I.mayWriteToMemory())
      continue;

    if (auto *SI = dyn_cast<StoreInst>(&I)) {
      AliasResult AR =
          Options.AA->alias(SI->getPointerOperand(), Load.getPointerOperand());
      switch ((AliasResult::Kind)AR) {
      case AliasResult::NoAlias:
        continue;
      case AliasResult::MustAlias:
        if (SI->getValueOperand()->getType()->isPointerTy())
          return Known(compute(SI->getValueOperand()));
        else
          return Unknown(); // No handling of non-pointer values by `compute`.
      default:
        return Unknown();
      }
    }

    if (auto *CB = dyn_cast<CallBase>(&I)) {
      Function *Callee = CB->getCalledFunction();
      // Bail out on indirect call.
      if (!Callee)
        return Unknown();

      LibFunc TLIFn;
      if (!TLI || !TLI->getLibFunc(*CB->getCalledFunction(), TLIFn) ||
          !TLI->has(TLIFn))
        return Unknown();

      // TODO: There are probably more interesting cases to support here.
      if (TLIFn != LibFunc_posix_memalign)
        return Unknown();

      AliasResult AR =
          Options.AA->alias(CB->getOperand(0), Load.getPointerOperand());
      switch ((AliasResult::Kind)AR) {
      case AliasResult::NoAlias:
        continue;
      case AliasResult::MustAlias:
        break;
      default:
        return Unknown();
      }

      // Is the error status of posix_memalign correctly checked? If not, it
      // would be incorrect to assume it succeeds and the load doesn't see the
      // previous value.
      std::optional<bool> Checked = isImpliedByDomCondition(
          ICmpInst::ICMP_EQ, CB, ConstantInt::get(CB->getType(), 0), &Load, DL);
      if (!Checked || !*Checked)
        return Unknown();

      Value *Size = CB->getOperand(2);
      auto *C = dyn_cast<ConstantInt>(Size);
      if (!C)
        return Unknown();

      return Known({C->getValue(), APInt(C->getValue().getBitWidth(), 0)});
    }

    return Unknown();
  } while (From-- != BB.begin());

  SmallVector<SizeOffsetType> PredecessorSizeOffsets;
  for (auto *PredBB : predecessors(&BB)) {
    PredecessorSizeOffsets.push_back(findLoadSizeOffset(
        Load, *PredBB, BasicBlock::iterator(PredBB->getTerminator()),
        VisitedBlocks, ScannedInstCount));
    if (!bothKnown(PredecessorSizeOffsets.back()))
      return Unknown();
  }

  if (PredecessorSizeOffsets.empty())
    return Unknown();

  return Known(std::accumulate(PredecessorSizeOffsets.begin() + 1,
                               PredecessorSizeOffsets.end(),
                               PredecessorSizeOffsets.front(),
                               [this](SizeOffsetType LHS, SizeOffsetType RHS) {
                                 return combineSizeOffset(LHS, RHS);
                               }));
}

SizeOffsetType ObjectSizeOffsetVisitor::visitLoadInst(LoadInst &LI) {
  if (!Options.AA) {
    ++ObjectVisitorLoad;
    return unknown();
  }

  SmallDenseMap<BasicBlock *, SizeOffsetType, 8> VisitedBlocks;
  unsigned ScannedInstCount = 0;
  SizeOffsetType SO =
      findLoadSizeOffset(LI, *LI.getParent(), BasicBlock::iterator(LI),
                         VisitedBlocks, ScannedInstCount);
  if (!bothKnown(SO))
    ++ObjectVisitorLoad;
  return SO;
}

SizeOffsetType ObjectSizeOffsetVisitor::combineSizeOffset(SizeOffsetType LHS,
                                                          SizeOffsetType RHS) {
  if (!bothKnown(LHS) || !bothKnown(RHS))
    return unknown();

  switch (Options.EvalMode) {
  case ObjectSizeOpts::Mode::Min:
    return (getSizeWithOverflow(LHS).slt(getSizeWithOverflow(RHS))) ? LHS : RHS;
  case ObjectSizeOpts::Mode::Max:
    return (getSizeWithOverflow(LHS).sgt(getSizeWithOverflow(RHS))) ? LHS : RHS;
  case ObjectSizeOpts::Mode::ExactSizeFromOffset:
    return (getSizeWithOverflow(LHS).eq(getSizeWithOverflow(RHS))) ? LHS
                                                                   : unknown();
  case ObjectSizeOpts::Mode::ExactUnderlyingSizeAndOffset:
    return LHS == RHS ? LHS : unknown();
  }
  llvm_unreachable("missing an eval mode");
}

SizeOffsetType ObjectSizeOffsetVisitor::visitPHINode(PHINode &PN) {
  if (PN.getNumIncomingValues() == 0)
    return unknown();
  auto IncomingValues = PN.incoming_values();
  return std::accumulate(IncomingValues.begin() + 1, IncomingValues.end(),
                         compute(*IncomingValues.begin()),
                         [this](SizeOffsetType LHS, Value *VRHS) {
                           return combineSizeOffset(LHS, compute(VRHS));
                         });
}

SizeOffsetType ObjectSizeOffsetVisitor::visitSelectInst(SelectInst &I) {
  return combineSizeOffset(compute(I.getTrueValue()),
                           compute(I.getFalseValue()));
}

SizeOffsetType ObjectSizeOffsetVisitor::visitUndefValue(UndefValue &) {
  return std::make_pair(Zero, Zero);
}

SizeOffsetType ObjectSizeOffsetVisitor::visitInstruction(Instruction &I) {
  LLVM_DEBUG(dbgs() << "ObjectSizeOffsetVisitor unknown instruction:" << I
                    << '\n');
  return unknown();
}

ObjectSizeOffsetEvaluator::ObjectSizeOffsetEvaluator(
    const DataLayout &DL, const TargetLibraryInfo *TLI, LLVMContext &Context,
    ObjectSizeOpts EvalOpts)
    : DL(DL), TLI(TLI), Context(Context),
      Builder(Context, TargetFolder(DL),
              IRBuilderCallbackInserter(
                  [&](Instruction *I) { InsertedInstructions.insert(I); })),
      EvalOpts(EvalOpts) {
  // IntTy and Zero must be set for each compute() since the address space may
  // be different for later objects.
}

SizeOffsetEvalType ObjectSizeOffsetEvaluator::compute(Value *V) {
  // XXX - Are vectors of pointers possible here?
  IntTy = cast<IntegerType>(DL.getIndexType(V->getType()));
  Zero = ConstantInt::get(IntTy, 0);

  SizeOffsetEvalType Result = compute_(V);

  if (!bothKnown(Result)) {
    // Erase everything that was computed in this iteration from the cache, so
    // that no dangling references are left behind. We could be a bit smarter
    // if we kept a dependency graph. It's probably not worth the complexity.
    for (const Value *SeenVal : SeenVals) {
      CacheMapTy::iterator CacheIt = CacheMap.find(SeenVal);
      // Non-computable results can be safely cached.
      if (CacheIt != CacheMap.end() && anyKnown(CacheIt->second))
        CacheMap.erase(CacheIt);
    }

    // Erase any instructions we inserted as part of the traversal.
    for (Instruction *I : InsertedInstructions) {
      I->replaceAllUsesWith(PoisonValue::get(I->getType()));
      I->eraseFromParent();
    }
  }

  SeenVals.clear();
  InsertedInstructions.clear();
  return Result;
}

SizeOffsetEvalType ObjectSizeOffsetEvaluator::compute_(Value *V) {
  ObjectSizeOffsetVisitor Visitor(DL, TLI, Context, EvalOpts);
  SizeOffsetType Const = Visitor.compute(V);
  if (Visitor.bothKnown(Const))
    return std::make_pair(ConstantInt::get(Context, Const.first),
                          ConstantInt::get(Context, Const.second));

  V = V->stripPointerCasts();

  // Check cache.
  CacheMapTy::iterator CacheIt = CacheMap.find(V);
  if (CacheIt != CacheMap.end())
    return CacheIt->second;

  // Always generate code immediately before the instruction being
  // processed, so that the generated code dominates the same BBs.
  BuilderTy::InsertPointGuard Guard(Builder);
  if (Instruction *I = dyn_cast<Instruction>(V))
    Builder.SetInsertPoint(I);

  // Now compute the size and offset.
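  // The result is a pair of IR values of type IntTy: the total size of the
  // underlying object and the offset of this pointer into it.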
  SizeOffsetEvalType Result;

  // Record the pointers that were handled in this run, so that they can be
  // cleaned later if something fails. We also use this set to break cycles
  // that can occur in dead code.
  if (!SeenVals.insert(V).second) {
    Result = unknown();
  } else if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
    Result = visitGEPOperator(*GEP);
  } else if (Instruction *I = dyn_cast<Instruction>(V)) {
    Result = visit(*I);
  } else if (isa<Argument>(V) ||
             (isa<ConstantExpr>(V) &&
              cast<ConstantExpr>(V)->getOpcode() == Instruction::IntToPtr) ||
             isa<GlobalAlias>(V) || isa<GlobalVariable>(V)) {
    // Ignore values where we cannot do more than ObjectSizeOffsetVisitor.
    Result = unknown();
  } else {
    LLVM_DEBUG(
        dbgs() << "ObjectSizeOffsetEvaluator::compute() unhandled value: " << *V
               << '\n');
    Result = unknown();
  }

  // Don't reuse CacheIt since it may be invalid at this point.
  CacheMap[V] = Result;
  return Result;
}

SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitAllocaInst(AllocaInst &I) {
  if (!I.getAllocatedType()->isSized())
    return unknown();

  // must be a VLA
  assert(I.isArrayAllocation());

  // If needed, adjust the alloca's operand size to match the pointer indexing
  // size. Subsequent math operations expect the types to match.
  Value *ArraySize = Builder.CreateZExtOrTrunc(
      I.getArraySize(),
      DL.getIndexType(I.getContext(), DL.getAllocaAddrSpace()));
  assert(ArraySize->getType() == Zero->getType() &&
         "Expected zero constant to have pointer index type");

  Value *Size = ConstantInt::get(ArraySize->getType(),
                                 DL.getTypeAllocSize(I.getAllocatedType()));
  Size = Builder.CreateMul(Size, ArraySize);
  return std::make_pair(Size, Zero);
}

SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitCallBase(CallBase &CB) {
  std::optional<AllocFnsTy> FnData = getAllocationSize(&CB, TLI);
  if (!FnData)
    return unknown();

  // Handle strdup-like functions separately.
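  // Unlike the constant-folding path in ObjectSizeOffsetVisitor, computing a
  // strdup/strndup size here would require emitting a runtime strlen call,
  // which this evaluator does not currently do.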
  if (FnData->AllocTy == StrDupLike) {
    // TODO: implement evaluation of strdup/strndup
    return unknown();
  }

  Value *FirstArg = CB.getArgOperand(FnData->FstParam);
  FirstArg = Builder.CreateZExtOrTrunc(FirstArg, IntTy);
  if (FnData->SndParam < 0)
    return std::make_pair(FirstArg, Zero);

  Value *SecondArg = CB.getArgOperand(FnData->SndParam);
  SecondArg = Builder.CreateZExtOrTrunc(SecondArg, IntTy);
  Value *Size = Builder.CreateMul(FirstArg, SecondArg);
  return std::make_pair(Size, Zero);
}

SizeOffsetEvalType
ObjectSizeOffsetEvaluator::visitExtractElementInst(ExtractElementInst &) {
  return unknown();
}

SizeOffsetEvalType
ObjectSizeOffsetEvaluator::visitExtractValueInst(ExtractValueInst &) {
  return unknown();
}

SizeOffsetEvalType
ObjectSizeOffsetEvaluator::visitGEPOperator(GEPOperator &GEP) {
  SizeOffsetEvalType PtrData = compute_(GEP.getPointerOperand());
  if (!bothKnown(PtrData))
    return unknown();

  Value *Offset = emitGEPOffset(&Builder, DL, &GEP, /*NoAssumptions=*/true);
  Offset = Builder.CreateAdd(PtrData.second, Offset);
  return std::make_pair(PtrData.first, Offset);
}

SizeOffsetEvalType
ObjectSizeOffsetEvaluator::visitIntToPtrInst(IntToPtrInst &) {
  // clueless
  return unknown();
}

SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitLoadInst(LoadInst &LI) {
  return unknown();
}

SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitPHINode(PHINode &PHI) {
  // Create 2 PHIs: one for size and another for offset.
  PHINode *SizePHI = Builder.CreatePHI(IntTy, PHI.getNumIncomingValues());
  PHINode *OffsetPHI = Builder.CreatePHI(IntTy, PHI.getNumIncomingValues());

  // Insert right away in the cache to handle recursive PHIs.
  CacheMap[&PHI] = std::make_pair(SizePHI, OffsetPHI);

  // Compute offset/size for each PHI incoming pointer.
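  // Each incoming value is evaluated with the insertion point moved into its
  // incoming block, so any code emitted for it is available on that edge.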
  for (unsigned i = 0, e = PHI.getNumIncomingValues(); i != e; ++i) {
    BasicBlock *IncomingBlock = PHI.getIncomingBlock(i);
    Builder.SetInsertPoint(IncomingBlock, IncomingBlock->getFirstInsertionPt());
    SizeOffsetEvalType EdgeData = compute_(PHI.getIncomingValue(i));

    if (!bothKnown(EdgeData)) {
      OffsetPHI->replaceAllUsesWith(PoisonValue::get(IntTy));
      OffsetPHI->eraseFromParent();
      InsertedInstructions.erase(OffsetPHI);
      SizePHI->replaceAllUsesWith(PoisonValue::get(IntTy));
      SizePHI->eraseFromParent();
      InsertedInstructions.erase(SizePHI);
      return unknown();
    }
    SizePHI->addIncoming(EdgeData.first, IncomingBlock);
    OffsetPHI->addIncoming(EdgeData.second, IncomingBlock);
  }

  Value *Size = SizePHI, *Offset = OffsetPHI;
  if (Value *Tmp = SizePHI->hasConstantValue()) {
    Size = Tmp;
    SizePHI->replaceAllUsesWith(Size);
    SizePHI->eraseFromParent();
    InsertedInstructions.erase(SizePHI);
  }
  if (Value *Tmp = OffsetPHI->hasConstantValue()) {
    Offset = Tmp;
    OffsetPHI->replaceAllUsesWith(Offset);
    OffsetPHI->eraseFromParent();
    InsertedInstructions.erase(OffsetPHI);
  }
  return std::make_pair(Size, Offset);
}

SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitSelectInst(SelectInst &I) {
  SizeOffsetEvalType TrueSide = compute_(I.getTrueValue());
  SizeOffsetEvalType FalseSide = compute_(I.getFalseValue());

  if (!bothKnown(TrueSide) || !bothKnown(FalseSide))
    return unknown();
  if (TrueSide == FalseSide)
    return TrueSide;

  Value *Size =
      Builder.CreateSelect(I.getCondition(), TrueSide.first, FalseSide.first);
  Value *Offset =
      Builder.CreateSelect(I.getCondition(), TrueSide.second, FalseSide.second);
  return std::make_pair(Size, Offset);
}

SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitInstruction(Instruction &I) {
  LLVM_DEBUG(dbgs() << "ObjectSizeOffsetEvaluator unknown instruction:" << I
                    << '\n');
  return unknown();
}