1 //===- MemoryBuiltins.cpp - Identify calls to memory builtins -------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This family of functions identifies calls to builtin functions that allocate
10 // or free memory.
11 //
12 //===----------------------------------------------------------------------===//
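//
// For illustration, a few of the call forms the helpers below classify,
// assuming the callees are declared and recognized via TargetLibraryInfo or
// the allockind/allocsize attributes:
//
//   %p = call ptr @_Znwm(i64 32)        ; operator new(unsigned long), 32 bytes
//   %s = call ptr @strdup(ptr %str)     ; strdup-like, size is strlen + 1 when known
//   %n = call i64 @llvm.objectsize.i64.p0(ptr %p, i1 false, i1 true, i1 false)
//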
13 
14 #include "llvm/Analysis/MemoryBuiltins.h"
15 #include "llvm/ADT/APInt.h"
16 #include "llvm/ADT/STLExtras.h"
17 #include "llvm/ADT/Statistic.h"
18 #include "llvm/Analysis/AliasAnalysis.h"
19 #include "llvm/Analysis/TargetFolder.h"
20 #include "llvm/Analysis/TargetLibraryInfo.h"
21 #include "llvm/Analysis/Utils/Local.h"
22 #include "llvm/Analysis/ValueTracking.h"
23 #include "llvm/IR/Argument.h"
24 #include "llvm/IR/Attributes.h"
25 #include "llvm/IR/Constants.h"
26 #include "llvm/IR/DataLayout.h"
27 #include "llvm/IR/DerivedTypes.h"
28 #include "llvm/IR/Function.h"
29 #include "llvm/IR/GlobalAlias.h"
30 #include "llvm/IR/GlobalVariable.h"
31 #include "llvm/IR/Instruction.h"
32 #include "llvm/IR/Instructions.h"
33 #include "llvm/IR/IntrinsicInst.h"
34 #include "llvm/IR/Operator.h"
35 #include "llvm/IR/Type.h"
36 #include "llvm/IR/Value.h"
37 #include "llvm/Support/Casting.h"
38 #include "llvm/Support/CommandLine.h"
39 #include "llvm/Support/Debug.h"
40 #include "llvm/Support/MathExtras.h"
41 #include "llvm/Support/raw_ostream.h"
42 #include <cassert>
43 #include <cstdint>
44 #include <iterator>
45 #include <numeric>
46 #include <optional>
47 #include <utility>
48 
49 using namespace llvm;
50 
51 #define DEBUG_TYPE "memory-builtins"
52 
53 static cl::opt<unsigned> ObjectSizeOffsetVisitorMaxVisitInstructions(
54     "object-size-offset-visitor-max-visit-instructions",
55     cl::desc("Maximum number of instructions for ObjectSizeOffsetVisitor to "
56              "look at"),
57     cl::init(100));
58 
59 enum AllocType : uint8_t {
60   OpNewLike          = 1<<0, // allocates; never returns null
61   MallocLike         = 1<<1, // allocates; may return null
62   StrDupLike         = 1<<2,
63   MallocOrOpNewLike  = MallocLike | OpNewLike,
64   AllocLike          = MallocOrOpNewLike | StrDupLike,
65   AnyAlloc           = AllocLike
66 };
67 
68 enum class MallocFamily {
69   Malloc,
70   CPPNew,             // new(unsigned int)
71   CPPNewAligned,      // new(unsigned int, align_val_t)
72   CPPNewArray,        // new[](unsigned int)
73   CPPNewArrayAligned, // new[](unsigned long, align_val_t)
74   MSVCNew,            // new(unsigned int)
75   MSVCArrayNew,       // new[](unsigned int)
76   VecMalloc,
77   KmpcAllocShared,
78 };
79 
80 StringRef mangledNameForMallocFamily(const MallocFamily &Family) {
81   switch (Family) {
82   case MallocFamily::Malloc:
83     return "malloc";
84   case MallocFamily::CPPNew:
85     return "_Znwm";
86   case MallocFamily::CPPNewAligned:
87     return "_ZnwmSt11align_val_t";
88   case MallocFamily::CPPNewArray:
89     return "_Znam";
90   case MallocFamily::CPPNewArrayAligned:
91     return "_ZnamSt11align_val_t";
92   case MallocFamily::MSVCNew:
93     return "??2@YAPAXI@Z";
94   case MallocFamily::MSVCArrayNew:
95     return "??_U@YAPAXI@Z";
96   case MallocFamily::VecMalloc:
97     return "vec_malloc";
98   case MallocFamily::KmpcAllocShared:
99     return "__kmpc_alloc_shared";
100   }
101   llvm_unreachable("missing an alloc family");
102 }
103 
104 struct AllocFnsTy {
105   AllocType AllocTy;
106   unsigned NumParams;
107   // First and Second size parameters (or -1 if unused)
108   int FstParam, SndParam;
109   // Alignment parameter for aligned_alloc and aligned new
110   int AlignParam;
111   // Name of default allocator function to group malloc/free calls by family
112   MallocFamily Family;
113 };
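// As an example of how an entry in the table below is read: the strndup row
// {StrDupLike, 2, 1, -1, -1, MallocFamily::Malloc} describes a two-parameter
// strdup-like allocator whose size is bounded by parameter 1 (the length
// limit), with no second size parameter and no alignment parameter.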
114 
115 // clang-format off
116 // FIXME: certain users need more information. E.g., SimplifyLibCalls needs to
117 // know which functions are nounwind or have noalias/nocapture parameters, etc.
118 static const std::pair<LibFunc, AllocFnsTy> AllocationFnData[] = {
119     {LibFunc_Znwj,                              {OpNewLike,        1,  0, -1, -1, MallocFamily::CPPNew}},             // new(unsigned int)
120     {LibFunc_ZnwjRKSt9nothrow_t,                {MallocLike,       2,  0, -1, -1, MallocFamily::CPPNew}},             // new(unsigned int, nothrow)
121     {LibFunc_ZnwjSt11align_val_t,               {OpNewLike,        2,  0, -1,  1, MallocFamily::CPPNewAligned}},      // new(unsigned int, align_val_t)
122     {LibFunc_ZnwjSt11align_val_tRKSt9nothrow_t, {MallocLike,       3,  0, -1,  1, MallocFamily::CPPNewAligned}},      // new(unsigned int, align_val_t, nothrow)
123     {LibFunc_Znwm,                              {OpNewLike,        1,  0, -1, -1, MallocFamily::CPPNew}},             // new(unsigned long)
124     {LibFunc_Znwm12__hot_cold_t,                  {OpNewLike,        2, 0,  -1, -1, MallocFamily::CPPNew}},             // new(unsigned long, __hot_cold_t)
125     {LibFunc_ZnwmRKSt9nothrow_t,                {MallocLike,       2,  0, -1, -1, MallocFamily::CPPNew}},             // new(unsigned long, nothrow)
126     {LibFunc_ZnwmRKSt9nothrow_t12__hot_cold_t,      {MallocLike,       3, 0,  -1, -1, MallocFamily::CPPNew}},             // new(unsigned long, nothrow, __hot_cold_t)
127     {LibFunc_ZnwmSt11align_val_t,               {OpNewLike,        2,  0, -1,  1, MallocFamily::CPPNewAligned}},      // new(unsigned long, align_val_t)
128     {LibFunc_ZnwmSt11align_val_t12__hot_cold_t,   {OpNewLike,        3, 0,  -1, 1, MallocFamily::CPPNewAligned}},       // new(unsigned long, align_val_t, __hot_cold_t)
129     {LibFunc_ZnwmSt11align_val_tRKSt9nothrow_t, {MallocLike,       3,  0, -1,  1, MallocFamily::CPPNewAligned}},      // new(unsigned long, align_val_t, nothrow)
130     {LibFunc_ZnwmSt11align_val_tRKSt9nothrow_t12__hot_cold_t, {MallocLike,  4, 0,  -1, 1, MallocFamily::CPPNewAligned}},            // new(unsigned long, align_val_t, nothrow, __hot_cold_t)
131     {LibFunc_Znaj,                              {OpNewLike,        1,  0, -1, -1, MallocFamily::CPPNewArray}},        // new[](unsigned int)
132     {LibFunc_ZnajRKSt9nothrow_t,                {MallocLike,       2,  0, -1, -1, MallocFamily::CPPNewArray}},        // new[](unsigned int, nothrow)
133     {LibFunc_ZnajSt11align_val_t,               {OpNewLike,        2,  0, -1,  1, MallocFamily::CPPNewArrayAligned}}, // new[](unsigned int, align_val_t)
134     {LibFunc_ZnajSt11align_val_tRKSt9nothrow_t, {MallocLike,       3,  0, -1,  1, MallocFamily::CPPNewArrayAligned}}, // new[](unsigned int, align_val_t, nothrow)
135     {LibFunc_Znam,                              {OpNewLike,        1,  0, -1, -1, MallocFamily::CPPNewArray}},        // new[](unsigned long)
136     {LibFunc_Znam12__hot_cold_t,                  {OpNewLike,        2, 0,  -1, -1, MallocFamily::CPPNewArray}},        // new[](unsigned long, __hot_cold_t)
137     {LibFunc_ZnamRKSt9nothrow_t,                {MallocLike,       2,  0, -1, -1, MallocFamily::CPPNewArray}},        // new[](unsigned long, nothrow)
138     {LibFunc_ZnamRKSt9nothrow_t12__hot_cold_t,      {MallocLike,       3, 0,  -1, -1, MallocFamily::CPPNewArray}},        // new[](unsigned long, nothrow, __hot_cold_t)
139     {LibFunc_ZnamSt11align_val_t,               {OpNewLike,        2,  0, -1,  1, MallocFamily::CPPNewArrayAligned}}, // new[](unsigned long, align_val_t)
140     {LibFunc_ZnamSt11align_val_t12__hot_cold_t,   {OpNewLike,        3, 0,  -1, 1, MallocFamily::CPPNewArrayAligned}},  // new[](unsigned long, align_val_t, __hot_cold_t)
141     {LibFunc_ZnamSt11align_val_tRKSt9nothrow_t, {MallocLike,       3,  0, -1,  1, MallocFamily::CPPNewArrayAligned}}, // new[](unsigned long, align_val_t, nothrow)
142     {LibFunc_ZnamSt11align_val_tRKSt9nothrow_t12__hot_cold_t, {MallocLike,  4, 0,  -1, 1, MallocFamily::CPPNewArrayAligned}},  // new[](unsigned long, align_val_t, nothrow, __hot_cold_t)
143     {LibFunc_msvc_new_int,                      {OpNewLike,        1,  0, -1, -1, MallocFamily::MSVCNew}},            // new(unsigned int)
144     {LibFunc_msvc_new_int_nothrow,              {MallocLike,       2,  0, -1, -1, MallocFamily::MSVCNew}},            // new(unsigned int, nothrow)
145     {LibFunc_msvc_new_longlong,                 {OpNewLike,        1,  0, -1, -1, MallocFamily::MSVCNew}},            // new(unsigned long long)
146     {LibFunc_msvc_new_longlong_nothrow,         {MallocLike,       2,  0, -1, -1, MallocFamily::MSVCNew}},            // new(unsigned long long, nothrow)
147     {LibFunc_msvc_new_array_int,                {OpNewLike,        1,  0, -1, -1, MallocFamily::MSVCArrayNew}},       // new[](unsigned int)
148     {LibFunc_msvc_new_array_int_nothrow,        {MallocLike,       2,  0, -1, -1, MallocFamily::MSVCArrayNew}},       // new[](unsigned int, nothrow)
149     {LibFunc_msvc_new_array_longlong,           {OpNewLike,        1,  0, -1, -1, MallocFamily::MSVCArrayNew}},       // new[](unsigned long long)
150     {LibFunc_msvc_new_array_longlong_nothrow,   {MallocLike,       2,  0, -1, -1, MallocFamily::MSVCArrayNew}},       // new[](unsigned long long, nothrow)
151     {LibFunc_strdup,                            {StrDupLike,       1, -1, -1, -1, MallocFamily::Malloc}},
152     {LibFunc_dunder_strdup,                     {StrDupLike,       1, -1, -1, -1, MallocFamily::Malloc}},
153     {LibFunc_strndup,                           {StrDupLike,       2,  1, -1, -1, MallocFamily::Malloc}},
154     {LibFunc_dunder_strndup,                    {StrDupLike,       2,  1, -1, -1, MallocFamily::Malloc}},
155     {LibFunc___kmpc_alloc_shared,               {MallocLike,       1,  0, -1, -1, MallocFamily::KmpcAllocShared}},
156 };
157 // clang-format on
158 
159 static const Function *getCalledFunction(const Value *V) {
160   // Don't care about intrinsics in this case.
161   if (isa<IntrinsicInst>(V))
162     return nullptr;
163 
164   const auto *CB = dyn_cast<CallBase>(V);
165   if (!CB)
166     return nullptr;
167 
168   if (CB->isNoBuiltin())
169     return nullptr;
170 
171   return CB->getCalledFunction();
172 }
173 
174 /// Returns the allocation data for the given value if it's a call to a known
175 /// allocation function.
176 static std::optional<AllocFnsTy>
177 getAllocationDataForFunction(const Function *Callee, AllocType AllocTy,
178                              const TargetLibraryInfo *TLI) {
179   // Don't perform a slow TLI lookup if this function doesn't return a pointer
180   // and thus can't be an allocation function.
181   if (!Callee->getReturnType()->isPointerTy())
182     return std::nullopt;
183 
184   // Make sure that the function is available.
185   LibFunc TLIFn;
186   if (!TLI || !TLI->getLibFunc(*Callee, TLIFn) || !TLI->has(TLIFn))
187     return std::nullopt;
188 
189   const auto *Iter = find_if(
190       AllocationFnData, [TLIFn](const std::pair<LibFunc, AllocFnsTy> &P) {
191         return P.first == TLIFn;
192       });
193 
194   if (Iter == std::end(AllocationFnData))
195     return std::nullopt;
196 
197   const AllocFnsTy *FnData = &Iter->second;
198   if ((FnData->AllocTy & AllocTy) != FnData->AllocTy)
199     return std::nullopt;
200 
201   // Check function prototype.
202   int FstParam = FnData->FstParam;
203   int SndParam = FnData->SndParam;
204   FunctionType *FTy = Callee->getFunctionType();
205 
206   if (FTy->getReturnType()->isPointerTy() &&
207       FTy->getNumParams() == FnData->NumParams &&
208       (FstParam < 0 ||
209        (FTy->getParamType(FstParam)->isIntegerTy(32) ||
210         FTy->getParamType(FstParam)->isIntegerTy(64))) &&
211       (SndParam < 0 ||
212        FTy->getParamType(SndParam)->isIntegerTy(32) ||
213        FTy->getParamType(SndParam)->isIntegerTy(64)))
214     return *FnData;
215   return std::nullopt;
216 }
217 
218 static std::optional<AllocFnsTy>
219 getAllocationData(const Value *V, AllocType AllocTy,
220                   const TargetLibraryInfo *TLI) {
221   if (const Function *Callee = getCalledFunction(V))
222     return getAllocationDataForFunction(Callee, AllocTy, TLI);
223   return std::nullopt;
224 }
225 
226 static std::optional<AllocFnsTy>
227 getAllocationData(const Value *V, AllocType AllocTy,
228                   function_ref<const TargetLibraryInfo &(Function &)> GetTLI) {
229   if (const Function *Callee = getCalledFunction(V))
230     return getAllocationDataForFunction(
231         Callee, AllocTy, &GetTLI(const_cast<Function &>(*Callee)));
232   return std::nullopt;
233 }
234 
235 static std::optional<AllocFnsTy>
236 getAllocationSize(const CallBase *CB, const TargetLibraryInfo *TLI) {
237   if (const Function *Callee = getCalledFunction(CB)) {
238     // Prefer to use existing information over allocsize. This will give us an
239     // accurate AllocTy.
240     if (std::optional<AllocFnsTy> Data =
241             getAllocationDataForFunction(Callee, AnyAlloc, TLI))
242       return Data;
243   }
244 
245   Attribute Attr = CB->getFnAttr(Attribute::AllocSize);
246   if (Attr == Attribute())
247     return std::nullopt;
248 
249   std::pair<unsigned, std::optional<unsigned>> Args = Attr.getAllocSizeArgs();
250 
251   AllocFnsTy Result;
252   // Because allocsize only tells us how many bytes are allocated, we're not
253   // really allowed to assume anything, so we use MallocLike.
254   Result.AllocTy = MallocLike;
255   Result.NumParams = CB->arg_size();
256   Result.FstParam = Args.first;
257   Result.SndParam = Args.second.value_or(-1);
258   // Allocsize has no way to specify an alignment argument
259   Result.AlignParam = -1;
260   return Result;
261 }
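// For illustration, a call to a hypothetical allocator declared as
//   declare ptr @my_alloc(i64, i64) allocsize(0, 1)
// (a name chosen only for this example) would be summarized here as
// NumParams = 2, FstParam = 0, SndParam = 1, AlignParam = -1, with the
// conservative MallocLike allocation type.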
262 
263 static AllocFnKind getAllocFnKind(const Value *V) {
264   if (const auto *CB = dyn_cast<CallBase>(V)) {
265     Attribute Attr = CB->getFnAttr(Attribute::AllocKind);
266     if (Attr.isValid())
267       return AllocFnKind(Attr.getValueAsInt());
268   }
269   return AllocFnKind::Unknown;
270 }
271 
272 static AllocFnKind getAllocFnKind(const Function *F) {
273   return F->getAttributes().getAllocKind();
274 }
275 
276 static bool checkFnAllocKind(const Value *V, AllocFnKind Wanted) {
277   return (getAllocFnKind(V) & Wanted) != AllocFnKind::Unknown;
278 }
279 
280 static bool checkFnAllocKind(const Function *F, AllocFnKind Wanted) {
281   return (getAllocFnKind(F) & Wanted) != AllocFnKind::Unknown;
282 }
283 
284 /// Tests if a value is a call or invoke to a library function that
285 /// allocates or reallocates memory (either malloc, calloc, realloc, or strdup
286 /// like).
287 bool llvm::isAllocationFn(const Value *V, const TargetLibraryInfo *TLI) {
288   return getAllocationData(V, AnyAlloc, TLI).has_value() ||
289          checkFnAllocKind(V, AllocFnKind::Alloc | AllocFnKind::Realloc);
290 }
291 bool llvm::isAllocationFn(
292     const Value *V,
293     function_ref<const TargetLibraryInfo &(Function &)> GetTLI) {
294   return getAllocationData(V, AnyAlloc, GetTLI).has_value() ||
295          checkFnAllocKind(V, AllocFnKind::Alloc | AllocFnKind::Realloc);
296 }
297 
298 /// Tests if a value is a call or invoke to a library function that
299 /// allocates memory via new.
300 bool llvm::isNewLikeFn(const Value *V, const TargetLibraryInfo *TLI) {
301   return getAllocationData(V, OpNewLike, TLI).has_value();
302 }
303 
304 /// Tests if a value is a call or invoke to a library function that
305 /// allocates memory similar to malloc or calloc.
306 bool llvm::isMallocOrCallocLikeFn(const Value *V, const TargetLibraryInfo *TLI) {
307   // TODO: Function behavior does not match name.
308   return getAllocationData(V, MallocOrOpNewLike, TLI).has_value();
309 }
310 
311 /// Tests if a value is a call or invoke to a library function that
312 /// allocates memory (either malloc, calloc, or strdup like).
313 bool llvm::isAllocLikeFn(const Value *V, const TargetLibraryInfo *TLI) {
314   return getAllocationData(V, AllocLike, TLI).has_value() ||
315          checkFnAllocKind(V, AllocFnKind::Alloc);
316 }
317 
318 /// Tests if a function is a library function that reallocates memory
319 /// (e.g., realloc).
320 bool llvm::isReallocLikeFn(const Function *F) {
321   return checkFnAllocKind(F, AllocFnKind::Realloc);
322 }
323 
324 Value *llvm::getReallocatedOperand(const CallBase *CB) {
325   if (checkFnAllocKind(CB, AllocFnKind::Realloc))
326     return CB->getArgOperandWithAttribute(Attribute::AllocatedPointer);
327   return nullptr;
328 }
329 
330 bool llvm::isRemovableAlloc(const CallBase *CB, const TargetLibraryInfo *TLI) {
331   // Note: Removability is highly dependent on the source language.  For
332   // example, recent C++ requires direct calls to the global allocation
333   // [basic.stc.dynamic.allocation] to be observable unless part of a new
334   // expression [expr.new paragraph 13].
335 
336   // Historically we've treated the C family allocation routines and operator
337   // new as removable
338   return isAllocLikeFn(CB, TLI);
339 }
340 
341 Value *llvm::getAllocAlignment(const CallBase *V,
342                                const TargetLibraryInfo *TLI) {
343   const std::optional<AllocFnsTy> FnData = getAllocationData(V, AnyAlloc, TLI);
344   if (FnData && FnData->AlignParam >= 0) {
345     return V->getOperand(FnData->AlignParam);
346   }
347   return V->getArgOperandWithAttribute(Attribute::AllocAlign);
348 }
349 
350 /// When we're compiling N-bit code, and the user uses parameters that are
351 /// greater than N bits (e.g. uint64_t on a 32-bit build), we can run into
352 /// trouble with APInt size issues. This function handles resizing + overflow
353 /// checks for us. Check and zext or trunc \p I depending on IntTyBits and
354 /// I's value.
355 static bool CheckedZextOrTrunc(APInt &I, unsigned IntTyBits) {
356   // More bits than we can handle. Checking the bit width isn't necessary, but
357   // it's faster than checking active bits, and should give `false` in the
358   // vast majority of cases.
359   if (I.getBitWidth() > IntTyBits && I.getActiveBits() > IntTyBits)
360     return false;
361   if (I.getBitWidth() != IntTyBits)
362     I = I.zextOrTrunc(IntTyBits);
363   return true;
364 }
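// For example, with IntTyBits = 32: an APInt(64, 100) fits, is truncated to a
// 32-bit 100, and the function returns true; an APInt(64, 1ULL << 40) has 41
// active bits, so the function returns false and leaves the value alone.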
365 
366 std::optional<APInt>
367 llvm::getAllocSize(const CallBase *CB, const TargetLibraryInfo *TLI,
368                    function_ref<const Value *(const Value *)> Mapper) {
369   // Note: This handles both explicitly listed allocation functions and
370   // allocsize.  The code structure could stand to be cleaned up a bit.
371   std::optional<AllocFnsTy> FnData = getAllocationSize(CB, TLI);
372   if (!FnData)
373     return std::nullopt;
374 
375   // Get the index type for this address space; results and intermediate
376   // computations are performed at that width.
377   auto &DL = CB->getDataLayout();
378   const unsigned IntTyBits = DL.getIndexTypeSizeInBits(CB->getType());
379 
380   // Handle strdup-like functions separately.
381   if (FnData->AllocTy == StrDupLike) {
382     APInt Size(IntTyBits, GetStringLength(Mapper(CB->getArgOperand(0))));
383     if (!Size)
384       return std::nullopt;
385 
386     // Strndup limits strlen.
387     if (FnData->FstParam > 0) {
388       const ConstantInt *Arg =
389         dyn_cast<ConstantInt>(Mapper(CB->getArgOperand(FnData->FstParam)));
390       if (!Arg)
391         return std::nullopt;
392 
393       APInt MaxSize = Arg->getValue().zext(IntTyBits);
394       if (Size.ugt(MaxSize))
395         Size = MaxSize + 1;
396     }
397     return Size;
398   }
399 
400   const ConstantInt *Arg =
401     dyn_cast<ConstantInt>(Mapper(CB->getArgOperand(FnData->FstParam)));
402   if (!Arg)
403     return std::nullopt;
404 
405   APInt Size = Arg->getValue();
406   if (!CheckedZextOrTrunc(Size, IntTyBits))
407     return std::nullopt;
408 
409   // Size is determined by just 1 parameter.
410   if (FnData->SndParam < 0)
411     return Size;
412 
413   Arg = dyn_cast<ConstantInt>(Mapper(CB->getArgOperand(FnData->SndParam)));
414   if (!Arg)
415     return std::nullopt;
416 
417   APInt NumElems = Arg->getValue();
418   if (!CheckedZextOrTrunc(NumElems, IntTyBits))
419     return std::nullopt;
420 
421   bool Overflow;
422   Size = Size.umul_ov(NumElems, Overflow);
423   if (Overflow)
424     return std::nullopt;
425   return Size;
426 }
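// For illustration: for a call like `%p = call ptr @_Znwm(i64 32)` this
// returns 32. For a hypothetical allocator with allocsize(0, 1) called with
// constant arguments 8 and 16, it returns 8 * 16 = 128, and it returns
// std::nullopt if either argument is not constant or the product overflows
// the index width.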
427 
428 Constant *llvm::getInitialValueOfAllocation(const Value *V,
429                                             const TargetLibraryInfo *TLI,
430                                             Type *Ty) {
431   auto *Alloc = dyn_cast<CallBase>(V);
432   if (!Alloc)
433     return nullptr;
434 
435   // Malloc-like and operator new-like allocations are uninitialized (undef).
436   if (getAllocationData(Alloc, MallocOrOpNewLike, TLI).has_value())
437     return UndefValue::get(Ty);
438 
439   AllocFnKind AK = getAllocFnKind(Alloc);
440   if ((AK & AllocFnKind::Uninitialized) != AllocFnKind::Unknown)
441     return UndefValue::get(Ty);
442   if ((AK & AllocFnKind::Zeroed) != AllocFnKind::Unknown)
443     return Constant::getNullValue(Ty);
444 
445   return nullptr;
446 }
447 
448 struct FreeFnsTy {
449   unsigned NumParams;
450   // Name of default allocator function to group malloc/free calls by family
451   MallocFamily Family;
452 };
453 
454 // clang-format off
455 static const std::pair<LibFunc, FreeFnsTy> FreeFnData[] = {
456     {LibFunc_ZdlPv,                              {1, MallocFamily::CPPNew}},             // operator delete(void*)
457     {LibFunc_ZdaPv,                              {1, MallocFamily::CPPNewArray}},        // operator delete[](void*)
458     {LibFunc_msvc_delete_ptr32,                  {1, MallocFamily::MSVCNew}},            // operator delete(void*)
459     {LibFunc_msvc_delete_ptr64,                  {1, MallocFamily::MSVCNew}},            // operator delete(void*)
460     {LibFunc_msvc_delete_array_ptr32,            {1, MallocFamily::MSVCArrayNew}},       // operator delete[](void*)
461     {LibFunc_msvc_delete_array_ptr64,            {1, MallocFamily::MSVCArrayNew}},       // operator delete[](void*)
462     {LibFunc_ZdlPvj,                             {2, MallocFamily::CPPNew}},             // delete(void*, uint)
463     {LibFunc_ZdlPvm,                             {2, MallocFamily::CPPNew}},             // delete(void*, ulong)
464     {LibFunc_ZdlPvRKSt9nothrow_t,                {2, MallocFamily::CPPNew}},             // delete(void*, nothrow)
465     {LibFunc_ZdlPvSt11align_val_t,               {2, MallocFamily::CPPNewAligned}},      // delete(void*, align_val_t)
466     {LibFunc_ZdaPvj,                             {2, MallocFamily::CPPNewArray}},        // delete[](void*, uint)
467     {LibFunc_ZdaPvm,                             {2, MallocFamily::CPPNewArray}},        // delete[](void*, ulong)
468     {LibFunc_ZdaPvRKSt9nothrow_t,                {2, MallocFamily::CPPNewArray}},        // delete[](void*, nothrow)
469     {LibFunc_ZdaPvSt11align_val_t,               {2, MallocFamily::CPPNewArrayAligned}}, // delete[](void*, align_val_t)
470     {LibFunc_msvc_delete_ptr32_int,              {2, MallocFamily::MSVCNew}},            // delete(void*, uint)
471     {LibFunc_msvc_delete_ptr64_longlong,         {2, MallocFamily::MSVCNew}},            // delete(void*, ulonglong)
472     {LibFunc_msvc_delete_ptr32_nothrow,          {2, MallocFamily::MSVCNew}},            // delete(void*, nothrow)
473     {LibFunc_msvc_delete_ptr64_nothrow,          {2, MallocFamily::MSVCNew}},            // delete(void*, nothrow)
474     {LibFunc_msvc_delete_array_ptr32_int,        {2, MallocFamily::MSVCArrayNew}},       // delete[](void*, uint)
475     {LibFunc_msvc_delete_array_ptr64_longlong,   {2, MallocFamily::MSVCArrayNew}},       // delete[](void*, ulonglong)
476     {LibFunc_msvc_delete_array_ptr32_nothrow,    {2, MallocFamily::MSVCArrayNew}},       // delete[](void*, nothrow)
477     {LibFunc_msvc_delete_array_ptr64_nothrow,    {2, MallocFamily::MSVCArrayNew}},       // delete[](void*, nothrow)
478     {LibFunc___kmpc_free_shared,                 {2, MallocFamily::KmpcAllocShared}},    // OpenMP Offloading RTL free
479     {LibFunc_ZdlPvSt11align_val_tRKSt9nothrow_t, {3, MallocFamily::CPPNewAligned}},      // delete(void*, align_val_t, nothrow)
480     {LibFunc_ZdaPvSt11align_val_tRKSt9nothrow_t, {3, MallocFamily::CPPNewArrayAligned}}, // delete[](void*, align_val_t, nothrow)
481     {LibFunc_ZdlPvjSt11align_val_t,              {3, MallocFamily::CPPNewAligned}},      // delete(void*, unsigned int, align_val_t)
482     {LibFunc_ZdlPvmSt11align_val_t,              {3, MallocFamily::CPPNewAligned}},      // delete(void*, unsigned long, align_val_t)
483     {LibFunc_ZdaPvjSt11align_val_t,              {3, MallocFamily::CPPNewArrayAligned}}, // delete[](void*, unsigned int, align_val_t)
484     {LibFunc_ZdaPvmSt11align_val_t,              {3, MallocFamily::CPPNewArrayAligned}}, // delete[](void*, unsigned long, align_val_t)
485 };
486 // clang-format on
487 
488 std::optional<FreeFnsTy> getFreeFunctionDataForFunction(const Function *Callee,
489                                                         const LibFunc TLIFn) {
490   const auto *Iter =
491       find_if(FreeFnData, [TLIFn](const std::pair<LibFunc, FreeFnsTy> &P) {
492         return P.first == TLIFn;
493       });
494   if (Iter == std::end(FreeFnData))
495     return std::nullopt;
496   return Iter->second;
497 }
498 
499 std::optional<StringRef>
500 llvm::getAllocationFamily(const Value *I, const TargetLibraryInfo *TLI) {
501   if (const Function *Callee = getCalledFunction(I)) {
502     LibFunc TLIFn;
503     if (TLI && TLI->getLibFunc(*Callee, TLIFn) && TLI->has(TLIFn)) {
504       // Callee is some known library function.
505       const auto AllocData =
506           getAllocationDataForFunction(Callee, AnyAlloc, TLI);
507       if (AllocData)
508         return mangledNameForMallocFamily(AllocData->Family);
509       const auto FreeData = getFreeFunctionDataForFunction(Callee, TLIFn);
510       if (FreeData)
511         return mangledNameForMallocFamily(FreeData->Family);
512     }
513   }
514 
515   // Callee isn't a known library function, still check attributes.
516   if (checkFnAllocKind(I, AllocFnKind::Free | AllocFnKind::Alloc |
517                               AllocFnKind::Realloc)) {
518     Attribute Attr = cast<CallBase>(I)->getFnAttr("alloc-family");
519     if (Attr.isValid())
520       return Attr.getValueAsString();
521   }
522   return std::nullopt;
523 }
524 
525 /// isLibFreeFunction - Returns true if the function is a builtin free()
526 bool llvm::isLibFreeFunction(const Function *F, const LibFunc TLIFn) {
527   std::optional<FreeFnsTy> FnData = getFreeFunctionDataForFunction(F, TLIFn);
528   if (!FnData)
529     return checkFnAllocKind(F, AllocFnKind::Free);
530 
531   // Check free prototype.
532   // FIXME: workaround for PR5130; this will be obsolete when a nobuiltin
533   // attribute exists.
534   FunctionType *FTy = F->getFunctionType();
535   if (!FTy->getReturnType()->isVoidTy())
536     return false;
537   if (FTy->getNumParams() != FnData->NumParams)
538     return false;
539   if (!FTy->getParamType(0)->isPointerTy())
540     return false;
541 
542   return true;
543 }
544 
545 Value *llvm::getFreedOperand(const CallBase *CB, const TargetLibraryInfo *TLI) {
546   if (const Function *Callee = getCalledFunction(CB)) {
547     LibFunc TLIFn;
548     if (TLI && TLI->getLibFunc(*Callee, TLIFn) && TLI->has(TLIFn) &&
549         isLibFreeFunction(Callee, TLIFn)) {
550       // All currently supported free functions free the first argument.
551       return CB->getArgOperand(0);
552     }
553   }
554 
555   if (checkFnAllocKind(CB, AllocFnKind::Free))
556     return CB->getArgOperandWithAttribute(Attribute::AllocatedPointer);
557 
558   return nullptr;
559 }
560 
561 //===----------------------------------------------------------------------===//
562 //  Utility functions to compute size of objects.
563 //
564 static APInt getSizeWithOverflow(const SizeOffsetAPInt &Data) {
565   APInt Size = Data.Size;
566   APInt Offset = Data.Offset;
567 
568   if (Offset.isNegative() || Size.ult(Offset))
569     return APInt::getZero(Size.getBitWidth());
570 
571   return Size - Offset;
572 }
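// For example, Size = 16 and Offset = 4 give 12 accessible bytes; a negative
// offset, or an offset past the end of the object, gives 0.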
573 
574 /// Compute the size of the object pointed to by Ptr. Returns true and the
575 /// object size in Size if successful, and false otherwise.
576 /// If RoundToAlign is true, then Size is rounded up to the alignment of
577 /// allocas, byval arguments, and global variables.
578 bool llvm::getObjectSize(const Value *Ptr, uint64_t &Size, const DataLayout &DL,
579                          const TargetLibraryInfo *TLI, ObjectSizeOpts Opts) {
580   ObjectSizeOffsetVisitor Visitor(DL, TLI, Ptr->getContext(), Opts);
581   SizeOffsetAPInt Data = Visitor.compute(const_cast<Value *>(Ptr));
582   if (!Data.bothKnown())
583     return false;
584 
585   Size = getSizeWithOverflow(Data).getZExtValue();
586   return true;
587 }
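// A minimal caller-side sketch (local variable names are illustrative only):
//
//   uint64_t ObjSize;
//   ObjectSizeOpts Opts;
//   Opts.RoundToAlign = true;
//   if (getObjectSize(Ptr, ObjSize, DL, &TLI, Opts))
//     /* ObjSize now holds the accessible size starting at Ptr */;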
588 
589 Value *llvm::lowerObjectSizeCall(IntrinsicInst *ObjectSize,
590                                  const DataLayout &DL,
591                                  const TargetLibraryInfo *TLI,
592                                  bool MustSucceed) {
593   return lowerObjectSizeCall(ObjectSize, DL, TLI, /*AAResults=*/nullptr,
594                              MustSucceed);
595 }
596 
597 Value *llvm::lowerObjectSizeCall(
598     IntrinsicInst *ObjectSize, const DataLayout &DL,
599     const TargetLibraryInfo *TLI, AAResults *AA, bool MustSucceed,
600     SmallVectorImpl<Instruction *> *InsertedInstructions) {
601   assert(ObjectSize->getIntrinsicID() == Intrinsic::objectsize &&
602          "ObjectSize must be a call to llvm.objectsize!");
603 
604   bool MaxVal = cast<ConstantInt>(ObjectSize->getArgOperand(1))->isZero();
605   ObjectSizeOpts EvalOptions;
606   EvalOptions.AA = AA;
607 
608   // Unless we have to fold this to something, try to be as accurate as
609   // possible.
610   if (MustSucceed)
611     EvalOptions.EvalMode =
612         MaxVal ? ObjectSizeOpts::Mode::Max : ObjectSizeOpts::Mode::Min;
613   else
614     EvalOptions.EvalMode = ObjectSizeOpts::Mode::ExactSizeFromOffset;
615 
616   EvalOptions.NullIsUnknownSize =
617       cast<ConstantInt>(ObjectSize->getArgOperand(2))->isOne();
618 
619   auto *ResultType = cast<IntegerType>(ObjectSize->getType());
620   bool StaticOnly = cast<ConstantInt>(ObjectSize->getArgOperand(3))->isZero();
621   if (StaticOnly) {
622     // FIXME: Does it make sense to just return a failure value if the size won't
623     // fit in the output and `!MustSucceed`?
624     uint64_t Size;
625     if (getObjectSize(ObjectSize->getArgOperand(0), Size, DL, TLI, EvalOptions) &&
626         isUIntN(ResultType->getBitWidth(), Size))
627       return ConstantInt::get(ResultType, Size);
628   } else {
629     LLVMContext &Ctx = ObjectSize->getFunction()->getContext();
630     ObjectSizeOffsetEvaluator Eval(DL, TLI, Ctx, EvalOptions);
631     SizeOffsetValue SizeOffsetPair = Eval.compute(ObjectSize->getArgOperand(0));
632 
633     if (SizeOffsetPair != ObjectSizeOffsetEvaluator::unknown()) {
634       IRBuilder<TargetFolder, IRBuilderCallbackInserter> Builder(
635           Ctx, TargetFolder(DL), IRBuilderCallbackInserter([&](Instruction *I) {
636             if (InsertedInstructions)
637               InsertedInstructions->push_back(I);
638           }));
639       Builder.SetInsertPoint(ObjectSize);
640 
641       Value *Size = SizeOffsetPair.Size;
642       Value *Offset = SizeOffsetPair.Offset;
643 
644       // If we've outside the end of the object, then we can always access
645       // exactly 0 bytes.
646       Value *ResultSize = Builder.CreateSub(Size, Offset);
647       Value *UseZero = Builder.CreateICmpULT(Size, Offset);
648       ResultSize = Builder.CreateZExtOrTrunc(ResultSize, ResultType);
649       Value *Ret = Builder.CreateSelect(
650           UseZero, ConstantInt::get(ResultType, 0), ResultSize);
651 
652       // The non-constant size expression cannot evaluate to -1.
653       if (!isa<Constant>(Size) || !isa<Constant>(Offset))
654         Builder.CreateAssumption(
655             Builder.CreateICmpNE(Ret, ConstantInt::get(ResultType, -1)));
656 
657       return Ret;
658     }
659   }
660 
661   if (!MustSucceed)
662     return nullptr;
663 
664   return MaxVal ? Constant::getAllOnesValue(ResultType)
665                 : Constant::getNullValue(ResultType);
666 }
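// For illustration, when %p is known to point to a fresh 32-byte allocation,
//   %n = call i64 @llvm.objectsize.i64.p0(ptr %p, i1 false, i1 true, i1 false)
// folds to i64 32 via the static path above; if nothing is known and
// MustSucceed is set, it folds to -1 here (max mode) or 0 (min mode).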
667 
668 STATISTIC(ObjectVisitorArgument,
669           "Number of arguments with unsolved size and offset");
670 STATISTIC(ObjectVisitorLoad,
671           "Number of load instructions with unsolved size and offset");
672 
673 static std::optional<APInt>
674 combinePossibleConstantValues(std::optional<APInt> LHS,
675                               std::optional<APInt> RHS,
676                               ObjectSizeOpts::Mode EvalMode) {
677   if (!LHS || !RHS)
678     return std::nullopt;
679   if (EvalMode == ObjectSizeOpts::Mode::Max)
680     return LHS->sge(*RHS) ? *LHS : *RHS;
681   else
682     return LHS->sle(*RHS) ? *LHS : *RHS;
683 }
684 
685 static std::optional<APInt> aggregatePossibleConstantValuesImpl(
686     const Value *V, ObjectSizeOpts::Mode EvalMode, unsigned recursionDepth) {
687   constexpr unsigned maxRecursionDepth = 4;
688   if (recursionDepth == maxRecursionDepth)
689     return std::nullopt;
690 
691   if (const auto *CI = dyn_cast<ConstantInt>(V)) {
692     return CI->getValue();
693   } else if (const auto *SI = dyn_cast<SelectInst>(V)) {
694     return combinePossibleConstantValues(
695         aggregatePossibleConstantValuesImpl(SI->getTrueValue(), EvalMode,
696                                             recursionDepth + 1),
697         aggregatePossibleConstantValuesImpl(SI->getFalseValue(), EvalMode,
698                                             recursionDepth + 1),
699         EvalMode);
700   } else if (const auto *PN = dyn_cast<PHINode>(V)) {
701     unsigned Count = PN->getNumIncomingValues();
702     if (Count == 0)
703       return std::nullopt;
704     auto Acc = aggregatePossibleConstantValuesImpl(
705         PN->getIncomingValue(0), EvalMode, recursionDepth + 1);
706     for (unsigned I = 1; Acc && I < Count; ++I) {
707       auto Tmp = aggregatePossibleConstantValuesImpl(
708           PN->getIncomingValue(I), EvalMode, recursionDepth + 1);
709       Acc = combinePossibleConstantValues(Acc, Tmp, EvalMode);
710     }
711     return Acc;
712   }
713 
714   return std::nullopt;
715 }
716 
717 static std::optional<APInt>
718 aggregatePossibleConstantValues(const Value *V, ObjectSizeOpts::Mode EvalMode) {
719   if (auto *CI = dyn_cast<ConstantInt>(V))
720     return CI->getValue();
721 
722   if (EvalMode != ObjectSizeOpts::Mode::Min &&
723       EvalMode != ObjectSizeOpts::Mode::Max)
724     return std::nullopt;
725 
726   // Not using computeConstantRange here because we cannot guarantee it's not
727   // doing optimization based on UB which we want to avoid when expanding
728   // __builtin_object_size.
729   return aggregatePossibleConstantValuesImpl(V, EvalMode, 0u);
730 }
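// For example, `select i1 %c, i64 12, i64 20` aggregates to 20 in Max mode and
// to 12 in Min mode; a phi over the constants 8 and 24 behaves the same way.
// Any non-constant leaf, or nesting beyond the small recursion limit, yields
// std::nullopt.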
731 
732 /// Align \p Size according to \p Alignment. If \p Size is greater than
733 /// getSignedMaxValue(), set it as unknown as we can only represent signed value
734 /// in OffsetSpan.
735 APInt ObjectSizeOffsetVisitor::align(APInt Size, MaybeAlign Alignment) {
736   if (Options.RoundToAlign && Alignment)
737     Size = APInt(IntTyBits, alignTo(Size.getZExtValue(), *Alignment));
738 
739   return Size.isNegative() ? APInt() : Size;
740 }
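// For example, with RoundToAlign enabled, a 10-byte object with 8-byte
// alignment is reported as 16 bytes; without it, the size is returned as-is.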
741 
742 ObjectSizeOffsetVisitor::ObjectSizeOffsetVisitor(const DataLayout &DL,
743                                                  const TargetLibraryInfo *TLI,
744                                                  LLVMContext &Context,
745                                                  ObjectSizeOpts Options)
746     : DL(DL), TLI(TLI), Options(Options) {
747   // Pointer size must be rechecked for each object visited since it could have
748   // a different address space.
749 }
750 
751 SizeOffsetAPInt ObjectSizeOffsetVisitor::compute(Value *V) {
752   InstructionsVisited = 0;
753   OffsetSpan Span = computeImpl(V);
754 
755   // In ExactSizeFromOffset mode, we don't care about the Before field, so allow
756   // ourselves to overwrite it if need be.
757   if (Span.knownAfter() && !Span.knownBefore() &&
758       Options.EvalMode == ObjectSizeOpts::Mode::ExactSizeFromOffset)
759     Span.Before = APInt::getZero(Span.After.getBitWidth());
760 
761   if (!Span.bothKnown())
762     return {};
763 
764   return {Span.Before + Span.After, Span.Before};
765 }
766 
767 OffsetSpan ObjectSizeOffsetVisitor::computeImpl(Value *V) {
768   unsigned InitialIntTyBits = DL.getIndexTypeSizeInBits(V->getType());
769 
770   // Stripping pointer casts can strip address space casts which can change the
771   // index type size. The invariant is that we use the value type to determine
772   // the index type size and if we stripped address space casts we have to
773   // readjust the APInt as we pass it upwards in order for the APInt to match
774   // the type the caller passed in.
775   APInt Offset(InitialIntTyBits, 0);
776   V = V->stripAndAccumulateConstantOffsets(
777       DL, Offset, /* AllowNonInbounds */ true, /* AllowInvariantGroup */ true);
778 
779   // Give it another try with an approximate analysis. We don't start with this
780   // one because stripAndAccumulateConstantOffsets behaves differently wrt.
781   // overflows if we provide an external Analysis.
782   if ((Options.EvalMode == ObjectSizeOpts::Mode::Min ||
783        Options.EvalMode == ObjectSizeOpts::Mode::Max) &&
784       isa<GEPOperator>(V)) {
785     // External Analysis used to compute the Min/Max value of individual Offsets
786     // within a GEP.
787     ObjectSizeOpts::Mode EvalMode =
788         Options.EvalMode == ObjectSizeOpts::Mode::Min
789             ? ObjectSizeOpts::Mode::Max
790             : ObjectSizeOpts::Mode::Min;
791     auto OffsetRangeAnalysis = [EvalMode](Value &VOffset, APInt &Offset) {
792       if (auto PossibleOffset =
793               aggregatePossibleConstantValues(&VOffset, EvalMode)) {
794         Offset = *PossibleOffset;
795         return true;
796       }
797       return false;
798     };
799 
800     V = V->stripAndAccumulateConstantOffsets(
801         DL, Offset, /* AllowNonInbounds */ true, /* AllowInvariantGroup */ true,
802         /*ExternalAnalysis=*/OffsetRangeAnalysis);
803   }
804 
805   // Later we use the index type size and zero but it will match the type of the
806   // value that is passed to computeImpl.
807   IntTyBits = DL.getIndexTypeSizeInBits(V->getType());
808   Zero = APInt::getZero(IntTyBits);
809   OffsetSpan ORT = computeValue(V);
810 
811   bool IndexTypeSizeChanged = InitialIntTyBits != IntTyBits;
812   if (!IndexTypeSizeChanged && Offset.isZero())
813     return ORT;
814 
815   // We stripped an address space cast that changed the index type size or we
816   // accumulated some constant offset (or both). Readjust the bit width to match
817   // the argument index type size and apply the offset, as required.
818   if (IndexTypeSizeChanged) {
819     if (ORT.knownBefore() &&
820         !::CheckedZextOrTrunc(ORT.Before, InitialIntTyBits))
821       ORT.Before = APInt();
822     if (ORT.knownAfter() && !::CheckedZextOrTrunc(ORT.After, InitialIntTyBits))
823       ORT.After = APInt();
824   }
825   // If the computed bound is "unknown" we cannot add the stripped offset.
826   if (ORT.knownBefore()) {
827     bool Overflow;
828     ORT.Before = ORT.Before.sadd_ov(Offset, Overflow);
829     if (Overflow)
830       ORT.Before = APInt();
831   }
832   if (ORT.knownAfter()) {
833     bool Overflow;
834     ORT.After = ORT.After.ssub_ov(Offset, Overflow);
835     if (Overflow)
836       ORT.After = APInt();
837   }
838 
839   // We end up pointing on a location that's outside of the original object.
840   if (ORT.knownBefore() && ORT.Before.isNegative()) {
841     // This means that we *may* be accessing memory before the allocation.
842     // Conservatively return an unknown size.
843     //
844     // TODO: working with ranges instead of values would make it possible to make
845     // a better decision.
846     if (Options.EvalMode == ObjectSizeOpts::Mode::Min ||
847         Options.EvalMode == ObjectSizeOpts::Mode::Max) {
848       return ObjectSizeOffsetVisitor::unknown();
849     }
850     // Otherwise it's fine, caller can handle negative offset.
851   }
852   return ORT;
853 }
854 
855 OffsetSpan ObjectSizeOffsetVisitor::computeValue(Value *V) {
856   if (Instruction *I = dyn_cast<Instruction>(V)) {
857     // If we have already seen this instruction, bail out. Cycles can happen in
858     // unreachable code after constant propagation.
859     auto P = SeenInsts.try_emplace(I, ObjectSizeOffsetVisitor::unknown());
860     if (!P.second)
861       return P.first->second;
862     ++InstructionsVisited;
863     if (InstructionsVisited > ObjectSizeOffsetVisitorMaxVisitInstructions)
864       return ObjectSizeOffsetVisitor::unknown();
865     OffsetSpan Res = visit(*I);
866     // Cache the result for later visits. If we happened to visit this during
867     // the above recursion, we would consider it unknown until now.
868     SeenInsts[I] = Res;
869     return Res;
870   }
871   if (Argument *A = dyn_cast<Argument>(V))
872     return visitArgument(*A);
873   if (ConstantPointerNull *P = dyn_cast<ConstantPointerNull>(V))
874     return visitConstantPointerNull(*P);
875   if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V))
876     return visitGlobalAlias(*GA);
877   if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
878     return visitGlobalVariable(*GV);
879   if (UndefValue *UV = dyn_cast<UndefValue>(V))
880     return visitUndefValue(*UV);
881 
882   LLVM_DEBUG(dbgs() << "ObjectSizeOffsetVisitor::compute() unhandled value: "
883                     << *V << '\n');
884   return ObjectSizeOffsetVisitor::unknown();
885 }
886 
887 bool ObjectSizeOffsetVisitor::CheckedZextOrTrunc(APInt &I) {
888   return ::CheckedZextOrTrunc(I, IntTyBits);
889 }
890 
891 OffsetSpan ObjectSizeOffsetVisitor::visitAllocaInst(AllocaInst &I) {
892   TypeSize ElemSize = DL.getTypeAllocSize(I.getAllocatedType());
893   if (ElemSize.isScalable() && Options.EvalMode != ObjectSizeOpts::Mode::Min)
894     return ObjectSizeOffsetVisitor::unknown();
895   if (!isUIntN(IntTyBits, ElemSize.getKnownMinValue()))
896     return ObjectSizeOffsetVisitor::unknown();
897   APInt Size(IntTyBits, ElemSize.getKnownMinValue());
898 
899   if (!I.isArrayAllocation())
900     return OffsetSpan(Zero, align(Size, I.getAlign()));
901 
902   Value *ArraySize = I.getArraySize();
903   if (auto PossibleSize =
904           aggregatePossibleConstantValues(ArraySize, Options.EvalMode)) {
905     APInt NumElems = *PossibleSize;
906     if (!CheckedZextOrTrunc(NumElems))
907       return ObjectSizeOffsetVisitor::unknown();
908 
909     bool Overflow;
910     Size = Size.umul_ov(NumElems, Overflow);
911 
912     return Overflow ? ObjectSizeOffsetVisitor::unknown()
913                     : OffsetSpan(Zero, align(Size, I.getAlign()));
914   }
915   return ObjectSizeOffsetVisitor::unknown();
916 }
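// For example, `%a = alloca [10 x i32]` yields OffsetSpan(0, 40) (before any
// alignment rounding), while `%a = alloca i32, i64 %n` is only computable when
// %n can be bounded by a constant, e.g. through a select or phi of constants
// in Min/Max mode.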
917 
918 OffsetSpan ObjectSizeOffsetVisitor::visitArgument(Argument &A) {
919   Type *MemoryTy = A.getPointeeInMemoryValueType();
920   // No interprocedural analysis is done at the moment.
921   if (!MemoryTy || !MemoryTy->isSized()) {
922     ++ObjectVisitorArgument;
923     return ObjectSizeOffsetVisitor::unknown();
924   }
925 
926   APInt Size(IntTyBits, DL.getTypeAllocSize(MemoryTy));
927   return OffsetSpan(Zero, align(Size, A.getParamAlign()));
928 }
929 
930 OffsetSpan ObjectSizeOffsetVisitor::visitCallBase(CallBase &CB) {
931   auto Mapper = [this](const Value *V) -> const Value * {
932     if (!V->getType()->isIntegerTy())
933       return V;
934 
935     if (auto PossibleBound =
936             aggregatePossibleConstantValues(V, Options.EvalMode))
937       return ConstantInt::get(V->getType(), *PossibleBound);
938 
939     return V;
940   };
941 
942   if (std::optional<APInt> Size = getAllocSize(&CB, TLI, Mapper)) {
943     // A very large unsigned value cannot be represented as an OffsetSpan.
944     if (Size->isNegative())
945       return ObjectSizeOffsetVisitor::unknown();
946     return OffsetSpan(Zero, *Size);
947   }
948   return ObjectSizeOffsetVisitor::unknown();
949 }
950 
951 OffsetSpan
952 ObjectSizeOffsetVisitor::visitConstantPointerNull(ConstantPointerNull &CPN) {
953   // If null is unknown, there's nothing we can do. Additionally, non-zero
954   // address spaces can make use of null, so we don't presume to know anything
955   // about that.
956   //
957   // TODO: How should this work with address space casts? We currently just drop
958   // them on the floor, but it's unclear what we should do when a NULL from
959   // addrspace(1) gets cast to addrspace(0) (or vice versa).
960   if (Options.NullIsUnknownSize || CPN.getType()->getAddressSpace())
961     return ObjectSizeOffsetVisitor::unknown();
962   return OffsetSpan(Zero, Zero);
963 }
964 
965 OffsetSpan
966 ObjectSizeOffsetVisitor::visitExtractElementInst(ExtractElementInst &) {
967   return ObjectSizeOffsetVisitor::unknown();
968 }
969 
970 OffsetSpan ObjectSizeOffsetVisitor::visitExtractValueInst(ExtractValueInst &) {
971   // Easy cases were already folded by previous passes.
972   return ObjectSizeOffsetVisitor::unknown();
973 }
974 
975 OffsetSpan ObjectSizeOffsetVisitor::visitGlobalAlias(GlobalAlias &GA) {
976   if (GA.isInterposable())
977     return ObjectSizeOffsetVisitor::unknown();
978   return computeImpl(GA.getAliasee());
979 }
980 
981 OffsetSpan ObjectSizeOffsetVisitor::visitGlobalVariable(GlobalVariable &GV) {
982   if (!GV.getValueType()->isSized() || GV.hasExternalWeakLinkage() ||
983       ((!GV.hasInitializer() || GV.isInterposable()) &&
984        Options.EvalMode != ObjectSizeOpts::Mode::Min))
985     return ObjectSizeOffsetVisitor::unknown();
986 
987   APInt Size(IntTyBits, DL.getTypeAllocSize(GV.getValueType()));
988   return OffsetSpan(Zero, align(Size, GV.getAlign()));
989 }
990 
991 OffsetSpan ObjectSizeOffsetVisitor::visitIntToPtrInst(IntToPtrInst &) {
992   // clueless
993   return ObjectSizeOffsetVisitor::unknown();
994 }
995 
996 OffsetSpan ObjectSizeOffsetVisitor::findLoadOffsetRange(
997     LoadInst &Load, BasicBlock &BB, BasicBlock::iterator From,
998     SmallDenseMap<BasicBlock *, OffsetSpan, 8> &VisitedBlocks,
999     unsigned &ScannedInstCount) {
1000   constexpr unsigned MaxInstsToScan = 128;
1001 
1002   auto Where = VisitedBlocks.find(&BB);
1003   if (Where != VisitedBlocks.end())
1004     return Where->second;
1005 
1006   auto Unknown = [&BB, &VisitedBlocks]() {
1007     return VisitedBlocks[&BB] = ObjectSizeOffsetVisitor::unknown();
1008   };
1009   auto Known = [&BB, &VisitedBlocks](OffsetSpan SO) {
1010     return VisitedBlocks[&BB] = SO;
1011   };
1012 
1013   do {
1014     Instruction &I = *From;
1015 
1016     if (I.isDebugOrPseudoInst())
1017       continue;
1018 
1019     if (++ScannedInstCount > MaxInstsToScan)
1020       return Unknown();
1021 
1022     if (!I.mayWriteToMemory())
1023       continue;
1024 
1025     if (auto *SI = dyn_cast<StoreInst>(&I)) {
1026       AliasResult AR =
1027           Options.AA->alias(SI->getPointerOperand(), Load.getPointerOperand());
1028       switch ((AliasResult::Kind)AR) {
1029       case AliasResult::NoAlias:
1030         continue;
1031       case AliasResult::MustAlias:
1032         if (SI->getValueOperand()->getType()->isPointerTy())
1033           return Known(computeImpl(SI->getValueOperand()));
1034         else
1035           return Unknown(); // No handling of non-pointer values by `compute`.
1036       default:
1037         return Unknown();
1038       }
1039     }
1040 
1041     if (auto *CB = dyn_cast<CallBase>(&I)) {
1042       Function *Callee = CB->getCalledFunction();
1043       // Bail out on indirect call.
1044       if (!Callee)
1045         return Unknown();
1046 
1047       LibFunc TLIFn;
1048       if (!TLI || !TLI->getLibFunc(*Callee, TLIFn) ||
1049           !TLI->has(TLIFn))
1050         return Unknown();
1051 
1052       // TODO: There are probably more interesting cases to support here.
1053       if (TLIFn != LibFunc_posix_memalign)
1054         return Unknown();
1055 
1056       AliasResult AR =
1057           Options.AA->alias(CB->getOperand(0), Load.getPointerOperand());
1058       switch ((AliasResult::Kind)AR) {
1059       case AliasResult::NoAlias:
1060         continue;
1061       case AliasResult::MustAlias:
1062         break;
1063       default:
1064         return Unknown();
1065       }
1066 
1067       // Is the error status of posix_memalign correctly checked? If not, it
1068       // would be incorrect to assume it succeeded and that the load doesn't see
1069       // the previous value.
1070       std::optional<bool> Checked = isImpliedByDomCondition(
1071           ICmpInst::ICMP_EQ, CB, ConstantInt::get(CB->getType(), 0), &Load, DL);
1072       if (!Checked || !*Checked)
1073         return Unknown();
1074 
1075       Value *Size = CB->getOperand(2);
1076       auto *C = dyn_cast<ConstantInt>(Size);
1077       if (!C)
1078         return Unknown();
1079 
1080       APInt CSize = C->getValue();
1081       if (CSize.isNegative())
1082         return Unknown();
1083 
1084       return Known({APInt(CSize.getBitWidth(), 0), CSize});
1085     }
1086 
1087     return Unknown();
1088   } while (From-- != BB.begin());
1089 
1090   SmallVector<OffsetSpan> PredecessorSizeOffsets;
1091   for (auto *PredBB : predecessors(&BB)) {
1092     PredecessorSizeOffsets.push_back(findLoadOffsetRange(
1093         Load, *PredBB, BasicBlock::iterator(PredBB->getTerminator()),
1094         VisitedBlocks, ScannedInstCount));
1095     if (!PredecessorSizeOffsets.back().bothKnown())
1096       return Unknown();
1097   }
1098 
1099   if (PredecessorSizeOffsets.empty())
1100     return Unknown();
1101 
1102   return Known(std::accumulate(
1103       PredecessorSizeOffsets.begin() + 1, PredecessorSizeOffsets.end(),
1104       PredecessorSizeOffsets.front(), [this](OffsetSpan LHS, OffsetSpan RHS) {
1105         return combineOffsetRange(LHS, RHS);
1106       }));
1107 }
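// For illustration, the posix_memalign pattern handled above looks like:
//
//   %err = call i32 @posix_memalign(ptr %slot, i64 64, i64 128)
//   ; ... only reachable when %err == 0 ...
//   %p = load ptr, ptr %slot    ; %p is known to span 128 bytes
//
// The load's size is only trusted when the success check on %err dominates it.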
1108 
1109 OffsetSpan ObjectSizeOffsetVisitor::visitLoadInst(LoadInst &LI) {
1110   if (!Options.AA) {
1111     ++ObjectVisitorLoad;
1112     return ObjectSizeOffsetVisitor::unknown();
1113   }
1114 
1115   SmallDenseMap<BasicBlock *, OffsetSpan, 8> VisitedBlocks;
1116   unsigned ScannedInstCount = 0;
1117   OffsetSpan SO =
1118       findLoadOffsetRange(LI, *LI.getParent(), BasicBlock::iterator(LI),
1119                           VisitedBlocks, ScannedInstCount);
1120   if (!SO.bothKnown())
1121     ++ObjectVisitorLoad;
1122   return SO;
1123 }
1124 
1125 OffsetSpan ObjectSizeOffsetVisitor::combineOffsetRange(OffsetSpan LHS,
1126                                                        OffsetSpan RHS) {
1127   if (!LHS.bothKnown() || !RHS.bothKnown())
1128     return ObjectSizeOffsetVisitor::unknown();
1129 
1130   switch (Options.EvalMode) {
1131   case ObjectSizeOpts::Mode::Min:
1132     return {LHS.Before.slt(RHS.Before) ? LHS.Before : RHS.Before,
1133             LHS.After.slt(RHS.After) ? LHS.After : RHS.After};
1134   case ObjectSizeOpts::Mode::Max: {
1135     return {LHS.Before.sgt(RHS.Before) ? LHS.Before : RHS.Before,
1136             LHS.After.sgt(RHS.After) ? LHS.After : RHS.After};
1137   }
1138   case ObjectSizeOpts::Mode::ExactSizeFromOffset:
1139     return {LHS.Before.eq(RHS.Before) ? LHS.Before : APInt(),
1140             LHS.After.eq(RHS.After) ? LHS.After : APInt()};
1141   case ObjectSizeOpts::Mode::ExactUnderlyingSizeAndOffset:
1142     return (LHS == RHS) ? LHS : ObjectSizeOffsetVisitor::unknown();
1143   }
1144   llvm_unreachable("missing an eval mode");
1145 }
1146 
1147 OffsetSpan ObjectSizeOffsetVisitor::visitPHINode(PHINode &PN) {
1148   if (PN.getNumIncomingValues() == 0)
1149     return ObjectSizeOffsetVisitor::unknown();
1150   auto IncomingValues = PN.incoming_values();
1151   return std::accumulate(IncomingValues.begin() + 1, IncomingValues.end(),
1152                          computeImpl(*IncomingValues.begin()),
1153                          [this](OffsetSpan LHS, Value *VRHS) {
1154                            return combineOffsetRange(LHS, computeImpl(VRHS));
1155                          });
1156 }
1157 
1158 OffsetSpan ObjectSizeOffsetVisitor::visitSelectInst(SelectInst &I) {
1159   return combineOffsetRange(computeImpl(I.getTrueValue()),
1160                             computeImpl(I.getFalseValue()));
1161 }
1162 
1163 OffsetSpan ObjectSizeOffsetVisitor::visitUndefValue(UndefValue &) {
1164   return OffsetSpan(Zero, Zero);
1165 }
1166 
1167 OffsetSpan ObjectSizeOffsetVisitor::visitInstruction(Instruction &I) {
1168   LLVM_DEBUG(dbgs() << "ObjectSizeOffsetVisitor unknown instruction:" << I
1169                     << '\n');
1170   return ObjectSizeOffsetVisitor::unknown();
1171 }
1172 
1173 // Just set these right here...
1174 SizeOffsetValue::SizeOffsetValue(const SizeOffsetWeakTrackingVH &SOT)
1175     : SizeOffsetType(SOT.Size, SOT.Offset) {}
1176 
1177 ObjectSizeOffsetEvaluator::ObjectSizeOffsetEvaluator(
1178     const DataLayout &DL, const TargetLibraryInfo *TLI, LLVMContext &Context,
1179     ObjectSizeOpts EvalOpts)
1180     : DL(DL), TLI(TLI), Context(Context),
1181       Builder(Context, TargetFolder(DL),
1182               IRBuilderCallbackInserter(
1183                   [&](Instruction *I) { InsertedInstructions.insert(I); })),
1184       EvalOpts(EvalOpts) {
1185   // IntTy and Zero must be set for each compute() since the address space may
1186   // be different for later objects.
1187 }
1188 
1189 SizeOffsetValue ObjectSizeOffsetEvaluator::compute(Value *V) {
1190   // XXX - Are vectors of pointers possible here?
1191   IntTy = cast<IntegerType>(DL.getIndexType(V->getType()));
1192   Zero = ConstantInt::get(IntTy, 0);
1193 
1194   SizeOffsetValue Result = compute_(V);
1195 
1196   if (!Result.bothKnown()) {
1197     // Erase everything that was computed in this iteration from the cache, so
1198     // that no dangling references are left behind. We could be a bit smarter if
1199     // we kept a dependency graph. It's probably not worth the complexity.
1200     for (const Value *SeenVal : SeenVals) {
1201       CacheMapTy::iterator CacheIt = CacheMap.find(SeenVal);
1202       // non-computable results can be safely cached
1203       if (CacheIt != CacheMap.end() && CacheIt->second.anyKnown())
1204         CacheMap.erase(CacheIt);
1205     }
1206 
1207     // Erase any instructions we inserted as part of the traversal.
1208     for (Instruction *I : InsertedInstructions) {
1209       I->replaceAllUsesWith(PoisonValue::get(I->getType()));
1210       I->eraseFromParent();
1211     }
1212   }
1213 
1214   SeenVals.clear();
1215   InsertedInstructions.clear();
1216   return Result;
1217 }
1218 
1219 SizeOffsetValue ObjectSizeOffsetEvaluator::compute_(Value *V) {
1220 
1221   // Only trust ObjectSizeOffsetVisitor in exact mode; otherwise fall back on
1222   // dynamic computation.
1223   ObjectSizeOpts VisitorEvalOpts(EvalOpts);
1224   VisitorEvalOpts.EvalMode = ObjectSizeOpts::Mode::ExactUnderlyingSizeAndOffset;
1225   ObjectSizeOffsetVisitor Visitor(DL, TLI, Context, VisitorEvalOpts);
1226 
1227   SizeOffsetAPInt Const = Visitor.compute(V);
1228   if (Const.bothKnown())
1229     return SizeOffsetValue(ConstantInt::get(Context, Const.Size),
1230                            ConstantInt::get(Context, Const.Offset));
1231 
1232   V = V->stripPointerCasts();
1233 
1234   // Check cache.
1235   CacheMapTy::iterator CacheIt = CacheMap.find(V);
1236   if (CacheIt != CacheMap.end())
1237     return CacheIt->second;
1238 
1239   // Always generate code immediately before the instruction being
1240   // processed, so that the generated code dominates the same BBs.
1241   BuilderTy::InsertPointGuard Guard(Builder);
1242   if (Instruction *I = dyn_cast<Instruction>(V))
1243     Builder.SetInsertPoint(I);
1244 
1245   // Now compute the size and offset.
1246   SizeOffsetValue Result;
1247 
1248   // Record the pointers that were handled in this run, so that they can be
1249   // cleaned later if something fails. We also use this set to break cycles that
1250   // can occur in dead code.
1251   if (!SeenVals.insert(V).second) {
1252     Result = ObjectSizeOffsetEvaluator::unknown();
1253   } else if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
1254     Result = visitGEPOperator(*GEP);
1255   } else if (Instruction *I = dyn_cast<Instruction>(V)) {
1256     Result = visit(*I);
1257   } else if (isa<Argument>(V) ||
1258              (isa<ConstantExpr>(V) &&
1259               cast<ConstantExpr>(V)->getOpcode() == Instruction::IntToPtr) ||
1260              isa<GlobalAlias>(V) ||
1261              isa<GlobalVariable>(V)) {
1262     // Ignore values where we cannot do more than ObjectSizeVisitor.
1263     Result = ObjectSizeOffsetEvaluator::unknown();
1264   } else {
1265     LLVM_DEBUG(
1266         dbgs() << "ObjectSizeOffsetEvaluator::compute() unhandled value: " << *V
1267                << '\n');
1268     Result = ObjectSizeOffsetEvaluator::unknown();
1269   }
1270 
1271   // Don't reuse CacheIt since it may be invalid at this point.
1272   CacheMap[V] = SizeOffsetWeakTrackingVH(Result);
1273   return Result;
1274 }
1275 
1276 SizeOffsetValue ObjectSizeOffsetEvaluator::visitAllocaInst(AllocaInst &I) {
1277   if (!I.getAllocatedType()->isSized())
1278     return ObjectSizeOffsetEvaluator::unknown();
1279 
1280   // must be a VLA or vscale.
1281   assert(I.isArrayAllocation() || I.getAllocatedType()->isScalableTy());
1282 
1283   // If needed, adjust the alloca's operand size to match the pointer indexing
1284   // size. Subsequent math operations expect the types to match.
1285   Value *ArraySize = Builder.CreateZExtOrTrunc(
1286       I.getArraySize(),
1287       DL.getIndexType(I.getContext(), DL.getAllocaAddrSpace()));
1288   assert(ArraySize->getType() == Zero->getType() &&
1289          "Expected zero constant to have pointer index type");
1290 
1291   Value *Size = Builder.CreateTypeSize(
1292       ArraySize->getType(), DL.getTypeAllocSize(I.getAllocatedType()));
1293   Size = Builder.CreateMul(Size, ArraySize);
1294   return SizeOffsetValue(Size, Zero);
1295 }
1296 
1297 SizeOffsetValue ObjectSizeOffsetEvaluator::visitCallBase(CallBase &CB) {
1298   std::optional<AllocFnsTy> FnData = getAllocationSize(&CB, TLI);
1299   if (!FnData)
1300     return ObjectSizeOffsetEvaluator::unknown();
1301 
1302   // Handle strdup-like functions separately.
1303   if (FnData->AllocTy == StrDupLike) {
1304     // TODO: implement evaluation of strdup/strndup
1305     return ObjectSizeOffsetEvaluator::unknown();
1306   }
1307 
1308   Value *FirstArg = CB.getArgOperand(FnData->FstParam);
1309   FirstArg = Builder.CreateZExtOrTrunc(FirstArg, IntTy);
1310   if (FnData->SndParam < 0)
1311     return SizeOffsetValue(FirstArg, Zero);
1312 
1313   Value *SecondArg = CB.getArgOperand(FnData->SndParam);
1314   SecondArg = Builder.CreateZExtOrTrunc(SecondArg, IntTy);
1315   Value *Size = Builder.CreateMul(FirstArg, SecondArg);
1316   return SizeOffsetValue(Size, Zero);
1317 }
1318 
1319 SizeOffsetValue
1320 ObjectSizeOffsetEvaluator::visitExtractElementInst(ExtractElementInst &) {
1321   return ObjectSizeOffsetEvaluator::unknown();
1322 }
1323 
1324 SizeOffsetValue
1325 ObjectSizeOffsetEvaluator::visitExtractValueInst(ExtractValueInst &) {
1326   return ObjectSizeOffsetEvaluator::unknown();
1327 }
1328 
1329 SizeOffsetValue ObjectSizeOffsetEvaluator::visitGEPOperator(GEPOperator &GEP) {
1330   SizeOffsetValue PtrData = compute_(GEP.getPointerOperand());
1331   if (!PtrData.bothKnown())
1332     return ObjectSizeOffsetEvaluator::unknown();
1333 
1334   Value *Offset = emitGEPOffset(&Builder, DL, &GEP, /*NoAssumptions=*/true);
1335   Offset = Builder.CreateAdd(PtrData.Offset, Offset);
1336   return SizeOffsetValue(PtrData.Size, Offset);
1337 }
1338 
1339 SizeOffsetValue ObjectSizeOffsetEvaluator::visitIntToPtrInst(IntToPtrInst &) {
1340   // clueless
1341   return ObjectSizeOffsetEvaluator::unknown();
1342 }
1343 
1344 SizeOffsetValue ObjectSizeOffsetEvaluator::visitLoadInst(LoadInst &LI) {
1345   return ObjectSizeOffsetEvaluator::unknown();
1346 }
1347 
1348 SizeOffsetValue ObjectSizeOffsetEvaluator::visitPHINode(PHINode &PHI) {
1349   // Create 2 PHIs: one for size and another for offset.
1350   PHINode *SizePHI   = Builder.CreatePHI(IntTy, PHI.getNumIncomingValues());
1351   PHINode *OffsetPHI = Builder.CreatePHI(IntTy, PHI.getNumIncomingValues());
1352 
1353   // Insert right away in the cache to handle recursive PHIs.
1354   CacheMap[&PHI] = SizeOffsetWeakTrackingVH(SizePHI, OffsetPHI);
1355 
1356   // Compute offset/size for each PHI incoming pointer.
1357   for (unsigned i = 0, e = PHI.getNumIncomingValues(); i != e; ++i) {
1358     BasicBlock *IncomingBlock = PHI.getIncomingBlock(i);
1359     Builder.SetInsertPoint(IncomingBlock, IncomingBlock->getFirstInsertionPt());
1360     SizeOffsetValue EdgeData = compute_(PHI.getIncomingValue(i));
1361 
1362     if (!EdgeData.bothKnown()) {
1363       OffsetPHI->replaceAllUsesWith(PoisonValue::get(IntTy));
1364       OffsetPHI->eraseFromParent();
1365       InsertedInstructions.erase(OffsetPHI);
1366       SizePHI->replaceAllUsesWith(PoisonValue::get(IntTy));
1367       SizePHI->eraseFromParent();
1368       InsertedInstructions.erase(SizePHI);
1369       return ObjectSizeOffsetEvaluator::unknown();
1370     }
1371     SizePHI->addIncoming(EdgeData.Size, IncomingBlock);
1372     OffsetPHI->addIncoming(EdgeData.Offset, IncomingBlock);
1373   }
1374 
1375   Value *Size = SizePHI, *Offset = OffsetPHI;
1376   if (Value *Tmp = SizePHI->hasConstantValue()) {
1377     Size = Tmp;
1378     SizePHI->replaceAllUsesWith(Size);
1379     SizePHI->eraseFromParent();
1380     InsertedInstructions.erase(SizePHI);
1381   }
1382   if (Value *Tmp = OffsetPHI->hasConstantValue()) {
1383     Offset = Tmp;
1384     OffsetPHI->replaceAllUsesWith(Offset);
1385     OffsetPHI->eraseFromParent();
1386     InsertedInstructions.erase(OffsetPHI);
1387   }
1388   return SizeOffsetValue(Size, Offset);
1389 }
1390 
1391 SizeOffsetValue ObjectSizeOffsetEvaluator::visitSelectInst(SelectInst &I) {
1392   SizeOffsetValue TrueSide = compute_(I.getTrueValue());
1393   SizeOffsetValue FalseSide = compute_(I.getFalseValue());
1394 
1395   if (!TrueSide.bothKnown() || !FalseSide.bothKnown())
1396     return ObjectSizeOffsetEvaluator::unknown();
1397   if (TrueSide == FalseSide)
1398     return TrueSide;
1399 
1400   Value *Size =
1401       Builder.CreateSelect(I.getCondition(), TrueSide.Size, FalseSide.Size);
1402   Value *Offset =
1403       Builder.CreateSelect(I.getCondition(), TrueSide.Offset, FalseSide.Offset);
1404   return SizeOffsetValue(Size, Offset);
1405 }
1406 
1407 SizeOffsetValue ObjectSizeOffsetEvaluator::visitInstruction(Instruction &I) {
1408   LLVM_DEBUG(dbgs() << "ObjectSizeOffsetEvaluator unknown instruction:" << I
1409                     << '\n');
1410   return ObjectSizeOffsetEvaluator::unknown();
1411 }
1412