xref: /llvm-project/llvm/lib/Transforms/IPO/GlobalOpt.cpp (revision 1b1270f30bbdb2c7a310009d0512e167b09bac48)
1 //===- GlobalOpt.cpp - Optimize Global Variables --------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This pass transforms simple global variables that never have their address
10 // taken.  If obviously true, it marks read/write globals as constant, deletes
11 // variables only stored to, etc.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "llvm/Transforms/IPO/GlobalOpt.h"
16 #include "llvm/ADT/DenseMap.h"
17 #include "llvm/ADT/STLExtras.h"
18 #include "llvm/ADT/SmallPtrSet.h"
19 #include "llvm/ADT/SmallVector.h"
20 #include "llvm/ADT/Statistic.h"
21 #include "llvm/ADT/Twine.h"
22 #include "llvm/ADT/iterator_range.h"
23 #include "llvm/Analysis/BlockFrequencyInfo.h"
24 #include "llvm/Analysis/ConstantFolding.h"
25 #include "llvm/Analysis/MemoryBuiltins.h"
26 #include "llvm/Analysis/TargetLibraryInfo.h"
27 #include "llvm/Analysis/TargetTransformInfo.h"
28 #include "llvm/Analysis/ValueTracking.h"
29 #include "llvm/BinaryFormat/Dwarf.h"
30 #include "llvm/IR/Attributes.h"
31 #include "llvm/IR/BasicBlock.h"
32 #include "llvm/IR/CallingConv.h"
33 #include "llvm/IR/Constant.h"
34 #include "llvm/IR/Constants.h"
35 #include "llvm/IR/DataLayout.h"
36 #include "llvm/IR/DebugInfoMetadata.h"
37 #include "llvm/IR/DerivedTypes.h"
38 #include "llvm/IR/Dominators.h"
39 #include "llvm/IR/Function.h"
40 #include "llvm/IR/GlobalAlias.h"
41 #include "llvm/IR/GlobalValue.h"
42 #include "llvm/IR/GlobalVariable.h"
43 #include "llvm/IR/IRBuilder.h"
44 #include "llvm/IR/InstrTypes.h"
45 #include "llvm/IR/Instruction.h"
46 #include "llvm/IR/Instructions.h"
47 #include "llvm/IR/IntrinsicInst.h"
48 #include "llvm/IR/Module.h"
49 #include "llvm/IR/Operator.h"
50 #include "llvm/IR/Type.h"
51 #include "llvm/IR/Use.h"
52 #include "llvm/IR/User.h"
53 #include "llvm/IR/Value.h"
54 #include "llvm/IR/ValueHandle.h"
55 #include "llvm/Support/AtomicOrdering.h"
56 #include "llvm/Support/Casting.h"
57 #include "llvm/Support/CommandLine.h"
58 #include "llvm/Support/Debug.h"
59 #include "llvm/Support/ErrorHandling.h"
60 #include "llvm/Support/raw_ostream.h"
61 #include "llvm/Transforms/IPO.h"
62 #include "llvm/Transforms/Utils/CtorUtils.h"
63 #include "llvm/Transforms/Utils/Evaluator.h"
64 #include "llvm/Transforms/Utils/GlobalStatus.h"
65 #include "llvm/Transforms/Utils/Local.h"
66 #include <cassert>
67 #include <cstdint>
68 #include <optional>
69 #include <utility>
70 #include <vector>
71 
72 using namespace llvm;
73 
74 #define DEBUG_TYPE "globalopt"
75 
76 STATISTIC(NumMarked    , "Number of globals marked constant");
77 STATISTIC(NumUnnamed   , "Number of globals marked unnamed_addr");
78 STATISTIC(NumSRA       , "Number of aggregate globals broken into scalars");
79 STATISTIC(NumSubstitute,"Number of globals with initializers stored into them");
80 STATISTIC(NumDeleted   , "Number of globals deleted");
81 STATISTIC(NumGlobUses  , "Number of global uses devirtualized");
82 STATISTIC(NumLocalized , "Number of globals localized");
83 STATISTIC(NumShrunkToBool  , "Number of global vars shrunk to booleans");
84 STATISTIC(NumFastCallFns   , "Number of functions converted to fastcc");
85 STATISTIC(NumCtorsEvaluated, "Number of static ctors evaluated");
86 STATISTIC(NumNestRemoved   , "Number of nest attributes removed");
87 STATISTIC(NumAliasesResolved, "Number of global aliases resolved");
88 STATISTIC(NumAliasesRemoved, "Number of global aliases eliminated");
89 STATISTIC(NumCXXDtorsRemoved, "Number of global C++ destructors removed");
90 STATISTIC(NumAtExitRemoved, "Number of atexit handlers removed");
91 STATISTIC(NumInternalFunc, "Number of internal functions");
92 STATISTIC(NumColdCC, "Number of functions marked coldcc");
93 STATISTIC(NumIFuncsResolved, "Number of statically resolved IFuncs");
94 STATISTIC(NumIFuncsDeleted, "Number of IFuncs removed");
95 STATISTIC(NumGlobalArraysPadded,
96           "Number of global arrays padded to alignment boundary");
97 
98 static cl::opt<bool>
99     OptimizeNonFMVCallers("optimize-non-fmv-callers",
100                           cl::desc("Statically resolve calls to versioned "
101                                    "functions from non-versioned callers."),
102                           cl::init(true), cl::Hidden);
103 
104 static cl::opt<bool>
105     EnableColdCCStressTest("enable-coldcc-stress-test",
106                            cl::desc("Enable stress test of coldcc by adding "
107                                     "calling conv to all internal functions."),
108                            cl::init(false), cl::Hidden);
109 
110 static cl::opt<int> ColdCCRelFreq(
111     "coldcc-rel-freq", cl::Hidden, cl::init(2),
112     cl::desc(
113         "Maximum block frequency, expressed as a percentage of caller's "
114         "entry frequency, for a call site to be considered cold for enabling "
115         "coldcc"));
116 
117 /// Is this global variable possibly used by a leak checker as a root?  If so,
118 /// we might not really want to eliminate the stores to it.
119 static bool isLeakCheckerRoot(GlobalVariable *GV) {
120   // A global variable is a root if it is a pointer, or could plausibly contain
121   // a pointer.  There are two challenges: one is that we could have a struct
122   // that has an inner member which is a pointer.  We recurse through the type
123   // to detect these (up to a point).  The other is that the global may really
124   // be a union of a pointer and another type, so its LLVM type is an integer
125   // that gets converted into a pointer, or an [# x i8] array with a pointer
126   // potentially contained in it.
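  // For example (illustrative): a global of type { i32, ptr } is treated as a
  // root because one of its members is a pointer, whereas a plain i32 global
  // is not.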
127 
128   if (GV->hasPrivateLinkage())
129     return false;
130 
131   SmallVector<Type *, 4> Types;
132   Types.push_back(GV->getValueType());
133 
134   unsigned Limit = 20;
135   do {
136     Type *Ty = Types.pop_back_val();
137     switch (Ty->getTypeID()) {
138       default: break;
139       case Type::PointerTyID:
140         return true;
141       case Type::FixedVectorTyID:
142       case Type::ScalableVectorTyID:
143         if (cast<VectorType>(Ty)->getElementType()->isPointerTy())
144           return true;
145         break;
146       case Type::ArrayTyID:
147         Types.push_back(cast<ArrayType>(Ty)->getElementType());
148         break;
149       case Type::StructTyID: {
150         StructType *STy = cast<StructType>(Ty);
151         if (STy->isOpaque()) return true;
152         for (Type *InnerTy : STy->elements()) {
153           if (isa<PointerType>(InnerTy)) return true;
154           if (isa<StructType>(InnerTy) || isa<ArrayType>(InnerTy) ||
155               isa<VectorType>(InnerTy))
156             Types.push_back(InnerTy);
157         }
158         break;
159       }
160     }
161     if (--Limit == 0) return true;
162   } while (!Types.empty());
163   return false;
164 }
165 
166 /// Given a value that is stored to a global but never read, determine whether
167 /// it's safe to remove the store and the chain of computation that feeds the
168 /// store.
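/// For example (illustrative): if the stored value is a constant-index GEP into
/// the result of a malloc-like call, and neither the GEP nor the call has any
/// other use, then the GEP, the call, and the store can all be deleted.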
169 static bool IsSafeComputationToRemove(
170     Value *V, function_ref<TargetLibraryInfo &(Function &)> GetTLI) {
171   do {
172     if (isa<Constant>(V))
173       return true;
174     if (!V->hasOneUse())
175       return false;
176     if (isa<LoadInst>(V) || isa<InvokeInst>(V) || isa<Argument>(V) ||
177         isa<GlobalValue>(V))
178       return false;
179     if (isAllocationFn(V, GetTLI))
180       return true;
181 
182     Instruction *I = cast<Instruction>(V);
183     if (I->mayHaveSideEffects())
184       return false;
185     if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) {
186       if (!GEP->hasAllConstantIndices())
187         return false;
188     } else if (I->getNumOperands() != 1) {
189       return false;
190     }
191 
192     V = I->getOperand(0);
193   } while (true);
194 }
195 
196 /// This GV is a pointer root.  Loop over all users of the global and clean up
197 /// any stores that obviously do not assign the global a dynamically allocated
198 /// value that a leak checker would need to see.
199 static bool
200 CleanupPointerRootUsers(GlobalVariable *GV,
201                         function_ref<TargetLibraryInfo &(Function &)> GetTLI) {
202   // A brief explanation of leak checkers.  The goal is to find bugs where
203   // pointers are forgotten, causing an accumulating growth in memory
204   // usage over time.  The common strategy for leak checkers is to explicitly
205   // allow the memory pointed to by globals at exit.  This is popular because it
206   // also solves another problem where the main thread of a C++ program may shut
207   // down before other threads that are still expecting to use those globals. To
208   // handle that case, we expect the program may create a singleton and never
209   // destroy it.
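  // As an illustration (not exhaustive): a store of a constant such as
  // `store ptr null, ptr @g`, or a memset of the global with a constant byte,
  // can be deleted outright, while a store of a dynamically allocated pointer
  // that is still used elsewhere is left alone for the leak checker to see.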
210 
211   bool Changed = false;
212 
213   // If Dead[n].first is the only use of a malloc result, we can delete its
214   // chain of computation and the store to the global in Dead[n].second.
215   SmallVector<std::pair<Instruction *, Instruction *>, 32> Dead;
216 
217   SmallVector<User *> Worklist(GV->users());
218   // Constants can't be pointers to dynamically allocated memory.
219   while (!Worklist.empty()) {
220     User *U = Worklist.pop_back_val();
221     if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
222       Value *V = SI->getValueOperand();
223       if (isa<Constant>(V)) {
224         Changed = true;
225         SI->eraseFromParent();
226       } else if (Instruction *I = dyn_cast<Instruction>(V)) {
227         if (I->hasOneUse())
228           Dead.push_back(std::make_pair(I, SI));
229       }
230     } else if (MemSetInst *MSI = dyn_cast<MemSetInst>(U)) {
231       if (isa<Constant>(MSI->getValue())) {
232         Changed = true;
233         MSI->eraseFromParent();
234       } else if (Instruction *I = dyn_cast<Instruction>(MSI->getValue())) {
235         if (I->hasOneUse())
236           Dead.push_back(std::make_pair(I, MSI));
237       }
238     } else if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(U)) {
239       GlobalVariable *MemSrc = dyn_cast<GlobalVariable>(MTI->getSource());
240       if (MemSrc && MemSrc->isConstant()) {
241         Changed = true;
242         MTI->eraseFromParent();
243       } else if (Instruction *I = dyn_cast<Instruction>(MTI->getSource())) {
244         if (I->hasOneUse())
245           Dead.push_back(std::make_pair(I, MTI));
246       }
247     } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(U)) {
248       if (isa<GEPOperator>(CE))
249         append_range(Worklist, CE->users());
250     }
251   }
252 
253   for (int i = 0, e = Dead.size(); i != e; ++i) {
254     if (IsSafeComputationToRemove(Dead[i].first, GetTLI)) {
255       Dead[i].second->eraseFromParent();
256       Instruction *I = Dead[i].first;
257       do {
258         if (isAllocationFn(I, GetTLI))
259           break;
260         Instruction *J = dyn_cast<Instruction>(I->getOperand(0));
261         if (!J)
262           break;
263         I->eraseFromParent();
264         I = J;
265       } while (true);
266       I->eraseFromParent();
267       Changed = true;
268     }
269   }
270 
271   GV->removeDeadConstantUsers();
272   return Changed;
273 }
274 
275 /// We just marked GV constant.  Loop over all users of the global, cleaning up
276 /// the obvious ones.  This is largely just a quick scan over the use list to
277 /// clean up the easy and obvious cruft.  This returns true if it made a change.
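/// For example (illustrative): once @g is known to be constant, a load of @g can
/// be folded to its initializer, and any store to @g (which must either be
/// unreachable or be re-storing the initializer) can simply be deleted.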
278 static bool CleanupConstantGlobalUsers(GlobalVariable *GV,
279                                        const DataLayout &DL) {
280   Constant *Init = GV->getInitializer();
281   SmallVector<User *, 8> WorkList(GV->users());
282   SmallPtrSet<User *, 8> Visited;
283   bool Changed = false;
284 
285   SmallVector<WeakTrackingVH> MaybeDeadInsts;
286   auto EraseFromParent = [&](Instruction *I) {
287     for (Value *Op : I->operands())
288       if (auto *OpI = dyn_cast<Instruction>(Op))
289         MaybeDeadInsts.push_back(OpI);
290     I->eraseFromParent();
291     Changed = true;
292   };
293   while (!WorkList.empty()) {
294     User *U = WorkList.pop_back_val();
295     if (!Visited.insert(U).second)
296       continue;
297 
298     if (auto *BO = dyn_cast<BitCastOperator>(U))
299       append_range(WorkList, BO->users());
300     if (auto *ASC = dyn_cast<AddrSpaceCastOperator>(U))
301       append_range(WorkList, ASC->users());
302     else if (auto *GEP = dyn_cast<GEPOperator>(U))
303       append_range(WorkList, GEP->users());
304     else if (auto *LI = dyn_cast<LoadInst>(U)) {
305       // A load from a uniform value is always the same, regardless of any
306       // applied offset.
307       Type *Ty = LI->getType();
308       if (Constant *Res = ConstantFoldLoadFromUniformValue(Init, Ty, DL)) {
309         LI->replaceAllUsesWith(Res);
310         EraseFromParent(LI);
311         continue;
312       }
313 
314       Value *PtrOp = LI->getPointerOperand();
315       APInt Offset(DL.getIndexTypeSizeInBits(PtrOp->getType()), 0);
316       PtrOp = PtrOp->stripAndAccumulateConstantOffsets(
317           DL, Offset, /* AllowNonInbounds */ true);
318       if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(PtrOp)) {
319         if (II->getIntrinsicID() == Intrinsic::threadlocal_address)
320           PtrOp = II->getArgOperand(0);
321       }
322       if (PtrOp == GV) {
323         if (auto *Value = ConstantFoldLoadFromConst(Init, Ty, Offset, DL)) {
324           LI->replaceAllUsesWith(Value);
325           EraseFromParent(LI);
326         }
327       }
328     } else if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
329       // Store must be unreachable or storing Init into the global.
330       EraseFromParent(SI);
331     } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(U)) { // memset/cpy/mv
332       if (getUnderlyingObject(MI->getRawDest()) == GV)
333         EraseFromParent(MI);
334     } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U)) {
335       if (II->getIntrinsicID() == Intrinsic::threadlocal_address)
336         append_range(WorkList, II->users());
337     }
338   }
339 
340   Changed |=
341       RecursivelyDeleteTriviallyDeadInstructionsPermissive(MaybeDeadInsts);
342   GV->removeDeadConstantUsers();
343   return Changed;
344 }
345 
346 /// Part of the global at a specific offset, which is only accessed through
347 /// loads and stores with the given type.
348 struct GlobalPart {
349   Type *Ty;
350   Constant *Initializer = nullptr;
351   bool IsLoaded = false;
352   bool IsStored = false;
353 };
354 
355 /// Look at all uses of the global and determine which (offset, type) pairs it
356 /// can be split into.
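/// For example (illustrative, assuming a typical data layout): a global of type
/// { i32, i64 } that is only accessed through i32 loads/stores at offset 0 and
/// i64 loads/stores at offset 8 yields the parts {0 -> i32, 8 -> i64}.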
357 static bool collectSRATypes(DenseMap<uint64_t, GlobalPart> &Parts,
358                             GlobalVariable *GV, const DataLayout &DL) {
359   SmallVector<Use *, 16> Worklist;
360   SmallPtrSet<Use *, 16> Visited;
361   auto AppendUses = [&](Value *V) {
362     for (Use &U : V->uses())
363       if (Visited.insert(&U).second)
364         Worklist.push_back(&U);
365   };
366   AppendUses(GV);
367   while (!Worklist.empty()) {
368     Use *U = Worklist.pop_back_val();
369     User *V = U->getUser();
370 
371     auto *GEP = dyn_cast<GEPOperator>(V);
372     if (isa<BitCastOperator>(V) || isa<AddrSpaceCastOperator>(V) ||
373         (GEP && GEP->hasAllConstantIndices())) {
374       AppendUses(V);
375       continue;
376     }
377 
378     if (Value *Ptr = getLoadStorePointerOperand(V)) {
379       // This is storing the global address into somewhere, not storing into
380       // the global.
381       if (isa<StoreInst>(V) && U->getOperandNo() == 0)
382         return false;
383 
384       APInt Offset(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
385       Ptr = Ptr->stripAndAccumulateConstantOffsets(DL, Offset,
386                                                    /* AllowNonInbounds */ true);
387       if (Ptr != GV || Offset.getActiveBits() >= 64)
388         return false;
389 
390       // TODO: We currently require that all accesses at a given offset must
391       // use the same type. This could be relaxed.
392       Type *Ty = getLoadStoreType(V);
393       const auto &[It, Inserted] =
394           Parts.try_emplace(Offset.getZExtValue(), GlobalPart{Ty});
395       if (Ty != It->second.Ty)
396         return false;
397 
398       if (Inserted) {
399         It->second.Initializer =
400             ConstantFoldLoadFromConst(GV->getInitializer(), Ty, Offset, DL);
401         if (!It->second.Initializer) {
402           LLVM_DEBUG(dbgs() << "Global SRA: Failed to evaluate initializer of "
403                             << *GV << " with type " << *Ty << " at offset "
404                             << Offset.getZExtValue());
405           return false;
406         }
407       }
408 
409       // Scalable types not currently supported.
410       if (Ty->isScalableTy())
411         return false;
412 
413       auto IsStored = [](Value *V, Constant *Initializer) {
414         auto *SI = dyn_cast<StoreInst>(V);
415         if (!SI)
416           return false;
417 
418         Constant *StoredConst = dyn_cast<Constant>(SI->getOperand(0));
419         if (!StoredConst)
420           return true;
421 
422         // Don't consider stores that only write the initializer value.
423         return Initializer != StoredConst;
424       };
425 
426       It->second.IsLoaded |= isa<LoadInst>(V);
427       It->second.IsStored |= IsStored(V, It->second.Initializer);
428       continue;
429     }
430 
431     // Ignore dead constant users.
432     if (auto *C = dyn_cast<Constant>(V)) {
433       if (!isSafeToDestroyConstant(C))
434         return false;
435       continue;
436     }
437 
438     // Unknown user.
439     return false;
440   }
441 
442   return true;
443 }
444 
445 /// Copy over the debug info for a variable to its SRA replacements.
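/// For example (illustrative): if a 64-bit source variable is split into two
/// 32-bit replacement globals, each replacement receives an expression built
/// with DIExpression::createFragmentExpression covering bits [0, 32) and
/// [32, 64) respectively.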
446 static void transferSRADebugInfo(GlobalVariable *GV, GlobalVariable *NGV,
447                                  uint64_t FragmentOffsetInBits,
448                                  uint64_t FragmentSizeInBits,
449                                  uint64_t VarSize) {
450   SmallVector<DIGlobalVariableExpression *, 1> GVs;
451   GV->getDebugInfo(GVs);
452   for (auto *GVE : GVs) {
453     DIVariable *Var = GVE->getVariable();
454     DIExpression *Expr = GVE->getExpression();
455     int64_t CurVarOffsetInBytes = 0;
456     uint64_t CurVarOffsetInBits = 0;
457     uint64_t FragmentEndInBits = FragmentOffsetInBits + FragmentSizeInBits;
458 
459     // Calculate the offset in bytes; continue if unknown.
460     if (!Expr->extractIfOffset(CurVarOffsetInBytes))
461       continue;
462 
463     // Ignore negative offset.
464     if (CurVarOffsetInBytes < 0)
465       continue;
466 
467     // Convert offset to bits.
468     CurVarOffsetInBits = CHAR_BIT * (uint64_t)CurVarOffsetInBytes;
469 
470     // Current var starts after the fragment, ignore.
471     if (CurVarOffsetInBits >= FragmentEndInBits)
472       continue;
473 
474     uint64_t CurVarSize = Var->getType()->getSizeInBits();
475     uint64_t CurVarEndInBits = CurVarOffsetInBits + CurVarSize;
476     // Current variable ends before start of fragment, ignore.
477     if (CurVarSize != 0 && /* CurVarSize is known */
478         CurVarEndInBits <= FragmentOffsetInBits)
479       continue;
480 
481     // Current variable fits in (not greater than) the fragment,
482     // does not need fragment expression.
483     if (CurVarSize != 0 && /* CurVarSize is known */
484         CurVarOffsetInBits >= FragmentOffsetInBits &&
485         CurVarEndInBits <= FragmentEndInBits) {
486       uint64_t CurVarOffsetInFragment =
487           (CurVarOffsetInBits - FragmentOffsetInBits) / 8;
488       if (CurVarOffsetInFragment != 0)
489         Expr = DIExpression::get(Expr->getContext(), {dwarf::DW_OP_plus_uconst,
490                                                       CurVarOffsetInFragment});
491       else
492         Expr = DIExpression::get(Expr->getContext(), {});
493       auto *NGVE =
494           DIGlobalVariableExpression::get(GVE->getContext(), Var, Expr);
495       NGV->addDebugInfo(NGVE);
496       continue;
497     }
498     // Current variable does not fit in single fragment,
499     // emit a fragment expression.
500     if (FragmentSizeInBits < VarSize) {
501       if (CurVarOffsetInBits > FragmentOffsetInBits)
502         continue;
503       uint64_t CurVarFragmentOffsetInBits =
504           FragmentOffsetInBits - CurVarOffsetInBits;
505       uint64_t CurVarFragmentSizeInBits = FragmentSizeInBits;
506       if (CurVarSize != 0 && CurVarEndInBits < FragmentEndInBits)
507         CurVarFragmentSizeInBits -= (FragmentEndInBits - CurVarEndInBits);
508       if (CurVarOffsetInBits)
509         Expr = DIExpression::get(Expr->getContext(), {});
510       if (auto E = DIExpression::createFragmentExpression(
511               Expr, CurVarFragmentOffsetInBits, CurVarFragmentSizeInBits))
512         Expr = *E;
513       else
514         continue;
515     }
516     auto *NGVE = DIGlobalVariableExpression::get(GVE->getContext(), Var, Expr);
517     NGV->addDebugInfo(NGVE);
518   }
519 }
520 
521 /// Perform scalar replacement of aggregates on the specified global variable.
522 /// This opens the door for other optimizations by exposing the behavior of the
523 /// program in a more fine-grained way.  We have determined that this
524 /// transformation is safe already.  We return the first global variable we
525 /// insert so that the caller can reprocess it.
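/// A minimal sketch of the effect (illustrative):
///   @g = internal global { i32, i32 } zeroinitializer
/// accessed only through its individual fields becomes
///   @g.0 = internal global i32 0
///   @g.1 = internal global i32 0
/// with every load/store rewritten to address @g.0 or @g.1 directly.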
526 static GlobalVariable *SRAGlobal(GlobalVariable *GV, const DataLayout &DL) {
527   assert(GV->hasLocalLinkage());
528 
529   // Collect types to split into.
530   DenseMap<uint64_t, GlobalPart> Parts;
531   if (!collectSRATypes(Parts, GV, DL) || Parts.empty())
532     return nullptr;
533 
534   // Make sure we don't SRA back to the same type.
535   if (Parts.size() == 1 && Parts.begin()->second.Ty == GV->getValueType())
536     return nullptr;
537 
538   // Don't perform SRA if we would have to split into many globals. Ignore
539   // parts that are either only loaded or only stored, because we expect them
540   // to be optimized away.
541   unsigned NumParts = count_if(Parts, [](const auto &Pair) {
542     return Pair.second.IsLoaded && Pair.second.IsStored;
543   });
544   if (NumParts > 16)
545     return nullptr;
546 
547   // Sort by offset.
548   SmallVector<std::tuple<uint64_t, Type *, Constant *>, 16> TypesVector;
549   for (const auto &Pair : Parts) {
550     TypesVector.push_back(
551         {Pair.first, Pair.second.Ty, Pair.second.Initializer});
552   }
553   sort(TypesVector, llvm::less_first());
554 
555   // Check that the types are non-overlapping.
556   uint64_t Offset = 0;
557   for (const auto &[OffsetForTy, Ty, _] : TypesVector) {
558     // Overlaps with previous type.
559     if (OffsetForTy < Offset)
560       return nullptr;
561 
562     Offset = OffsetForTy + DL.getTypeAllocSize(Ty);
563   }
564 
565   // Some accesses go beyond the end of the global, don't bother.
566   if (Offset > DL.getTypeAllocSize(GV->getValueType()))
567     return nullptr;
568 
569   LLVM_DEBUG(dbgs() << "PERFORMING GLOBAL SRA ON: " << *GV << "\n");
570 
571   // Get the alignment of the global, either explicit or target-specific.
572   Align StartAlignment =
573       DL.getValueOrABITypeAlignment(GV->getAlign(), GV->getValueType());
574   uint64_t VarSize = DL.getTypeSizeInBits(GV->getValueType());
575 
576   // Create replacement globals.
577   DenseMap<uint64_t, GlobalVariable *> NewGlobals;
578   unsigned NameSuffix = 0;
579   for (auto &[OffsetForTy, Ty, Initializer] : TypesVector) {
580     GlobalVariable *NGV = new GlobalVariable(
581         *GV->getParent(), Ty, false, GlobalVariable::InternalLinkage,
582         Initializer, GV->getName() + "." + Twine(NameSuffix++), GV,
583         GV->getThreadLocalMode(), GV->getAddressSpace());
584     // Start out by copying attributes from the original, including alignment.
585     NGV->copyAttributesFrom(GV);
586     NewGlobals.insert({OffsetForTy, NGV});
587 
588     // Calculate the known alignment of the field.  If the original aggregate
589     // had 256 byte alignment for example, then the element at a given offset
590     // may also have a known alignment, and something might depend on that:
591     // propagate info to each field.
592     Align NewAlign = commonAlignment(StartAlignment, OffsetForTy);
593     NGV->setAlignment(NewAlign);
594 
595     // Copy over the debug info for the variable.
596     transferSRADebugInfo(GV, NGV, OffsetForTy * 8,
597                          DL.getTypeAllocSizeInBits(Ty), VarSize);
598   }
599 
600   // Replace uses of the original global with uses of the new global.
601   SmallVector<Value *, 16> Worklist;
602   SmallPtrSet<Value *, 16> Visited;
603   SmallVector<WeakTrackingVH, 16> DeadInsts;
604   auto AppendUsers = [&](Value *V) {
605     for (User *U : V->users())
606       if (Visited.insert(U).second)
607         Worklist.push_back(U);
608   };
609   AppendUsers(GV);
610   while (!Worklist.empty()) {
611     Value *V = Worklist.pop_back_val();
612     if (isa<BitCastOperator>(V) || isa<AddrSpaceCastOperator>(V) ||
613         isa<GEPOperator>(V)) {
614       AppendUsers(V);
615       if (isa<Instruction>(V))
616         DeadInsts.push_back(V);
617       continue;
618     }
619 
620     if (Value *Ptr = getLoadStorePointerOperand(V)) {
621       APInt Offset(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
622       Ptr = Ptr->stripAndAccumulateConstantOffsets(DL, Offset,
623                                                    /* AllowNonInbounds */ true);
624       assert(Ptr == GV && "Load/store must be from/to global");
625       GlobalVariable *NGV = NewGlobals[Offset.getZExtValue()];
626       assert(NGV && "Must have replacement global for this offset");
627 
628       // Update the pointer operand and recalculate alignment.
629       Align PrefAlign = DL.getPrefTypeAlign(getLoadStoreType(V));
630       Align NewAlign =
631           getOrEnforceKnownAlignment(NGV, PrefAlign, DL, cast<Instruction>(V));
632 
633       if (auto *LI = dyn_cast<LoadInst>(V)) {
634         LI->setOperand(0, NGV);
635         LI->setAlignment(NewAlign);
636       } else {
637         auto *SI = cast<StoreInst>(V);
638         SI->setOperand(1, NGV);
639         SI->setAlignment(NewAlign);
640       }
641       continue;
642     }
643 
644     assert(isa<Constant>(V) && isSafeToDestroyConstant(cast<Constant>(V)) &&
645            "Other users can only be dead constants");
646   }
647 
648   // Delete old instructions and global.
649   RecursivelyDeleteTriviallyDeadInstructions(DeadInsts);
650   GV->removeDeadConstantUsers();
651   GV->eraseFromParent();
652   ++NumSRA;
653 
654   assert(NewGlobals.size() > 0);
655   return NewGlobals.begin()->second;
656 }
657 
658 /// Return true if all users of the specified value will trap if the value is
659 /// dynamically null.  PHIs keeps track of any phi nodes we've seen to avoid
660 /// reprocessing them.
661 static bool AllUsesOfValueWillTrapIfNull(const Value *V,
662                                         SmallPtrSetImpl<const PHINode*> &PHIs) {
663   for (const User *U : V->users()) {
664     if (const Instruction *I = dyn_cast<Instruction>(U)) {
665       // If null pointer is considered valid, then all uses are non-trapping.
666       // Non address-space 0 globals have already been pruned by the caller.
667       if (NullPointerIsDefined(I->getFunction()))
668         return false;
669     }
670     if (isa<LoadInst>(U)) {
671       // Will trap.
672     } else if (const StoreInst *SI = dyn_cast<StoreInst>(U)) {
673       if (SI->getOperand(0) == V) {
674         return false;  // Storing the value.
675       }
676     } else if (const CallInst *CI = dyn_cast<CallInst>(U)) {
677       if (CI->getCalledOperand() != V) {
678         return false;  // Not calling the ptr
679       }
680     } else if (const InvokeInst *II = dyn_cast<InvokeInst>(U)) {
681       if (II->getCalledOperand() != V) {
682         return false;  // Not calling the ptr
683       }
684     } else if (const AddrSpaceCastInst *CI = dyn_cast<AddrSpaceCastInst>(U)) {
685       if (!AllUsesOfValueWillTrapIfNull(CI, PHIs))
686         return false;
687     } else if (const GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(U)) {
688       if (!AllUsesOfValueWillTrapIfNull(GEPI, PHIs)) return false;
689     } else if (const PHINode *PN = dyn_cast<PHINode>(U)) {
690       // If we've already seen this phi node, ignore it, it has already been
691       // checked.
692       if (PHIs.insert(PN).second && !AllUsesOfValueWillTrapIfNull(PN, PHIs))
693         return false;
694     } else if (isa<ICmpInst>(U) &&
695                !ICmpInst::isSigned(cast<ICmpInst>(U)->getPredicate()) &&
696                isa<LoadInst>(U->getOperand(0)) &&
697                isa<ConstantPointerNull>(U->getOperand(1))) {
698       assert(isa<GlobalValue>(cast<LoadInst>(U->getOperand(0))
699                                   ->getPointerOperand()
700                                   ->stripPointerCasts()) &&
701              "Should be GlobalVariable");
702       // This and only this kind of non-signed ICmpInst is to be replaced with
703       // the comparing of the value of the created global init bool later in
704       // optimizeGlobalAddressOfAllocation for the global variable.
705     } else {
706       return false;
707     }
708   }
709   return true;
710 }
711 
712 /// Return true if all uses of any loads from GV will trap if the loaded value
713 /// is null.  Note that this also permits comparisons of the loaded value
714 /// against null, as a special case.
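/// For example (illustrative): `%p = load ptr, ptr @g` followed by
/// `call void %p()` or `load i32, ptr %p` traps when %p is null, and
/// `icmp eq ptr %p, null` is tolerated as the special case mentioned above.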
715 static bool allUsesOfLoadedValueWillTrapIfNull(const GlobalVariable *GV) {
716   SmallVector<const Value *, 4> Worklist;
717   Worklist.push_back(GV);
718   while (!Worklist.empty()) {
719     const Value *P = Worklist.pop_back_val();
720     for (const auto *U : P->users()) {
721       if (auto *LI = dyn_cast<LoadInst>(U)) {
722         SmallPtrSet<const PHINode *, 8> PHIs;
723         if (!AllUsesOfValueWillTrapIfNull(LI, PHIs))
724           return false;
725       } else if (auto *SI = dyn_cast<StoreInst>(U)) {
726         // Ignore stores to the global.
727         if (SI->getPointerOperand() != P)
728           return false;
729       } else if (auto *CE = dyn_cast<ConstantExpr>(U)) {
730         if (CE->stripPointerCasts() != GV)
731           return false;
732         // Check further the ConstantExpr.
733         Worklist.push_back(CE);
734       } else {
735         // We don't know or understand this user, bail out.
736         return false;
737       }
738     }
739   }
740 
741   return true;
742 }
743 
744 /// Get all the loads/store uses for global variable \p GV.
745 static void allUsesOfLoadAndStores(GlobalVariable *GV,
746                                    SmallVector<Value *, 4> &Uses) {
747   SmallVector<Value *, 4> Worklist;
748   Worklist.push_back(GV);
749   while (!Worklist.empty()) {
750     auto *P = Worklist.pop_back_val();
751     for (auto *U : P->users()) {
752       if (auto *CE = dyn_cast<ConstantExpr>(U)) {
753         Worklist.push_back(CE);
754         continue;
755       }
756 
757       assert((isa<LoadInst>(U) || isa<StoreInst>(U)) &&
758              "Expect only load or store instructions");
759       Uses.push_back(U);
760     }
761   }
762 }
763 
764 static bool OptimizeAwayTrappingUsesOfValue(Value *V, Constant *NewV) {
765   bool Changed = false;
766   for (auto UI = V->user_begin(), E = V->user_end(); UI != E; ) {
767     Instruction *I = cast<Instruction>(*UI++);
768     // Uses are non-trapping if null pointer is considered valid.
769     // Non address-space 0 globals are already pruned by the caller.
770     if (NullPointerIsDefined(I->getFunction()))
771       return false;
772     if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
773       LI->setOperand(0, NewV);
774       Changed = true;
775     } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
776       if (SI->getOperand(1) == V) {
777         SI->setOperand(1, NewV);
778         Changed = true;
779       }
780     } else if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
781       CallBase *CB = cast<CallBase>(I);
782       if (CB->getCalledOperand() == V) {
783         // Calling through the pointer!  Turn into a direct call, but be careful
784         // that the pointer is not also being passed as an argument.
785         CB->setCalledOperand(NewV);
786         Changed = true;
787         bool PassedAsArg = false;
788         for (unsigned i = 0, e = CB->arg_size(); i != e; ++i)
789           if (CB->getArgOperand(i) == V) {
790             PassedAsArg = true;
791             CB->setArgOperand(i, NewV);
792           }
793 
794         if (PassedAsArg) {
795           // Being passed as an argument also.  Be careful to not invalidate UI!
796           UI = V->user_begin();
797         }
798       }
799     } else if (AddrSpaceCastInst *CI = dyn_cast<AddrSpaceCastInst>(I)) {
800       Changed |= OptimizeAwayTrappingUsesOfValue(
801           CI, ConstantExpr::getAddrSpaceCast(NewV, CI->getType()));
802       if (CI->use_empty()) {
803         Changed = true;
804         CI->eraseFromParent();
805       }
806     } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) {
807       // Should handle GEP here.
808       SmallVector<Constant*, 8> Idxs;
809       Idxs.reserve(GEPI->getNumOperands()-1);
810       for (User::op_iterator i = GEPI->op_begin() + 1, e = GEPI->op_end();
811            i != e; ++i)
812         if (Constant *C = dyn_cast<Constant>(*i))
813           Idxs.push_back(C);
814         else
815           break;
816       if (Idxs.size() == GEPI->getNumOperands()-1)
817         Changed |= OptimizeAwayTrappingUsesOfValue(
818             GEPI, ConstantExpr::getGetElementPtr(GEPI->getSourceElementType(),
819                                                  NewV, Idxs));
820       if (GEPI->use_empty()) {
821         Changed = true;
822         GEPI->eraseFromParent();
823       }
824     }
825   }
826 
827   return Changed;
828 }
829 
830 /// The specified global has only one non-null value stored into it.  If there
831 /// are uses of the loaded value that would trap if the loaded value is
832 /// dynamically null, then we know that those uses cannot be reached with a
833 /// null value, so we can optimize away the load.
834 static bool OptimizeAwayTrappingUsesOfLoads(
835     GlobalVariable *GV, Constant *LV, const DataLayout &DL,
836     function_ref<TargetLibraryInfo &(Function &)> GetTLI) {
837   bool Changed = false;
838 
839   // Keep track of whether we are able to remove all the uses of the global
840   // other than the store that defines it.
841   bool AllNonStoreUsesGone = true;
842 
843   // Replace uses of the loaded value with uses of the stored value.
844   for (User *GlobalUser : llvm::make_early_inc_range(GV->users())) {
845     if (LoadInst *LI = dyn_cast<LoadInst>(GlobalUser)) {
846       Changed |= OptimizeAwayTrappingUsesOfValue(LI, LV);
847       // If we were able to delete all uses of the loads
848       if (LI->use_empty()) {
849         LI->eraseFromParent();
850         Changed = true;
851       } else {
852         AllNonStoreUsesGone = false;
853       }
854     } else if (isa<StoreInst>(GlobalUser)) {
855       // Ignore the store that stores "LV" to the global.
856       assert(GlobalUser->getOperand(1) == GV &&
857              "Must be storing *to* the global");
858     } else {
859       AllNonStoreUsesGone = false;
860 
861       // If we get here we could have other crazy uses that are transitively
862       // loaded.
863       assert((isa<PHINode>(GlobalUser) || isa<SelectInst>(GlobalUser) ||
864               isa<ConstantExpr>(GlobalUser) || isa<CmpInst>(GlobalUser) ||
865               isa<BitCastInst>(GlobalUser) ||
866               isa<GetElementPtrInst>(GlobalUser) ||
867               isa<AddrSpaceCastInst>(GlobalUser)) &&
868              "Only expect load and stores!");
869     }
870   }
871 
872   if (Changed) {
873     LLVM_DEBUG(dbgs() << "OPTIMIZED LOADS FROM STORED ONCE POINTER: " << *GV
874                       << "\n");
875     ++NumGlobUses;
876   }
877 
878   // If we nuked all of the loads, then none of the stores are needed either,
879   // nor is the global.
880   if (AllNonStoreUsesGone) {
881     if (isLeakCheckerRoot(GV)) {
882       Changed |= CleanupPointerRootUsers(GV, GetTLI);
883     } else {
884       Changed = true;
885       CleanupConstantGlobalUsers(GV, DL);
886     }
887     if (GV->use_empty()) {
888       LLVM_DEBUG(dbgs() << "  *** GLOBAL NOW DEAD!\n");
889       Changed = true;
890       GV->eraseFromParent();
891       ++NumDeleted;
892     }
893   }
894   return Changed;
895 }
896 
897 /// Walk the use list of V, constant folding all of the instructions that are
898 /// foldable.
899 static void ConstantPropUsersOf(Value *V, const DataLayout &DL,
900                                 TargetLibraryInfo *TLI) {
901   for (Value::user_iterator UI = V->user_begin(), E = V->user_end(); UI != E; )
902     if (Instruction *I = dyn_cast<Instruction>(*UI++))
903       if (Constant *NewC = ConstantFoldInstruction(I, DL, TLI)) {
904         I->replaceAllUsesWith(NewC);
905 
906         // Advance UI to the next non-I use to avoid invalidating it!
907         // Instructions could multiply use V.
908         while (UI != E && *UI == I)
909           ++UI;
910         if (isInstructionTriviallyDead(I, TLI))
911           I->eraseFromParent();
912       }
913 }
914 
915 /// This function takes the specified global variable, and transforms the
916 /// program as if it always contained the result of the specified malloc.
917 /// Because it is always the result of the specified malloc, there is no reason
918 /// to actually DO the malloc.  Instead, turn the malloc into a global, and
919 /// rewrite any loads of GV as uses of the new global.
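/// A rough sketch of the result (illustrative): for a 4-byte allocation stored
/// into @g, the pass creates
///   @g.body = internal global [4 x i8] undef
/// and, if the old pointer was compared against null, an
///   @g.init = internal global i1 false
/// flag that records whether the storage has been initialized yet.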
920 static GlobalVariable *
921 OptimizeGlobalAddressOfAllocation(GlobalVariable *GV, CallInst *CI,
922                                   uint64_t AllocSize, Constant *InitVal,
923                                   const DataLayout &DL,
924                                   TargetLibraryInfo *TLI) {
925   LLVM_DEBUG(errs() << "PROMOTING GLOBAL: " << *GV << "  CALL = " << *CI
926                     << '\n');
927 
928   // Create global of type [AllocSize x i8].
929   Type *GlobalType = ArrayType::get(Type::getInt8Ty(GV->getContext()),
930                                     AllocSize);
931 
932   // Create the new global variable.  The contents of the allocated memory are
933   // undefined initially, so initialize with an undef value.
934   GlobalVariable *NewGV = new GlobalVariable(
935       *GV->getParent(), GlobalType, false, GlobalValue::InternalLinkage,
936       UndefValue::get(GlobalType), GV->getName() + ".body", nullptr,
937       GV->getThreadLocalMode());
938 
939   // Initialize the global at the point of the original call.  Note that this
940   // is a different point from the initialization referred to below for the
941   // nullability handling.  Subtlety: We have not proven the original global was
942   // only initialized once.  As such, we cannot fold this into the initializer
943   // of the new global, as we may need to re-init the storage multiple times.
944   if (!isa<UndefValue>(InitVal)) {
945     IRBuilder<> Builder(CI->getNextNode());
946     // TODO: Use alignment above if align!=1
947     Builder.CreateMemSet(NewGV, InitVal, AllocSize, std::nullopt);
948   }
949 
950   // Update users of the allocation to use the new global instead.
951   CI->replaceAllUsesWith(NewGV);
952 
953   // If there is a comparison against null, we will insert a global bool to
954   // keep track of whether the global was initialized yet or not.
955   GlobalVariable *InitBool = new GlobalVariable(
956       Type::getInt1Ty(GV->getContext()), false, GlobalValue::InternalLinkage,
957       ConstantInt::getFalse(GV->getContext()), GV->getName() + ".init",
958       GV->getThreadLocalMode(), GV->getAddressSpace());
959   bool InitBoolUsed = false;
960 
961   // Loop over all instruction uses of GV, processing them in turn.
962   SmallVector<Value *, 4> Guses;
963   allUsesOfLoadAndStores(GV, Guses);
964   for (auto *U : Guses) {
965     if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
966       // The global is initialized when the store to it occurs. If the stored
967       // value is null, the global bool is set to false, otherwise to true.
968       new StoreInst(ConstantInt::getBool(
969                         GV->getContext(),
970                         !isa<ConstantPointerNull>(SI->getValueOperand())),
971                     InitBool, false, Align(1), SI->getOrdering(),
972                     SI->getSyncScopeID(), SI->getIterator());
973       SI->eraseFromParent();
974       continue;
975     }
976 
977     LoadInst *LI = cast<LoadInst>(U);
978     while (!LI->use_empty()) {
979       Use &LoadUse = *LI->use_begin();
980       ICmpInst *ICI = dyn_cast<ICmpInst>(LoadUse.getUser());
981       if (!ICI) {
982         LoadUse.set(NewGV);
983         continue;
984       }
985 
986       // Replace the cmp X, 0 with a use of the bool value.
987       Value *LV = new LoadInst(InitBool->getValueType(), InitBool,
988                                InitBool->getName() + ".val", false, Align(1),
989                                LI->getOrdering(), LI->getSyncScopeID(),
990                                LI->getIterator());
991       InitBoolUsed = true;
992       switch (ICI->getPredicate()) {
993       default: llvm_unreachable("Unknown ICmp Predicate!");
994       case ICmpInst::ICMP_ULT: // X < null -> always false
995         LV = ConstantInt::getFalse(GV->getContext());
996         break;
997       case ICmpInst::ICMP_UGE: // X >= null -> always true
998         LV = ConstantInt::getTrue(GV->getContext());
999         break;
1000       case ICmpInst::ICMP_ULE:
1001       case ICmpInst::ICMP_EQ:
1002         LV = BinaryOperator::CreateNot(LV, "notinit", ICI->getIterator());
1003         break;
1004       case ICmpInst::ICMP_NE:
1005       case ICmpInst::ICMP_UGT:
1006         break;  // no change.
1007       }
1008       ICI->replaceAllUsesWith(LV);
1009       ICI->eraseFromParent();
1010     }
1011     LI->eraseFromParent();
1012   }
1013 
1014   // If the initialization boolean was used, insert it, otherwise delete it.
1015   if (!InitBoolUsed) {
1016     while (!InitBool->use_empty())  // Delete initializations
1017       cast<StoreInst>(InitBool->user_back())->eraseFromParent();
1018     delete InitBool;
1019   } else
1020     GV->getParent()->insertGlobalVariable(GV->getIterator(), InitBool);
1021 
1022   // Now that the GV is dead, nuke it and the allocation.
1023   GV->eraseFromParent();
1024   CI->eraseFromParent();
1025 
1026   // To further other optimizations, loop over all users of NewGV and try to
1027   // constant prop them.  This will promote GEP instructions with constant
1028   // indices into GEP constant-exprs, which will allow global-opt to hack on it.
1029   ConstantPropUsersOf(NewGV, DL, TLI);
1030 
1031   return NewGV;
1032 }
1033 
1034 /// Scan the use-list of GV checking to make sure that there are no complex uses
1035 /// of GV.  We permit simple things like dereferencing the pointer, but not
1036 /// storing through the address, unless it is to the specified global.
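/// For example (illustrative): loading from the allocated pointer or comparing
/// it against another pointer is fine, and so is storing it into GV itself, but
/// storing the pointer into any other memory location disqualifies it.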
1037 static bool
1038 valueIsOnlyUsedLocallyOrStoredToOneGlobal(const CallInst *CI,
1039                                           const GlobalVariable *GV) {
1040   SmallPtrSet<const Value *, 4> Visited;
1041   SmallVector<const Value *, 4> Worklist;
1042   Worklist.push_back(CI);
1043 
1044   while (!Worklist.empty()) {
1045     const Value *V = Worklist.pop_back_val();
1046     if (!Visited.insert(V).second)
1047       continue;
1048 
1049     for (const Use &VUse : V->uses()) {
1050       const User *U = VUse.getUser();
1051       if (isa<LoadInst>(U) || isa<CmpInst>(U))
1052         continue; // Fine, ignore.
1053 
1054       if (auto *SI = dyn_cast<StoreInst>(U)) {
1055         if (SI->getValueOperand() == V &&
1056             SI->getPointerOperand()->stripPointerCasts() != GV)
1057           return false; // Storing the pointer not into GV... bad.
1058         continue; // Otherwise, storing through it, or storing into GV... fine.
1059       }
1060 
1061       if (auto *GEPI = dyn_cast<GetElementPtrInst>(U)) {
1062         Worklist.push_back(GEPI);
1063         continue;
1064       }
1065 
1066       return false;
1067     }
1068   }
1069 
1070   return true;
1071 }
1072 
1073 /// If we have a global that is only initialized with a fixed size allocation
1074 /// try to transform the program to use global memory instead of heap
1075 /// allocated memory. This eliminates dynamic allocation, avoids an indirection
1076 /// accessing the data, and exposes the resultant global to further GlobalOpt.
1077 static bool tryToOptimizeStoreOfAllocationToGlobal(GlobalVariable *GV,
1078                                                    CallInst *CI,
1079                                                    const DataLayout &DL,
1080                                                    TargetLibraryInfo *TLI) {
1081   if (!isRemovableAlloc(CI, TLI))
1082     // Must be able to remove the call when we get done..
1083     // Must be able to remove the call when we are done.
1084 
1085   Type *Int8Ty = Type::getInt8Ty(CI->getFunction()->getContext());
1086   Constant *InitVal = getInitialValueOfAllocation(CI, TLI, Int8Ty);
1087   if (!InitVal)
1088     // Must be able to emit a memset for initialization
1089     return false;
1090 
1091   uint64_t AllocSize;
1092   if (!getObjectSize(CI, AllocSize, DL, TLI, ObjectSizeOpts()))
1093     return false;
1094 
1095   // Restrict this transformation to only working on small allocations
1096   // (2048 bytes currently), as we don't want to introduce a 16M global or
1097   // something.
1098   if (AllocSize >= 2048)
1099     return false;
1100 
1101   // We can't optimize this global unless all uses of it are *known* to be
1102   // of the malloc value, not of the null initializer value (consider a use
1103   // that compares the global's value against zero to see if the malloc has
1104   // been reached).  To do this, we check to see if all uses of the global
1105   // would trap if the global were null: this proves that they must all
1106   // happen after the malloc.
1107   if (!allUsesOfLoadedValueWillTrapIfNull(GV))
1108     return false;
1109 
1110   // We can't optimize this if the malloc itself is used in a complex way,
1111   // for example, being stored into multiple globals.  This allows the
1112   // malloc to be stored into the specified global, loaded, gep'd, and icmp'd.
1113   // These are all uses we could transform to use the global instead.
1114   if (!valueIsOnlyUsedLocallyOrStoredToOneGlobal(CI, GV))
1115     return false;
1116 
1117   OptimizeGlobalAddressOfAllocation(GV, CI, AllocSize, InitVal, DL, TLI);
1118   return true;
1119 }
1120 
1121 // Try to optimize globals based on the knowledge that only one value (besides
1122 // its initializer) is ever stored to the global.
1123 static bool
1124 optimizeOnceStoredGlobal(GlobalVariable *GV, Value *StoredOnceVal,
1125                          const DataLayout &DL,
1126                          function_ref<TargetLibraryInfo &(Function &)> GetTLI) {
1127   // Ignore no-op GEPs and bitcasts.
1128   StoredOnceVal = StoredOnceVal->stripPointerCasts();
1129 
1130   // If we are dealing with a pointer global that is initialized to null and
1131   // only has one (non-null) value stored into it, then we can optimize any
1132   // users of the loaded value (often calls and loads) that would trap if the
1133   // value was null.
1134   if (GV->getInitializer()->getType()->isPointerTy() &&
1135       GV->getInitializer()->isNullValue() &&
1136       StoredOnceVal->getType()->isPointerTy() &&
1137       !NullPointerIsDefined(
1138           nullptr /* F */,
1139           GV->getInitializer()->getType()->getPointerAddressSpace())) {
1140     if (Constant *SOVC = dyn_cast<Constant>(StoredOnceVal)) {
1141       // Optimize away any trapping uses of the loaded value.
1142       if (OptimizeAwayTrappingUsesOfLoads(GV, SOVC, DL, GetTLI))
1143         return true;
1144     } else if (isAllocationFn(StoredOnceVal, GetTLI)) {
1145       if (auto *CI = dyn_cast<CallInst>(StoredOnceVal)) {
1146         auto *TLI = &GetTLI(*CI->getFunction());
1147         if (tryToOptimizeStoreOfAllocationToGlobal(GV, CI, DL, TLI))
1148           return true;
1149       }
1150     }
1151   }
1152 
1153   return false;
1154 }
1155 
1156 /// At this point, we have learned that the only two values ever stored into GV
1157 /// are its initializer and OtherVal.  See if we can shrink the global into a
1158 /// boolean and select between the two values whenever it is used.  This exposes
1159 /// the values to other scalar optimizations.
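/// A minimal sketch of the effect (illustrative): a global
///   @g = internal global i32 0
/// whose only stored values are 0 (its initializer) and 42 is replaced by an i1
/// global (created as @g.b and then given @g's name) initialized to false.
/// Stores then write i1 false/true, and each load is rewritten to
///   %b = load i1, ptr @g.b
///   %v = select i1 %b, i32 42, i32 0
/// (or a simple zext when the two stored values are exactly 0 and 1).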
1160 static bool TryToShrinkGlobalToBoolean(GlobalVariable *GV, Constant *OtherVal) {
1161   Type *GVElType = GV->getValueType();
1162 
1163   // If GVElType is already i1, it is already shrunk.  If the type of the GV is
1164   // an FP value, pointer or vector, don't do this optimization because a select
1165   // between them is very expensive and unlikely to lead to later
1166   // simplification.  In these cases, we typically end up with "cond ? v1 : v2"
1167   // where v1 and v2 both require constant pool loads, a big loss.
1168   if (GVElType == Type::getInt1Ty(GV->getContext()) ||
1169       GVElType->isFloatingPointTy() ||
1170       GVElType->isPointerTy() || GVElType->isVectorTy())
1171     return false;
1172 
1173   // Walk the use list of the global, checking that all the uses are loads or stores.
1174   // If there is anything else, bail out.
1175   for (User *U : GV->users()) {
1176     if (!isa<LoadInst>(U) && !isa<StoreInst>(U))
1177       return false;
1178     if (getLoadStoreType(U) != GVElType)
1179       return false;
1180   }
1181 
1182   LLVM_DEBUG(dbgs() << "   *** SHRINKING TO BOOL: " << *GV << "\n");
1183 
1184   // Create the new global, initializing it to false.
1185   GlobalVariable *NewGV = new GlobalVariable(Type::getInt1Ty(GV->getContext()),
1186                                              false,
1187                                              GlobalValue::InternalLinkage,
1188                                         ConstantInt::getFalse(GV->getContext()),
1189                                              GV->getName()+".b",
1190                                              GV->getThreadLocalMode(),
1191                                              GV->getType()->getAddressSpace());
1192   NewGV->copyAttributesFrom(GV);
1193   GV->getParent()->insertGlobalVariable(GV->getIterator(), NewGV);
1194 
1195   Constant *InitVal = GV->getInitializer();
1196   assert(InitVal->getType() != Type::getInt1Ty(GV->getContext()) &&
1197          "No reason to shrink to bool!");
1198 
1199   SmallVector<DIGlobalVariableExpression *, 1> GVs;
1200   GV->getDebugInfo(GVs);
1201 
1202   // If initialized to zero and storing one into the global, we can use a cast
1203   // instead of a select to synthesize the desired value.
1204   bool IsOneZero = false;
1205   bool EmitOneOrZero = true;
1206   auto *CI = dyn_cast<ConstantInt>(OtherVal);
1207   if (CI && CI->getValue().getActiveBits() <= 64) {
1208     IsOneZero = InitVal->isNullValue() && CI->isOne();
1209 
1210     auto *CIInit = dyn_cast<ConstantInt>(GV->getInitializer());
1211     if (CIInit && CIInit->getValue().getActiveBits() <= 64) {
1212       uint64_t ValInit = CIInit->getZExtValue();
1213       uint64_t ValOther = CI->getZExtValue();
1214       uint64_t ValMinus = ValOther - ValInit;
1215 
1216       for (auto *GVe : GVs) {
1217         DIGlobalVariable *DGV = GVe->getVariable();
1218         DIExpression *E = GVe->getExpression();
1219         const DataLayout &DL = GV->getDataLayout();
1220         unsigned SizeInOctets =
1221             DL.getTypeAllocSizeInBits(NewGV->getValueType()) / 8;
1222 
1223         // It is expected that the address of the optimized global variable is
1224         // on top of the stack. After optimization, the value of that variable
1225         // will be either 0 for the initial value or 1 for the other value. The
1226         // following expression should return a constant integer value depending
1227         // on the value at the global object's address:
1228         // val * (ValOther - ValInit) + ValInit:
1229         // DW_OP_deref_size <SizeInOctets> DW_OP_constu <ValMinus>
1230         // DW_OP_mul DW_OP_constu <ValInit> DW_OP_plus DW_OP_stack_value
1231         SmallVector<uint64_t, 12> Ops = {
1232             dwarf::DW_OP_deref_size, SizeInOctets,
1233             dwarf::DW_OP_constu, ValMinus,
1234             dwarf::DW_OP_mul, dwarf::DW_OP_constu, ValInit,
1235             dwarf::DW_OP_plus};
1236         bool WithStackValue = true;
1237         E = DIExpression::prependOpcodes(E, Ops, WithStackValue);
1238         DIGlobalVariableExpression *DGVE =
1239           DIGlobalVariableExpression::get(NewGV->getContext(), DGV, E);
1240         NewGV->addDebugInfo(DGVE);
1241      }
1242       }
1243       EmitOneOrZero = false;
1244   }
1245 
1246   if (EmitOneOrZero) {
1247     // FIXME: This will only emit the address for the debugger; the value
1248     // written there will only ever be 0 or 1.
1249     for (auto *GV : GVs)
1250       NewGV->addDebugInfo(GV);
1251   }
1252 
1253   while (!GV->use_empty()) {
1254     Instruction *UI = cast<Instruction>(GV->user_back());
1255     if (StoreInst *SI = dyn_cast<StoreInst>(UI)) {
1256       // Change the store into a boolean store.
1257       bool StoringOther = SI->getOperand(0) == OtherVal;
1258       // Only do this if we weren't storing a loaded value.
1259       Value *StoreVal;
1260       if (StoringOther || SI->getOperand(0) == InitVal) {
1261         StoreVal = ConstantInt::get(Type::getInt1Ty(GV->getContext()),
1262                                     StoringOther);
1263       } else {
1264         // Otherwise, we are storing a previously loaded copy.  To do this,
1265         // change the copy from copying the original value to just copying the
1266         // bool.
1267         Instruction *StoredVal = cast<Instruction>(SI->getOperand(0));
1268 
1269         // If we've already replaced the input, StoredVal will be a cast or
1270         // select instruction.  If not, it will be a load of the original
1271         // global.
1272         if (LoadInst *LI = dyn_cast<LoadInst>(StoredVal)) {
1273           assert(LI->getOperand(0) == GV && "Not a copy!");
1274           // Insert a new load, to preserve the saved value.
1275           StoreVal =
1276               new LoadInst(NewGV->getValueType(), NewGV, LI->getName() + ".b",
1277                            false, Align(1), LI->getOrdering(),
1278                            LI->getSyncScopeID(), LI->getIterator());
1279         } else {
1280           assert((isa<CastInst>(StoredVal) || isa<SelectInst>(StoredVal)) &&
1281                  "This is not a form that we understand!");
1282           StoreVal = StoredVal->getOperand(0);
1283           assert(isa<LoadInst>(StoreVal) && "Not a load of NewGV!");
1284         }
1285       }
1286       StoreInst *NSI =
1287           new StoreInst(StoreVal, NewGV, false, Align(1), SI->getOrdering(),
1288                         SI->getSyncScopeID(), SI->getIterator());
1289       NSI->setDebugLoc(SI->getDebugLoc());
1290     } else {
1291       // Change the load into a load of bool then a select.
1292       LoadInst *LI = cast<LoadInst>(UI);
1293       LoadInst *NLI = new LoadInst(
1294           NewGV->getValueType(), NewGV, LI->getName() + ".b", false, Align(1),
1295           LI->getOrdering(), LI->getSyncScopeID(), LI->getIterator());
1296       Instruction *NSI;
1297       if (IsOneZero)
1298         NSI = new ZExtInst(NLI, LI->getType(), "", LI->getIterator());
1299       else
1300         NSI = SelectInst::Create(NLI, OtherVal, InitVal, "", LI->getIterator());
1301       NSI->takeName(LI);
1302       // Since LI is split into two instructions, NLI and NSI both inherit the
1303       // same DebugLoc
1304       NLI->setDebugLoc(LI->getDebugLoc());
1305       NSI->setDebugLoc(LI->getDebugLoc());
1306       LI->replaceAllUsesWith(NSI);
1307     }
1308     UI->eraseFromParent();
1309   }
1310 
1311   // Retain the name of the old global variable. People who are debugging their
1312   // programs may expect these variables to be named the same.
1313   NewGV->takeName(GV);
1314   GV->eraseFromParent();
1315   return true;
1316 }
1317 
1318 static bool
1319 deleteIfDead(GlobalValue &GV,
1320              SmallPtrSetImpl<const Comdat *> &NotDiscardableComdats,
1321              function_ref<void(Function &)> DeleteFnCallback = nullptr) {
1322   GV.removeDeadConstantUsers();
1323 
1324   if (!GV.isDiscardableIfUnused() && !GV.isDeclaration())
1325     return false;
1326 
1327   if (const Comdat *C = GV.getComdat())
1328     if (!GV.hasLocalLinkage() && NotDiscardableComdats.count(C))
1329       return false;
1330 
1331   bool Dead;
1332   if (auto *F = dyn_cast<Function>(&GV))
1333     Dead = (F->isDeclaration() && F->use_empty()) || F->isDefTriviallyDead();
1334   else
1335     Dead = GV.use_empty();
1336   if (!Dead)
1337     return false;
1338 
1339   LLVM_DEBUG(dbgs() << "GLOBAL DEAD: " << GV << "\n");
1340   if (auto *F = dyn_cast<Function>(&GV)) {
1341     if (DeleteFnCallback)
1342       DeleteFnCallback(*F);
1343   }
1344   ReplaceableMetadataImpl::SalvageDebugInfo(GV);
1345   GV.eraseFromParent();
1346   ++NumDeleted;
1347   return true;
1348 }
1349 
1350 static bool isPointerValueDeadOnEntryToFunction(
1351     const Function *F, GlobalValue *GV,
1352     function_ref<DominatorTree &(Function &)> LookupDomTree) {
1353   // Find all uses of GV. We expect them all to be in F, and if we can't
1354   // identify all of the uses, we bail out.
1355   //
1356   // On each of these uses, identify if the memory that GV points to is
1357   // used/required/live at the start of the function. If it is not, for example
1358   // if the first thing the function does is store to the GV, the GV can
1359   // possibly be demoted.
1360   //
1361   // We don't do an exhaustive search for memory operations - simply look
1362   // through bitcasts as they're quite common and benign.
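  // For example (illustrative IR), the memory @g points to is dead on entry
  // here, because the first access is a store that dominates the later load:
  //   define void @f() {
  //     store i32 1, ptr @g
  //     %v = load i32, ptr @g
  //     ...
  //   }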
1363   const DataLayout &DL = GV->getDataLayout();
1364   SmallVector<LoadInst *, 4> Loads;
1365   SmallVector<StoreInst *, 4> Stores;
1366   for (auto *U : GV->users()) {
1367     Instruction *I = dyn_cast<Instruction>(U);
1368     if (!I)
1369       return false;
1370     assert(I->getParent()->getParent() == F);
1371 
1372     if (auto *LI = dyn_cast<LoadInst>(I))
1373       Loads.push_back(LI);
1374     else if (auto *SI = dyn_cast<StoreInst>(I))
1375       Stores.push_back(SI);
1376     else
1377       return false;
1378   }
1379 
1380   // We have identified all uses of GV into loads and stores. Now check if all
1381   // of them are known not to depend on the value of the global at the function
1382   // entry point. We do this by ensuring that every load is dominated by at
1383   // least one store.
1384   auto &DT = LookupDomTree(*const_cast<Function *>(F));
1385 
1386   // The below check is quadratic. Check we're not going to do too many tests.
1387   // FIXME: Even though this will always have worst-case quadratic time, we
1388   // could put effort into minimizing the average time by putting stores that
1389   // have been shown to dominate at least one load at the beginning of the
1390   // Stores array, making subsequent dominance checks more likely to succeed
1391   // early.
1392   //
1393   // The threshold here is fairly large because global->local demotion is a
1394   // very powerful optimization should it fire.
1395   const unsigned Threshold = 100;
1396   if (Loads.size() * Stores.size() > Threshold)
1397     return false;
1398 
1399   for (auto *L : Loads) {
1400     auto *LTy = L->getType();
1401     if (none_of(Stores, [&](const StoreInst *S) {
1402           auto *STy = S->getValueOperand()->getType();
1403           // The load is only dominated by the store if DomTree says so
1404           // and the number of bits loaded in L is less than or equal to
1405           // the number of bits stored in S.
1406           return DT.dominates(S, L) &&
1407                  DL.getTypeStoreSize(LTy).getFixedValue() <=
1408                      DL.getTypeStoreSize(STy).getFixedValue();
1409         }))
1410       return false;
1411   }
1412   // All loads have known dependences inside F, so the global can be localized.
1413   return true;
1414 }
1415 
1416 // For a global variable with one store, if the store dominates any loads,
1417 // those loads will always load the stored value (as opposed to the
1418 // initializer), even in the presence of recursion.
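// For example (illustrative IR), the load below is replaced with the stored
// constant because the single store dominates it:
//   @g = internal global i32 0
//   define void @f() {
//     store i32 42, ptr @g
//     %v = load i32, ptr @g   ; forwarded to i32 42
//     ...
//   }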
1419 static bool forwardStoredOnceStore(
1420     GlobalVariable *GV, const StoreInst *StoredOnceStore,
1421     function_ref<DominatorTree &(Function &)> LookupDomTree) {
1422   const Value *StoredOnceValue = StoredOnceStore->getValueOperand();
1423   // We can do this optimization for non-constants in nosync + norecurse
1424   // functions, but globals used in exactly one norecurse functions are already
1425   // functions, but globals used in exactly one norecurse function are already
1426   if (!isa<Constant>(StoredOnceValue))
1427     return false;
1428   const Function *F = StoredOnceStore->getFunction();
1429   SmallVector<LoadInst *> Loads;
1430   for (User *U : GV->users()) {
1431     if (auto *LI = dyn_cast<LoadInst>(U)) {
1432       if (LI->getFunction() == F &&
1433           LI->getType() == StoredOnceValue->getType() && LI->isSimple())
1434         Loads.push_back(LI);
1435     }
1436   }
1437   // Only compute DT if we have any loads to examine.
1438   bool MadeChange = false;
1439   if (!Loads.empty()) {
1440     auto &DT = LookupDomTree(*const_cast<Function *>(F));
1441     for (auto *LI : Loads) {
1442       if (DT.dominates(StoredOnceStore, LI)) {
1443         LI->replaceAllUsesWith(const_cast<Value *>(StoredOnceValue));
1444         LI->eraseFromParent();
1445         MadeChange = true;
1446       }
1447     }
1448   }
1449   return MadeChange;
1450 }
1451 
1452 /// Analyze the specified global variable and optimize
1453 /// it if possible.  If we make a change, return true.
1454 static bool
1455 processInternalGlobal(GlobalVariable *GV, const GlobalStatus &GS,
1456                       function_ref<TargetTransformInfo &(Function &)> GetTTI,
1457                       function_ref<TargetLibraryInfo &(Function &)> GetTLI,
1458                       function_ref<DominatorTree &(Function &)> LookupDomTree) {
1459   auto &DL = GV->getDataLayout();
1460   // If this is a first class global and has only one accessing function and
1461   // this function is non-recursive, we replace the global with a local alloca
1462   // in this function.
1463   //
1464   // NOTE: It doesn't make sense to promote non-single-value types since we
1465   // are just replacing static memory to stack memory.
1466   //
1467   // If the global is in a different address space, don't bring it to the stack.
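  // For example (illustrative IR), a global accessed only from one
  // non-recursive function:
  //   @g = internal global i32 0
  // is replaced inside that function with roughly:
  //   %g = alloca i32
  //   store i32 0, ptr %g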
1468   if (!GS.HasMultipleAccessingFunctions &&
1469       GS.AccessingFunction &&
1470       GV->getValueType()->isSingleValueType() &&
1471       GV->getType()->getAddressSpace() == DL.getAllocaAddrSpace() &&
1472       !GV->isExternallyInitialized() &&
1473       GS.AccessingFunction->doesNotRecurse() &&
1474       isPointerValueDeadOnEntryToFunction(GS.AccessingFunction, GV,
1475                                           LookupDomTree)) {
1476     const DataLayout &DL = GV->getDataLayout();
1477 
1478     LLVM_DEBUG(dbgs() << "LOCALIZING GLOBAL: " << *GV << "\n");
1479     BasicBlock::iterator FirstI =
1480         GS.AccessingFunction->getEntryBlock().begin().getNonConst();
1481     Type *ElemTy = GV->getValueType();
1482     // FIXME: Pass Global's alignment when globals have alignment
1483     AllocaInst *Alloca = new AllocaInst(ElemTy, DL.getAllocaAddrSpace(),
1484                                         nullptr, GV->getName(), FirstI);
1485     if (!isa<UndefValue>(GV->getInitializer()))
1486       new StoreInst(GV->getInitializer(), Alloca, FirstI);
1487 
1488     GV->replaceAllUsesWith(Alloca);
1489     GV->eraseFromParent();
1490     ++NumLocalized;
1491     return true;
1492   }
1493 
1494   bool Changed = false;
1495 
1496   // If the global is never loaded (but may be stored to), it is dead.
1497   // Delete it now.
1498   if (!GS.IsLoaded) {
1499     LLVM_DEBUG(dbgs() << "GLOBAL NEVER LOADED: " << *GV << "\n");
1500 
1501     if (isLeakCheckerRoot(GV)) {
1502       // Delete any constant stores to the global.
1503       Changed = CleanupPointerRootUsers(GV, GetTLI);
1504     } else {
1505       // Delete any stores we can find to the global.  We may not be able to
1506       // make it completely dead though.
1507       Changed = CleanupConstantGlobalUsers(GV, DL);
1508     }
1509 
1510     // If the global is dead now, delete it.
1511     if (GV->use_empty()) {
1512       GV->eraseFromParent();
1513       ++NumDeleted;
1514       Changed = true;
1515     }
1516     return Changed;
1517   }
1518 
1519   if (GS.StoredType <= GlobalStatus::InitializerStored) {
1520     LLVM_DEBUG(dbgs() << "MARKING CONSTANT: " << *GV << "\n");
1521 
1522     // Don't actually mark a global constant if it's atomic because atomic loads
1523     // are implemented by a trivial cmpxchg in some edge-cases and that usually
1524     // requires write access to the variable even if it's not actually changed.
1525     if (GS.Ordering == AtomicOrdering::NotAtomic) {
1526       assert(!GV->isConstant() && "Expected a non-constant global");
1527       GV->setConstant(true);
1528       Changed = true;
1529     }
1530 
1531     // Clean up any obviously simplifiable users now.
1532     Changed |= CleanupConstantGlobalUsers(GV, DL);
1533 
1534     // If the global is dead now, just nuke it.
1535     if (GV->use_empty()) {
1536       LLVM_DEBUG(dbgs() << "   *** Marking constant allowed us to simplify "
1537                         << "all users and delete global!\n");
1538       GV->eraseFromParent();
1539       ++NumDeleted;
1540       return true;
1541     }
1542 
1543     // Fall through to the next check; see if we can optimize further.
1544     ++NumMarked;
1545   }
1546   if (!GV->getInitializer()->getType()->isSingleValueType()) {
1547     const DataLayout &DL = GV->getDataLayout();
1548     if (SRAGlobal(GV, DL))
1549       return true;
1550   }
1551   Value *StoredOnceValue = GS.getStoredOnceValue();
1552   if (GS.StoredType == GlobalStatus::StoredOnce && StoredOnceValue) {
1553     Function &StoreFn =
1554         const_cast<Function &>(*GS.StoredOnceStore->getFunction());
1555     bool CanHaveNonUndefGlobalInitializer =
1556         GetTTI(StoreFn).canHaveNonUndefGlobalInitializerInAddressSpace(
1557             GV->getType()->getAddressSpace());
1558     // If the initial value for the global was an undef value, and if only
1559     // one other value was stored into it, we can just change the
1560     // initializer to be the stored value, then delete all stores to the
1561     // global.  This allows us to mark it constant.
1562     // This is restricted to address spaces that allow globals to have
1563     // initializers. NVPTX, for example, does not support initializers for
1564     // shared memory (AS 3).
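    // For example (illustrative IR), given
    //   @g = internal global i32 undef
    // and a single
    //   store i32 7, ptr @g
    // the initializer becomes i32 7, after which the cleanup below can often
    // simplify the remaining users and even delete @g entirely.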
1565     auto *SOVConstant = dyn_cast<Constant>(StoredOnceValue);
1566     if (SOVConstant && isa<UndefValue>(GV->getInitializer()) &&
1567         DL.getTypeAllocSize(SOVConstant->getType()) ==
1568             DL.getTypeAllocSize(GV->getValueType()) &&
1569         CanHaveNonUndefGlobalInitializer) {
1570       if (SOVConstant->getType() == GV->getValueType()) {
1571         // Change the initializer in place.
1572         GV->setInitializer(SOVConstant);
1573       } else {
1574         // Create a new global with adjusted type.
1575         auto *NGV = new GlobalVariable(
1576             *GV->getParent(), SOVConstant->getType(), GV->isConstant(),
1577             GV->getLinkage(), SOVConstant, "", GV, GV->getThreadLocalMode(),
1578             GV->getAddressSpace());
1579         NGV->takeName(GV);
1580         NGV->copyAttributesFrom(GV);
1581         GV->replaceAllUsesWith(NGV);
1582         GV->eraseFromParent();
1583         GV = NGV;
1584       }
1585 
1586       // Clean up any obviously simplifiable users now.
1587       CleanupConstantGlobalUsers(GV, DL);
1588 
1589       if (GV->use_empty()) {
1590         LLVM_DEBUG(dbgs() << "   *** Substituting initializer allowed us to "
1591                           << "simplify all users and delete global!\n");
1592         GV->eraseFromParent();
1593         ++NumDeleted;
1594       }
1595       ++NumSubstitute;
1596       return true;
1597     }
1598 
1599     // Try to optimize globals based on the knowledge that only one value
1600     // (besides its initializer) is ever stored to the global.
1601     if (optimizeOnceStoredGlobal(GV, StoredOnceValue, DL, GetTLI))
1602       return true;
1603 
1604     // Try to forward the store to any loads. If we have more than one store, we
1605     // may have a store of the initializer between StoredOnceStore and a load.
1606     if (GS.NumStores == 1)
1607       if (forwardStoredOnceStore(GV, GS.StoredOnceStore, LookupDomTree))
1608         return true;
1609 
1610     // Otherwise, if the global was not a boolean, we can shrink it to be a
1611     // boolean. Skip this for address spaces that do not allow initializers.
1612     if (SOVConstant && GS.Ordering == AtomicOrdering::NotAtomic &&
1613         (!isa<UndefValue>(GV->getInitializer()) ||
1614          CanHaveNonUndefGlobalInitializer)) {
1615       if (TryToShrinkGlobalToBoolean(GV, SOVConstant)) {
1616         ++NumShrunkToBool;
1617         return true;
1618       }
1619     }
1620   }
1621 
1622   return Changed;
1623 }
1624 
1625 /// Analyze the specified global variable and optimize it if possible.  If we
1626 /// make a change, return true.
1627 static bool
1628 processGlobal(GlobalValue &GV,
1629               function_ref<TargetTransformInfo &(Function &)> GetTTI,
1630               function_ref<TargetLibraryInfo &(Function &)> GetTLI,
1631               function_ref<DominatorTree &(Function &)> LookupDomTree) {
1632   if (GV.getName().starts_with("llvm."))
1633     return false;
1634 
1635   GlobalStatus GS;
1636 
1637   if (GlobalStatus::analyzeGlobal(&GV, GS))
1638     return false;
1639 
1640   bool Changed = false;
1641   if (!GS.IsCompared && !GV.hasGlobalUnnamedAddr()) {
1642     auto NewUnnamedAddr = GV.hasLocalLinkage() ? GlobalValue::UnnamedAddr::Global
1643                                                : GlobalValue::UnnamedAddr::Local;
1644     if (NewUnnamedAddr != GV.getUnnamedAddr()) {
1645       GV.setUnnamedAddr(NewUnnamedAddr);
1646       NumUnnamed++;
1647       Changed = true;
1648     }
1649   }
1650 
1651   // Do more involved optimizations if the global is internal.
1652   if (!GV.hasLocalLinkage())
1653     return Changed;
1654 
1655   auto *GVar = dyn_cast<GlobalVariable>(&GV);
1656   if (!GVar)
1657     return Changed;
1658 
1659   if (GVar->isConstant() || !GVar->hasInitializer())
1660     return Changed;
1661 
1662   return processInternalGlobal(GVar, GS, GetTTI, GetTLI, LookupDomTree) ||
1663          Changed;
1664 }
1665 
1666 /// Walk all of the direct calls of the specified function, changing them to
1667 /// FastCC.
1668 static void ChangeCalleesToFastCall(Function *F) {
1669   for (User *U : F->users()) {
1670     if (isa<BlockAddress>(U))
1671       continue;
1672     cast<CallBase>(U)->setCallingConv(CallingConv::Fast);
1673   }
1674 }
1675 
1676 static AttributeList StripAttr(LLVMContext &C, AttributeList Attrs,
1677                                Attribute::AttrKind A) {
1678   unsigned AttrIndex;
1679   if (Attrs.hasAttrSomewhere(A, &AttrIndex))
1680     return Attrs.removeAttributeAtIndex(C, AttrIndex, A);
1681   return Attrs;
1682 }
1683 
1684 static void RemoveAttribute(Function *F, Attribute::AttrKind A) {
1685   F->setAttributes(StripAttr(F->getContext(), F->getAttributes(), A));
1686   for (User *U : F->users()) {
1687     if (isa<BlockAddress>(U))
1688       continue;
1689     CallBase *CB = cast<CallBase>(U);
1690     CB->setAttributes(StripAttr(F->getContext(), CB->getAttributes(), A));
1691   }
1692 }
1693 
1694 /// Return true if this is a calling convention that we'd like to change.  The
1695 /// idea here is that we don't want to mess with the convention if the user
1696 /// explicitly requested something with performance implications like coldcc,
1697 /// GHC, or anyregcc.
1698 static bool hasChangeableCCImpl(Function *F) {
1699   CallingConv::ID CC = F->getCallingConv();
1700 
1701   // FIXME: Is it worth transforming x86_stdcallcc and x86_fastcallcc?
1702   if (CC != CallingConv::C && CC != CallingConv::X86_ThisCall)
1703     return false;
1704 
1705   if (F->isVarArg())
1706     return false;
1707 
1708   // FIXME: Change CC for the whole chain of musttail calls when possible.
1709   //
1710   // Can't change the CC of a function that either has musttail calls, or is a
1711   // musttail callee itself.
1712   for (User *U : F->users()) {
1713     if (isa<BlockAddress>(U))
1714       continue;
1715     CallInst* CI = dyn_cast<CallInst>(U);
1716     if (!CI)
1717       continue;
1718 
1719     if (CI->isMustTailCall())
1720       return false;
1721   }
1722 
1723   for (BasicBlock &BB : *F)
1724     if (BB.getTerminatingMustTailCall())
1725       return false;
1726 
1727   return !F->hasAddressTaken();
1728 }
1729 
1730 using ChangeableCCCacheTy = SmallDenseMap<Function *, bool, 8>;
1731 static bool hasChangeableCC(Function *F,
1732                             ChangeableCCCacheTy &ChangeableCCCache) {
1733   auto Res = ChangeableCCCache.try_emplace(F, false);
1734   if (Res.second)
1735     Res.first->second = hasChangeableCCImpl(F);
1736   return Res.first->second;
1737 }
1738 
1739 /// Return true if the block containing the call site has a BlockFrequency of
1740 /// less than ColdCCRelFreq% of the entry block.
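/// That is (illustratively), the call site is considered cold when
///   freq(CallSiteBB) < freq(EntryBlock) * ColdCCRelFreq / 100.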
1741 static bool isColdCallSite(CallBase &CB, BlockFrequencyInfo &CallerBFI) {
1742   const BranchProbability ColdProb(ColdCCRelFreq, 100);
1743   auto *CallSiteBB = CB.getParent();
1744   auto CallSiteFreq = CallerBFI.getBlockFreq(CallSiteBB);
1745   auto CallerEntryFreq =
1746       CallerBFI.getBlockFreq(&(CB.getCaller()->getEntryBlock()));
1747   return CallSiteFreq < CallerEntryFreq * ColdProb;
1748 }
1749 
1750 // This function checks if the input function F is cold at all call sites. It
1751 // also looks at each call site's containing function, returning false if the
1752 // caller function contains other non-cold calls. The input vector AllCallsCold
1753 // contains a list of functions that only have call sites in cold blocks.
1754 static bool
1755 isValidCandidateForColdCC(Function &F,
1756                           function_ref<BlockFrequencyInfo &(Function &)> GetBFI,
1757                           const std::vector<Function *> &AllCallsCold) {
1758 
1759   if (F.user_empty())
1760     return false;
1761 
1762   for (User *U : F.users()) {
1763     if (isa<BlockAddress>(U))
1764       continue;
1765 
1766     CallBase &CB = cast<CallBase>(*U);
1767     Function *CallerFunc = CB.getParent()->getParent();
1768     BlockFrequencyInfo &CallerBFI = GetBFI(*CallerFunc);
1769     if (!isColdCallSite(CB, CallerBFI))
1770       return false;
1771     if (!llvm::is_contained(AllCallsCold, CallerFunc))
1772       return false;
1773   }
1774   return true;
1775 }
1776 
1777 static void changeCallSitesToColdCC(Function *F) {
1778   for (User *U : F->users()) {
1779     if (isa<BlockAddress>(U))
1780       continue;
1781     cast<CallBase>(U)->setCallingConv(CallingConv::Cold);
1782   }
1783 }
1784 
1785 // This function iterates over all the call instructions in the input Function
1786 // and checks that all call sites are in cold blocks and are allowed to use the
1787 // coldcc calling convention.
1788 static bool
1789 hasOnlyColdCalls(Function &F,
1790                  function_ref<BlockFrequencyInfo &(Function &)> GetBFI,
1791                  ChangeableCCCacheTy &ChangeableCCCache) {
1792   for (BasicBlock &BB : F) {
1793     for (Instruction &I : BB) {
1794       if (CallInst *CI = dyn_cast<CallInst>(&I)) {
1795         // Skip over inline asm instructions since they aren't function calls.
1796         if (CI->isInlineAsm())
1797           continue;
1798         Function *CalledFn = CI->getCalledFunction();
1799         if (!CalledFn)
1800           return false;
1801         // Skip over intrinsics since they won't remain as function calls.
1802         // Important to do this check before the linkage check below so we
1803         // won't bail out on debug intrinsics, possibly making the generated
1804         // code dependent on the presence of debug info.
1805         if (CalledFn->getIntrinsicID() != Intrinsic::not_intrinsic)
1806           continue;
1807         if (!CalledFn->hasLocalLinkage())
1808           return false;
1809         // Check if it's valid to use coldcc calling convention.
1810         if (!hasChangeableCC(CalledFn, ChangeableCCCache))
1811           return false;
1812         BlockFrequencyInfo &CallerBFI = GetBFI(F);
1813         if (!isColdCallSite(*CI, CallerBFI))
1814           return false;
1815       }
1816     }
1817   }
1818   return true;
1819 }
1820 
1821 static bool hasMustTailCallers(Function *F) {
1822   for (User *U : F->users()) {
1823     CallBase *CB = dyn_cast<CallBase>(U);
1824     if (!CB) {
1825       assert(isa<BlockAddress>(U) &&
1826              "Expected either CallBase or BlockAddress");
1827       continue;
1828     }
1829     if (CB->isMustTailCall())
1830       return true;
1831   }
1832   return false;
1833 }
1834 
1835 static bool hasInvokeCallers(Function *F) {
1836   for (User *U : F->users())
1837     if (isa<InvokeInst>(U))
1838       return true;
1839   return false;
1840 }
1841 
1842 static void RemovePreallocated(Function *F) {
1843   RemoveAttribute(F, Attribute::Preallocated);
1844 
1845   auto *M = F->getParent();
1846 
1847   IRBuilder<> Builder(M->getContext());
1848 
1849   // Cannot modify users() while iterating over it, so make a copy.
1850   SmallVector<User *, 4> PreallocatedCalls(F->users());
1851   for (User *U : PreallocatedCalls) {
1852     CallBase *CB = dyn_cast<CallBase>(U);
1853     if (!CB)
1854       continue;
1855 
1856     assert(
1857         !CB->isMustTailCall() &&
1858         "Shouldn't call RemovePreallocated() on a musttail preallocated call");
1859     // Create copy of call without "preallocated" operand bundle.
1860     SmallVector<OperandBundleDef, 1> OpBundles;
1861     CB->getOperandBundlesAsDefs(OpBundles);
1862     CallBase *PreallocatedSetup = nullptr;
1863     for (auto *It = OpBundles.begin(); It != OpBundles.end(); ++It) {
1864       if (It->getTag() == "preallocated") {
1865         PreallocatedSetup = cast<CallBase>(*It->input_begin());
1866         OpBundles.erase(It);
1867         break;
1868       }
1869     }
1870     assert(PreallocatedSetup && "Did not find preallocated bundle");
1871     uint64_t ArgCount =
1872         cast<ConstantInt>(PreallocatedSetup->getArgOperand(0))->getZExtValue();
1873 
1874     assert((isa<CallInst>(CB) || isa<InvokeInst>(CB)) &&
1875            "Unknown indirect call type");
1876     CallBase *NewCB = CallBase::Create(CB, OpBundles, CB->getIterator());
1877     CB->replaceAllUsesWith(NewCB);
1878     NewCB->takeName(CB);
1879     CB->eraseFromParent();
1880 
1881     Builder.SetInsertPoint(PreallocatedSetup);
1882     auto *StackSave = Builder.CreateStackSave();
1883     Builder.SetInsertPoint(NewCB->getNextNonDebugInstruction());
1884     Builder.CreateStackRestore(StackSave);
1885 
1886     // Replace @llvm.call.preallocated.arg() with alloca.
1887     // Cannot modify users() while iterating over it, so make a copy.
1888     // @llvm.call.preallocated.arg() can be called with the same index multiple
1889     // times. So for each @llvm.call.preallocated.arg(), we see if we have
1890     // already created a Value* for the index, and if not, create an alloca and
1891     // bitcast right after the @llvm.call.preallocated.setup() so that it
1892     // dominates all uses.
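    // For example (illustrative IR), a preallocated call sequence such as
    //   %t = call token @llvm.call.preallocated.setup(i32 1)
    //   %a = call ptr @llvm.call.preallocated.arg(token %t, i32 0) preallocated(i32)
    //   call void @f(ptr preallocated(i32) %a) ["preallocated"(token %t)]
    // is rewritten to roughly
    //   %ss = call ptr @llvm.stacksave()
    //   %a = alloca i32
    //   call void @f(ptr %a)
    //   call void @llvm.stackrestore(ptr %ss)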
1893     SmallVector<Value *, 2> ArgAllocas(ArgCount);
1894     SmallVector<User *, 2> PreallocatedArgs(PreallocatedSetup->users());
1895     for (auto *User : PreallocatedArgs) {
1896       auto *UseCall = cast<CallBase>(User);
1897       assert(UseCall->getCalledFunction()->getIntrinsicID() ==
1898                  Intrinsic::call_preallocated_arg &&
1899              "preallocated token use was not a llvm.call.preallocated.arg");
1900       uint64_t AllocArgIndex =
1901           cast<ConstantInt>(UseCall->getArgOperand(1))->getZExtValue();
1902       Value *AllocaReplacement = ArgAllocas[AllocArgIndex];
1903       if (!AllocaReplacement) {
1904         auto AddressSpace = UseCall->getType()->getPointerAddressSpace();
1905         auto *ArgType =
1906             UseCall->getFnAttr(Attribute::Preallocated).getValueAsType();
1907         auto *InsertBefore = PreallocatedSetup->getNextNonDebugInstruction();
1908         Builder.SetInsertPoint(InsertBefore);
1909         auto *Alloca =
1910             Builder.CreateAlloca(ArgType, AddressSpace, nullptr, "paarg");
1911         ArgAllocas[AllocArgIndex] = Alloca;
1912         AllocaReplacement = Alloca;
1913       }
1914 
1915       UseCall->replaceAllUsesWith(AllocaReplacement);
1916       UseCall->eraseFromParent();
1917     }
1918     // Remove @llvm.call.preallocated.setup().
1919     cast<Instruction>(PreallocatedSetup)->eraseFromParent();
1920   }
1921 }
1922 
1923 static bool
1924 OptimizeFunctions(Module &M,
1925                   function_ref<TargetLibraryInfo &(Function &)> GetTLI,
1926                   function_ref<TargetTransformInfo &(Function &)> GetTTI,
1927                   function_ref<BlockFrequencyInfo &(Function &)> GetBFI,
1928                   function_ref<DominatorTree &(Function &)> LookupDomTree,
1929                   SmallPtrSetImpl<const Comdat *> &NotDiscardableComdats,
1930                   function_ref<void(Function &F)> ChangedCFGCallback,
1931                   function_ref<void(Function &F)> DeleteFnCallback) {
1932 
1933   bool Changed = false;
1934 
1935   ChangeableCCCacheTy ChangeableCCCache;
1936   std::vector<Function *> AllCallsCold;
1937   for (Function &F : llvm::make_early_inc_range(M))
1938     if (hasOnlyColdCalls(F, GetBFI, ChangeableCCCache))
1939       AllCallsCold.push_back(&F);
1940 
1941   // Optimize functions.
1942   for (Function &F : llvm::make_early_inc_range(M)) {
1943     // Don't perform global opt pass on naked functions; we don't want fast
1944     // calling conventions for naked functions.
1945     if (F.hasFnAttribute(Attribute::Naked))
1946       continue;
1947 
1948     // Functions without names cannot be referenced outside this module.
1949     if (!F.hasName() && !F.isDeclaration() && !F.hasLocalLinkage())
1950       F.setLinkage(GlobalValue::InternalLinkage);
1951 
1952     if (deleteIfDead(F, NotDiscardableComdats, DeleteFnCallback)) {
1953       Changed = true;
1954       continue;
1955     }
1956 
1957     // LLVM's definition of dominance allows instructions that are cyclic
1958     // in unreachable blocks, e.g.:
1959     // %pat = select i1 %condition, @global, i16* %pat
1960     // because any instruction dominates an instruction in a block that's
1961     // not reachable from entry.
1962     // So, remove unreachable blocks from the function, because a) there's
1963     // no point in analyzing them and b) GlobalOpt should otherwise grow
1964     // some more complicated logic to break these cycles.
1965     // Notify the analysis manager that we've modified the function's CFG.
1966     if (!F.isDeclaration()) {
1967       if (removeUnreachableBlocks(F)) {
1968         Changed = true;
1969         ChangedCFGCallback(F);
1970       }
1971     }
1972 
1973     Changed |= processGlobal(F, GetTTI, GetTLI, LookupDomTree);
1974 
1975     if (!F.hasLocalLinkage())
1976       continue;
1977 
1978     // If we have an inalloca parameter that we can safely remove the
1979     // inalloca attribute from, do so. This unlocks optimizations that
1980     // wouldn't be safe in the presence of inalloca.
1981     // FIXME: We should also hoist alloca affected by this to the entry
1982     // block if possible.
1983     if (F.getAttributes().hasAttrSomewhere(Attribute::InAlloca) &&
1984         !F.hasAddressTaken() && !hasMustTailCallers(&F) && !F.isVarArg()) {
1985       RemoveAttribute(&F, Attribute::InAlloca);
1986       Changed = true;
1987     }
1988 
1989     // FIXME: handle invokes
1990     // FIXME: handle musttail
1991     if (F.getAttributes().hasAttrSomewhere(Attribute::Preallocated)) {
1992       if (!F.hasAddressTaken() && !hasMustTailCallers(&F) &&
1993           !hasInvokeCallers(&F)) {
1994         RemovePreallocated(&F);
1995         Changed = true;
1996       }
1997       continue;
1998     }
1999 
2000     if (hasChangeableCC(&F, ChangeableCCCache)) {
2001       NumInternalFunc++;
2002       TargetTransformInfo &TTI = GetTTI(F);
2003       // Change the calling convention to coldcc if either stress testing is
2004       // enabled or the target would like to use coldcc on functions which are
2005       // cold at all call sites and the callers contain no other non coldcc
2006       // cold at all call sites and the callers contain no other non-coldcc
2007       if (EnableColdCCStressTest ||
2008           (TTI.useColdCCForColdCall(F) &&
2009            isValidCandidateForColdCC(F, GetBFI, AllCallsCold))) {
2010         ChangeableCCCache.erase(&F);
2011         F.setCallingConv(CallingConv::Cold);
2012         changeCallSitesToColdCC(&F);
2013         Changed = true;
2014         NumColdCC++;
2015       }
2016     }
2017 
2018     if (hasChangeableCC(&F, ChangeableCCCache)) {
2019       // If this function has a calling convention worth changing, is not a
2020       // varargs function, and is only called directly, promote it to use the
2021       // Fast calling convention.
2022       F.setCallingConv(CallingConv::Fast);
2023       ChangeCalleesToFastCall(&F);
2024       ++NumFastCallFns;
2025       Changed = true;
2026     }
2027 
2028     if (F.getAttributes().hasAttrSomewhere(Attribute::Nest) &&
2029         !F.hasAddressTaken()) {
2030       // The function is not used by a trampoline intrinsic, so it is safe
2031       // to remove the 'nest' attribute.
2032       RemoveAttribute(&F, Attribute::Nest);
2033       ++NumNestRemoved;
2034       Changed = true;
2035     }
2036   }
2037   return Changed;
2038 }
2039 
2040 static bool callInstIsMemcpy(CallInst *CI) {
2041   if (!CI)
2042     return false;
2043 
2044   Function *F = CI->getCalledFunction();
2045   if (!F || !F->isIntrinsic() || F->getIntrinsicID() != Intrinsic::memcpy)
2046     return false;
2047 
2048   return true;
2049 }
2050 
2051 static bool destArrayCanBeWidened(CallInst *CI) {
2052   auto *IsVolatile = dyn_cast<ConstantInt>(CI->getArgOperand(3));
2053   auto *Alloca = dyn_cast<AllocaInst>(CI->getArgOperand(0));
2054 
2055   if (!Alloca || !IsVolatile || IsVolatile->isOne())
2056     return false;
2057 
2058   if (!Alloca->isStaticAlloca())
2059     return false;
2060 
2061   if (!Alloca->getAllocatedType()->isArrayTy())
2062     return false;
2063 
2064   return true;
2065 }
2066 
2067 static GlobalVariable *widenGlobalVariable(GlobalVariable *OldVar, Function *F,
2068                                            unsigned NumBytesToPad,
2069                                            unsigned NumBytesToCopy) {
2070   if (!OldVar->hasInitializer())
2071     return nullptr;
2072 
2073   ConstantDataArray *DataArray =
2074       dyn_cast<ConstantDataArray>(OldVar->getInitializer());
2075   if (!DataArray)
2076     return nullptr;
2077 
2078   // Update the source to be word aligned (memcpy(..., X, ...)) by creating a
2079   // replacement padded with null bytes.
2080   StringRef Data = DataArray->getRawDataValues();
2081   std::vector<uint8_t> StrData(Data.begin(), Data.end());
2082   for (unsigned int p = 0; p < NumBytesToPad; p++)
2083     StrData.push_back('\0');
2084   auto Arr = ArrayRef(StrData.data(), NumBytesToCopy + NumBytesToPad);
2085   // Create new padded version of global variable.
2086   Constant *SourceReplace = ConstantDataArray::get(F->getContext(), Arr);
2087   GlobalVariable *NewGV = new GlobalVariable(
2088       *(F->getParent()), SourceReplace->getType(), true, OldVar->getLinkage(),
2089       SourceReplace, SourceReplace->getName());
2090   // Copy any other attributes from the original global variable,
2091   // e.g. unnamed_addr.
2092   NewGV->copyAttributesFrom(OldVar);
2093   NewGV->takeName(OldVar);
2094   return NewGV;
2095 }
2096 
2097 static void widenDestArray(CallInst *CI, const unsigned NumBytesToPad,
2098                            const unsigned NumBytesToCopy,
2099                            ConstantDataArray *SourceDataArray) {
2100 
2101   auto *Alloca = dyn_cast<AllocaInst>(CI->getArgOperand(0));
2102   if (Alloca) {
2103     unsigned ElementByteWidth = SourceDataArray->getElementByteSize();
2104     unsigned int TotalBytes = NumBytesToCopy + NumBytesToPad;
2105     unsigned NumElementsToCopy = divideCeil(TotalBytes, ElementByteWidth);
2106     // Update destination array to be word aligned (memcpy(X,...,...))
2107     IRBuilder<> BuildAlloca(Alloca);
2108     AllocaInst *NewAlloca = BuildAlloca.CreateAlloca(ArrayType::get(
2109         Alloca->getAllocatedType()->getArrayElementType(), NumElementsToCopy));
2110     NewAlloca->takeName(Alloca);
2111     NewAlloca->setAlignment(Alloca->getAlign());
2112     Alloca->replaceAllUsesWith(NewAlloca);
2113     Alloca->eraseFromParent();
2114   }
2115 }
2116 
2117 static bool tryWidenGlobalArrayAndDests(Function *F, GlobalVariable *SourceVar,
2118                                         const unsigned NumBytesToPad,
2119                                         const unsigned NumBytesToCopy,
2120                                         ConstantInt *BytesToCopyOp,
2121                                         ConstantDataArray *SourceDataArray) {
2122   auto *NewSourceGV =
2123       widenGlobalVariable(SourceVar, F, NumBytesToPad, NumBytesToCopy);
2124   if (!NewSourceGV)
2125     return false;
2126 
2127   // Update the arguments of the remaining uses
2128   // that are memcpys of this source.
2129   for (auto *User : SourceVar->users()) {
2130     auto *CI = dyn_cast<CallInst>(User);
2131     if (!callInstIsMemcpy(CI) || !destArrayCanBeWidened(CI))
2132       continue;
2133 
2134     if (CI->getArgOperand(1) != SourceVar)
2135       continue;
2136 
2137     widenDestArray(CI, NumBytesToPad, NumBytesToCopy, SourceDataArray);
2138 
2139     CI->setArgOperand(2, ConstantInt::get(BytesToCopyOp->getType(),
2140                                           NumBytesToCopy + NumBytesToPad));
2141   }
2142   SourceVar->replaceAllUsesWith(NewSourceGV);
2143 
2144   NumGlobalArraysPadded++;
2145   return true;
2146 }
2147 
2148 static bool tryWidenGlobalArraysUsedByMemcpy(
2149     GlobalVariable *GV,
2150     function_ref<TargetTransformInfo &(Function &)> GetTTI) {
2151 
2152   if (!GV->hasInitializer() || !GV->isConstant() || !GV->hasLocalLinkage() ||
2153       !GV->hasGlobalUnnamedAddr())
2154     return false;
2155 
2156   for (auto *User : GV->users()) {
2157     CallInst *CI = dyn_cast<CallInst>(User);
2158     if (!callInstIsMemcpy(CI) || !destArrayCanBeWidened(CI))
2159       continue;
2160 
2161     Function *F = CI->getCalledFunction();
2162 
2163     auto *BytesToCopyOp = dyn_cast<ConstantInt>(CI->getArgOperand(2));
2164     if (!BytesToCopyOp)
2165       continue;
2166 
2167     ConstantDataArray *SourceDataArray =
2168         dyn_cast<ConstantDataArray>(GV->getInitializer());
2169     if (!SourceDataArray)
2170       continue;
2171 
2172     unsigned NumBytesToCopy = BytesToCopyOp->getZExtValue();
2173 
2174     auto *Alloca = dyn_cast<AllocaInst>(CI->getArgOperand(0));
2175     uint64_t DZSize = Alloca->getAllocatedType()->getArrayNumElements();
2176     uint64_t SZSize = SourceDataArray->getType()->getNumElements();
2177     unsigned ElementByteWidth = SourceDataArray->getElementByteSize();
2178     // Calculate the number of elements to copy while avoiding floored
2179     // division of integers returning wrong values, i.e. copying one byte
2180     // from an array of i16 would yield 0 elements to copy as opposed to 1.
2181     unsigned NumElementsToCopy = divideCeil(NumBytesToCopy, ElementByteWidth);
2182 
2183     // For safety, only pad when the number of elements to copy equals both
2184     // the destination array size and the size of the constant source
2185     // array.
2186     if (NumElementsToCopy != DZSize || DZSize != SZSize)
2187       continue;
2188 
2189     unsigned NumBytesToPad = GetTTI(*F).getNumBytesToPadGlobalArray(
2190         NumBytesToCopy, SourceDataArray->getType());
2191     if (NumBytesToPad) {
2192       return tryWidenGlobalArrayAndDests(F, GV, NumBytesToPad, NumBytesToCopy,
2193                                          BytesToCopyOp, SourceDataArray);
2194     }
2195   }
2196   return false;
2197 }
2198 
2199 static bool
2200 OptimizeGlobalVars(Module &M,
2201                    function_ref<TargetTransformInfo &(Function &)> GetTTI,
2202                    function_ref<TargetLibraryInfo &(Function &)> GetTLI,
2203                    function_ref<DominatorTree &(Function &)> LookupDomTree,
2204                    SmallPtrSetImpl<const Comdat *> &NotDiscardableComdats) {
2205   bool Changed = false;
2206 
2207   for (GlobalVariable &GV : llvm::make_early_inc_range(M.globals())) {
2208     // Global variables without names cannot be referenced outside this module.
2209     if (!GV.hasName() && !GV.isDeclaration() && !GV.hasLocalLinkage())
2210       GV.setLinkage(GlobalValue::InternalLinkage);
2211     // Simplify the initializer.
2212     if (GV.hasInitializer())
2213       if (auto *C = dyn_cast<Constant>(GV.getInitializer())) {
2214         auto &DL = M.getDataLayout();
2215         // TLI is not used in the case of a Constant, so use default nullptr
2216         // for that optional parameter, since we don't have a Function to
2217         // provide GetTLI anyway.
2218         Constant *New = ConstantFoldConstant(C, DL, /*TLI*/ nullptr);
2219         if (New != C)
2220           GV.setInitializer(New);
2221       }
2222 
2223     if (deleteIfDead(GV, NotDiscardableComdats)) {
2224       Changed = true;
2225       continue;
2226     }
2227 
2228     // For constant global arrays used as the source of a memcpy,
2229     // try to pad them to the nearest valid alignment boundary.
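    // For example (illustrative IR), a 7-byte constant source copied by
    //   call void @llvm.memcpy.p0.p0.i32(ptr %dst, ptr @src, i32 7, i1 false)
    // may be padded (together with its destination alloca) to 8 bytes so that
    // the copy can use wider aligned accesses.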
2230     Changed |= tryWidenGlobalArraysUsedByMemcpy(&GV, GetTTI);
2231 
2232     Changed |= processGlobal(GV, GetTTI, GetTLI, LookupDomTree);
2233   }
2234   return Changed;
2235 }
2236 
2237 /// Evaluate static constructors in the function, if we can.  Return true if we
2238 /// can, false otherwise.
2239 static bool EvaluateStaticConstructor(Function *F, const DataLayout &DL,
2240                                       TargetLibraryInfo *TLI) {
2241   // Skip external functions.
2242   if (F->isDeclaration())
2243     return false;
2244   // Call the function.
2245   Evaluator Eval(DL, TLI);
2246   Constant *RetValDummy;
2247   bool EvalSuccess = Eval.EvaluateFunction(F, RetValDummy,
2248                                            SmallVector<Constant*, 0>());
2249 
2250   if (EvalSuccess) {
2251     ++NumCtorsEvaluated;
2252 
2253     // We succeeded at evaluation: commit the result.
2254     auto NewInitializers = Eval.getMutatedInitializers();
2255     LLVM_DEBUG(dbgs() << "FULLY EVALUATED GLOBAL CTOR FUNCTION '"
2256                       << F->getName() << "' to " << NewInitializers.size()
2257                       << " stores.\n");
2258     for (const auto &Pair : NewInitializers)
2259       Pair.first->setInitializer(Pair.second);
2260     for (GlobalVariable *GV : Eval.getInvariants())
2261       GV->setConstant(true);
2262   }
2263 
2264   return EvalSuccess;
2265 }
2266 
2267 static int compareNames(Constant *const *A, Constant *const *B) {
2268   Value *AStripped = (*A)->stripPointerCasts();
2269   Value *BStripped = (*B)->stripPointerCasts();
2270   return AStripped->getName().compare(BStripped->getName());
2271 }
2272 
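/// Rebuild the initializer of the given llvm.used/llvm.compiler.used variable
/// from the set \p Init. For example (illustrative IR), the result has the
/// form:
///   @llvm.used = appending global [2 x ptr] [ptr @a, ptr @b],
///                section "llvm.metadata"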
2273 static void setUsedInitializer(GlobalVariable &V,
2274                                const SmallPtrSetImpl<GlobalValue *> &Init) {
2275   if (Init.empty()) {
2276     V.eraseFromParent();
2277     return;
2278   }
2279 
2280   // Get address space of pointers in the array of pointers.
2281   const Type *UsedArrayType = V.getValueType();
2282   const auto *VAT = cast<ArrayType>(UsedArrayType);
2283   const auto *VEPT = cast<PointerType>(VAT->getArrayElementType());
2284 
2285   // Type of pointer to the array of pointers.
2286   PointerType *PtrTy =
2287       PointerType::get(V.getContext(), VEPT->getAddressSpace());
2288 
2289   SmallVector<Constant *, 8> UsedArray;
2290   for (GlobalValue *GV : Init) {
2291     Constant *Cast = ConstantExpr::getPointerBitCastOrAddrSpaceCast(GV, PtrTy);
2292     UsedArray.push_back(Cast);
2293   }
2294 
2295   // Sort to get deterministic order.
2296   array_pod_sort(UsedArray.begin(), UsedArray.end(), compareNames);
2297   ArrayType *ATy = ArrayType::get(PtrTy, UsedArray.size());
2298 
2299   Module *M = V.getParent();
2300   V.removeFromParent();
2301   GlobalVariable *NV =
2302       new GlobalVariable(*M, ATy, false, GlobalValue::AppendingLinkage,
2303                          ConstantArray::get(ATy, UsedArray), "");
2304   NV->takeName(&V);
2305   NV->setSection("llvm.metadata");
2306   delete &V;
2307 }
2308 
2309 namespace {
2310 
2311 /// An easy to access representation of llvm.used and llvm.compiler.used.
2312 class LLVMUsed {
2313   SmallPtrSet<GlobalValue *, 4> Used;
2314   SmallPtrSet<GlobalValue *, 4> CompilerUsed;
2315   GlobalVariable *UsedV;
2316   GlobalVariable *CompilerUsedV;
2317 
2318 public:
2319   LLVMUsed(Module &M) {
2320     SmallVector<GlobalValue *, 4> Vec;
2321     UsedV = collectUsedGlobalVariables(M, Vec, false);
2322     Used = {Vec.begin(), Vec.end()};
2323     Vec.clear();
2324     CompilerUsedV = collectUsedGlobalVariables(M, Vec, true);
2325     CompilerUsed = {Vec.begin(), Vec.end()};
2326   }
2327 
2328   using iterator = SmallPtrSet<GlobalValue *, 4>::iterator;
2329   using used_iterator_range = iterator_range<iterator>;
2330 
2331   iterator usedBegin() { return Used.begin(); }
2332   iterator usedEnd() { return Used.end(); }
2333 
2334   used_iterator_range used() {
2335     return used_iterator_range(usedBegin(), usedEnd());
2336   }
2337 
2338   iterator compilerUsedBegin() { return CompilerUsed.begin(); }
2339   iterator compilerUsedEnd() { return CompilerUsed.end(); }
2340 
2341   used_iterator_range compilerUsed() {
2342     return used_iterator_range(compilerUsedBegin(), compilerUsedEnd());
2343   }
2344 
2345   bool usedCount(GlobalValue *GV) const { return Used.count(GV); }
2346 
2347   bool compilerUsedCount(GlobalValue *GV) const {
2348     return CompilerUsed.count(GV);
2349   }
2350 
2351   bool usedErase(GlobalValue *GV) { return Used.erase(GV); }
2352   bool compilerUsedErase(GlobalValue *GV) { return CompilerUsed.erase(GV); }
2353   bool usedInsert(GlobalValue *GV) { return Used.insert(GV).second; }
2354 
2355   bool compilerUsedInsert(GlobalValue *GV) {
2356     return CompilerUsed.insert(GV).second;
2357   }
2358 
2359   void syncVariablesAndSets() {
2360     if (UsedV)
2361       setUsedInitializer(*UsedV, Used);
2362     if (CompilerUsedV)
2363       setUsedInitializer(*CompilerUsedV, CompilerUsed);
2364   }
2365 };
2366 
2367 } // end anonymous namespace
2368 
2369 static bool hasUseOtherThanLLVMUsed(GlobalAlias &GA, const LLVMUsed &U) {
2370   if (GA.use_empty()) // No use at all.
2371     return false;
2372 
2373   assert((!U.usedCount(&GA) || !U.compilerUsedCount(&GA)) &&
2374          "We should have removed the duplicated "
2375          "element from llvm.compiler.used");
2376   if (!GA.hasOneUse())
2377     // Strictly more than one use. So at least one is not in llvm.used and
2378     // llvm.compiler.used.
2379     return true;
2380 
2381   // Exactly one use. Check if it is in llvm.used or llvm.compiler.used.
2382   return !U.usedCount(&GA) && !U.compilerUsedCount(&GA);
2383 }
2384 
2385 static bool mayHaveOtherReferences(GlobalValue &GV, const LLVMUsed &U) {
2386   if (!GV.hasLocalLinkage())
2387     return true;
2388 
2389   return U.usedCount(&GV) || U.compilerUsedCount(&GV);
2390 }
2391 
2392 static bool hasUsesToReplace(GlobalAlias &GA, const LLVMUsed &U,
2393                              bool &RenameTarget) {
2394   if (GA.isWeakForLinker())
2395     return false;
2396 
2397   RenameTarget = false;
2398   bool Ret = false;
2399   if (hasUseOtherThanLLVMUsed(GA, U))
2400     Ret = true;
2401 
2402   // If the alias is externally visible, we may still be able to simplify it.
2403   if (!mayHaveOtherReferences(GA, U))
2404     return Ret;
2405 
2406   // If the aliasee has internal linkage and no other references (e.g.,
2407   // @llvm.used, @llvm.compiler.used), give it the name and linkage of the
2408   // alias, and delete the alias. This turns:
2409   //   define internal ... @f(...)
2410   //   @a = alias ... @f
2411   // into:
2412   //   define ... @a(...)
2413   Constant *Aliasee = GA.getAliasee();
2414   GlobalValue *Target = cast<GlobalValue>(Aliasee->stripPointerCasts());
2415   if (mayHaveOtherReferences(*Target, U))
2416     return Ret;
2417 
2418   RenameTarget = true;
2419   return true;
2420 }
2421 
2422 static bool
2423 OptimizeGlobalAliases(Module &M,
2424                       SmallPtrSetImpl<const Comdat *> &NotDiscardableComdats) {
2425   bool Changed = false;
2426   LLVMUsed Used(M);
2427 
2428   for (GlobalValue *GV : Used.used())
2429     Used.compilerUsedErase(GV);
2430 
2431   // Return whether GV is explicitly or implicitly dso_local and not replaceable
2432   // by another definition in the current linkage unit.
2433   auto IsModuleLocal = [](GlobalValue &GV) {
2434     return !GlobalValue::isInterposableLinkage(GV.getLinkage()) &&
2435            (GV.isDSOLocal() || GV.isImplicitDSOLocal());
2436   };
2437 
2438   for (GlobalAlias &J : llvm::make_early_inc_range(M.aliases())) {
2439     // Aliases without names cannot be referenced outside this module.
2440     if (!J.hasName() && !J.isDeclaration() && !J.hasLocalLinkage())
2441       J.setLinkage(GlobalValue::InternalLinkage);
2442 
2443     if (deleteIfDead(J, NotDiscardableComdats)) {
2444       Changed = true;
2445       continue;
2446     }
2447 
2448     // If the alias can change at link time, nothing can be done - bail out.
2449     if (!IsModuleLocal(J))
2450       continue;
2451 
2452     Constant *Aliasee = J.getAliasee();
2453     GlobalValue *Target = dyn_cast<GlobalValue>(Aliasee->stripPointerCasts());
2454     // We can't trivially replace the alias with the aliasee if the aliasee is
2455     // non-trivial in some way. We also can't replace the alias with the aliasee
2456     // if the aliasee may be preemptible at runtime. On ELF, a non-preemptible
2457     // alias can be used to access the definition as if preemption did not
2458     // happen.
2459     // TODO: Try to handle non-zero GEPs of local aliasees.
2460     if (!Target || !IsModuleLocal(*Target))
2461       continue;
2462 
2463     Target->removeDeadConstantUsers();
2464 
2465     // Make all users of the alias use the aliasee instead.
2466     bool RenameTarget;
2467     if (!hasUsesToReplace(J, Used, RenameTarget))
2468       continue;
2469 
2470     J.replaceAllUsesWith(Aliasee);
2471     ++NumAliasesResolved;
2472     Changed = true;
2473 
2474     if (RenameTarget) {
2475       // Give the aliasee the name, linkage and other attributes of the alias.
2476       Target->takeName(&J);
2477       Target->setLinkage(J.getLinkage());
2478       Target->setDSOLocal(J.isDSOLocal());
2479       Target->setVisibility(J.getVisibility());
2480       Target->setDLLStorageClass(J.getDLLStorageClass());
2481 
2482       if (Used.usedErase(&J))
2483         Used.usedInsert(Target);
2484 
2485       if (Used.compilerUsedErase(&J))
2486         Used.compilerUsedInsert(Target);
2487     } else if (mayHaveOtherReferences(J, Used))
2488       continue;
2489 
2490     // Delete the alias.
2491     M.eraseAlias(&J);
2492     ++NumAliasesRemoved;
2493     Changed = true;
2494   }
2495 
2496   Used.syncVariablesAndSets();
2497 
2498   return Changed;
2499 }
2500 
2501 static Function *
2502 FindAtExitLibFunc(Module &M,
2503                   function_ref<TargetLibraryInfo &(Function &)> GetTLI,
2504                   LibFunc Func) {
2505   // Hack to get a default TLI before we have an actual Function.
2506   auto FuncIter = M.begin();
2507   if (FuncIter == M.end())
2508     return nullptr;
2509   auto *TLI = &GetTLI(*FuncIter);
2510 
2511   if (!TLI->has(Func))
2512     return nullptr;
2513 
2514   Function *Fn = M.getFunction(TLI->getName(Func));
2515   if (!Fn)
2516     return nullptr;
2517 
2518   // Now get the actual TLI for Fn.
2519   TLI = &GetTLI(*Fn);
2520 
2521   // Make sure that the function has the correct prototype.
2522   LibFunc F;
2523   if (!TLI->getLibFunc(*Fn, F) || F != Func)
2524     return nullptr;
2525 
2526   return Fn;
2527 }
2528 
2529 /// Returns whether the given function is an empty C++ destructor or atexit
2530 /// handler and can therefore be eliminated. Note that we assume that other
2531 /// optimization passes have already simplified the code so we simply check for
2532 /// 'ret'.
2533 static bool IsEmptyAtExitFunction(const Function &Fn) {
2534   // FIXME: We could eliminate C++ destructors if they're readonly/readnone and
2535   // nounwind, but that doesn't seem worth doing.
2536   if (Fn.isDeclaration())
2537     return false;
2538 
2539   for (const auto &I : Fn.getEntryBlock()) {
2540     if (I.isDebugOrPseudoInst())
2541       continue;
2542     if (isa<ReturnInst>(I))
2543       return true;
2544     break;
2545   }
2546   return false;
2547 }
2548 
2549 static bool OptimizeEmptyGlobalAtExitDtors(Function *CXAAtExitFn, bool isCXX) {
2550   /// Itanium C++ ABI p3.3.5:
2551   ///
2552   ///   After constructing a global (or local static) object, that will require
2553   ///   destruction on exit, a termination function is registered as follows:
2554   ///
2555   ///   extern "C" int __cxa_atexit ( void (*f)(void *), void *p, void *d );
2556   ///
2557   ///   This registration, e.g. __cxa_atexit(f,p,d), is intended to cause the
2558   ///   call f(p) when DSO d is unloaded, before all such termination calls
2559   ///   registered before this one. It returns zero if registration is
2560   ///   successful, nonzero on failure.
2561 
2562   // This pass will look for calls to __cxa_atexit or atexit where the function
2563   // is trivial and remove them.
2564   bool Changed = false;
2565 
2566   for (User *U : llvm::make_early_inc_range(CXAAtExitFn->users())) {
2567     // We're only interested in calls. Theoretically, we could handle invoke
2568     // instructions as well, but neither llvm-gcc nor clang generate invokes
2569     // to __cxa_atexit.
2570     CallInst *CI = dyn_cast<CallInst>(U);
2571     if (!CI)
2572       continue;
2573 
2574     Function *DtorFn =
2575       dyn_cast<Function>(CI->getArgOperand(0)->stripPointerCasts());
2576     if (!DtorFn || !IsEmptyAtExitFunction(*DtorFn))
2577       continue;
2578 
2579     // Just remove the call.
2580     CI->replaceAllUsesWith(Constant::getNullValue(CI->getType()));
2581     CI->eraseFromParent();
2582 
2583     if (isCXX)
2584       ++NumCXXDtorsRemoved;
2585     else
2586       ++NumAtExitRemoved;
2587 
2588     Changed |= true;
2589   }
2590 
2591   return Changed;
2592 }
2593 
2594 static Function *hasSideeffectFreeStaticResolution(GlobalIFunc &IF) {
2595   if (IF.isInterposable())
2596     return nullptr;
2597 
2598   Function *Resolver = IF.getResolverFunction();
2599   if (!Resolver)
2600     return nullptr;
2601 
2602   if (Resolver->isInterposable())
2603     return nullptr;
2604 
2605   // Only handle functions that have been optimized into a single basic block.
2606   auto It = Resolver->begin();
2607   if (++It != Resolver->end())
2608     return nullptr;
2609 
2610   BasicBlock &BB = Resolver->getEntryBlock();
2611 
2612   if (any_of(BB, [](Instruction &I) { return I.mayHaveSideEffects(); }))
2613     return nullptr;
2614 
2615   auto *Ret = dyn_cast<ReturnInst>(BB.getTerminator());
2616   if (!Ret)
2617     return nullptr;
2618 
2619   return dyn_cast<Function>(Ret->getReturnValue());
2620 }
2621 
2622 /// Find IFuncs that have resolvers that always point at the same statically
2623 /// known callee, and replace their callers with a direct call.
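/// For example (illustrative IR):
///   @f = ifunc void (), ptr @f_resolver
///   define ptr @f_resolver() { ret ptr @f_impl }
/// Uses of @f are then redirected to @f_impl.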
2624 static bool OptimizeStaticIFuncs(Module &M) {
2625   bool Changed = false;
2626   for (GlobalIFunc &IF : M.ifuncs())
2627     if (Function *Callee = hasSideeffectFreeStaticResolution(IF))
2628       if (!IF.use_empty() &&
2629           (!Callee->isDeclaration() ||
2630            none_of(IF.users(), [](User *U) { return isa<GlobalAlias>(U); }))) {
2631         IF.replaceAllUsesWith(Callee);
2632         NumIFuncsResolved++;
2633         Changed = true;
2634       }
2635   return Changed;
2636 }
2637 
2638 static bool
2639 DeleteDeadIFuncs(Module &M,
2640                  SmallPtrSetImpl<const Comdat *> &NotDiscardableComdats) {
2641   bool Changed = false;
2642   for (GlobalIFunc &IF : make_early_inc_range(M.ifuncs()))
2643     if (deleteIfDead(IF, NotDiscardableComdats)) {
2644       NumIFuncsDeleted++;
2645       Changed = true;
2646     }
2647   return Changed;
2648 }
2649 
2650 // Follows the use-def chain of \p V backwards until it finds a Function,
2651 // in which case it collects it in \p Versions. Return true on successful
2652 // use-def chain traversal, false otherwise.
2653 static bool collectVersions(TargetTransformInfo &TTI, Value *V,
2654                             SmallVectorImpl<Function *> &Versions) {
2655   if (auto *F = dyn_cast<Function>(V)) {
2656     if (!TTI.isMultiversionedFunction(*F))
2657       return false;
2658     Versions.push_back(F);
2659   } else if (auto *Sel = dyn_cast<SelectInst>(V)) {
2660     if (!collectVersions(TTI, Sel->getTrueValue(), Versions))
2661       return false;
2662     if (!collectVersions(TTI, Sel->getFalseValue(), Versions))
2663       return false;
2664   } else if (auto *Phi = dyn_cast<PHINode>(V)) {
2665     for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I)
2666       if (!collectVersions(TTI, Phi->getIncomingValue(I), Versions))
2667         return false;
2668   } else {
2669     // Unknown instruction type. Bail.
2670     return false;
2671   }
2672   return true;
2673 }
2674 
2675 // Bypass the IFunc Resolver of MultiVersioned functions when possible. To
2676 // deduce whether the optimization is legal we need to compare the target
2677 // features between caller and callee versions. The criteria for bypassing
2678 // the resolver are the following:
2679 //
2680 // * If the callee's feature set is a subset of the caller's feature set,
2681 //   then the callee is a candidate for direct call.
2682 //
2683 // * Among such candidates the one of highest priority is the best match
2684 //   and it shall be picked, unless there is a version of the callee with
2685 //   higher priority than the best match which cannot be picked from a
2686 //   higher priority caller (directly or through the resolver).
2687 //
2688 // * For every higher priority callee version than the best match, there
2689 //   is a higher priority caller version whose feature set availability
2690 //   is implied by the callee's feature set.
2691 //
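// For example (illustrative), a callee version that requires only the "sve"
// feature is a direct-call candidate from any caller whose feature set
// includes "sve"; among such candidates the highest priority one is chosen,
// subject to the conditions above.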
2692 static bool OptimizeNonTrivialIFuncs(
2693     Module &M, function_ref<TargetTransformInfo &(Function &)> GetTTI) {
2694   bool Changed = false;
2695 
2696   // Cache containing the mask constructed from a function's target features.
2697   DenseMap<Function *, uint64_t> FeatureMask;
2698 
2699   for (GlobalIFunc &IF : M.ifuncs()) {
2700     if (IF.isInterposable())
2701       continue;
2702 
2703     Function *Resolver = IF.getResolverFunction();
2704     if (!Resolver)
2705       continue;
2706 
2707     if (Resolver->isInterposable())
2708       continue;
2709 
2710     TargetTransformInfo &TTI = GetTTI(*Resolver);
2711 
2712     // Discover the callee versions.
2713     SmallVector<Function *> Callees;
2714     if (any_of(*Resolver, [&TTI, &Callees](BasicBlock &BB) {
2715           if (auto *Ret = dyn_cast_or_null<ReturnInst>(BB.getTerminator()))
2716             if (!collectVersions(TTI, Ret->getReturnValue(), Callees))
2717               return true;
2718           return false;
2719         }))
2720       continue;
2721 
2722     assert(!Callees.empty() && "Expecting successful collection of versions");
2723 
2724     LLVM_DEBUG(dbgs() << "Statically resolving calls to function "
2725                       << Resolver->getName() << "\n");
2726 
2727     // Cache the feature mask for each callee.
2728     for (Function *Callee : Callees) {
2729       auto [It, Inserted] = FeatureMask.try_emplace(Callee);
2730       if (Inserted)
2731         It->second = TTI.getFeatureMask(*Callee);
2732     }
2733 
2734     // Sort the callee versions in decreasing priority order.
2735     sort(Callees, [&](auto *LHS, auto *RHS) {
2736       return FeatureMask[LHS] > FeatureMask[RHS];
2737     });
2738 
2739     // Find the callsites and cache the feature mask for each caller.
2740     SmallVector<Function *> Callers;
2741     DenseMap<Function *, SmallVector<CallBase *>> CallSites;
2742     for (User *U : IF.users()) {
2743       if (auto *CB = dyn_cast<CallBase>(U)) {
2744         if (CB->getCalledOperand() == &IF) {
2745           Function *Caller = CB->getFunction();
2746           auto [FeatIt, FeatInserted] = FeatureMask.try_emplace(Caller);
2747           if (FeatInserted)
2748             FeatIt->second = TTI.getFeatureMask(*Caller);
2749           auto [CallIt, CallInserted] = CallSites.try_emplace(Caller);
2750           if (CallInserted)
2751             Callers.push_back(Caller);
2752           CallIt->second.push_back(CB);
2753         }
2754       }
2755     }
2756 
2757     // Sort the caller versions in decreasing priority order.
2758     sort(Callers, [&](auto *LHS, auto *RHS) {
2759       return FeatureMask[LHS] > FeatureMask[RHS];
2760     });
2761 
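    // implies(A, B) is true when every feature bit set in B is also set in A,
    // i.e. B's feature set is a subset of A's.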
2762     auto implies = [](uint64_t A, uint64_t B) { return (A & B) == B; };
2763 
2764     // Index to the highest priority candidate.
2765     unsigned I = 0;
2766     // Now try to redirect calls starting from higher priority callers.
2767     for (Function *Caller : Callers) {
2768       assert(I < Callees.size() && "Found callers of equal priority");
2769 
2770       Function *Callee = Callees[I];
2771       uint64_t CallerBits = FeatureMask[Caller];
2772       uint64_t CalleeBits = FeatureMask[Callee];
2773 
2774       // For FMV callers we know that all caller versions of higher priority
2775       // than the current one were not selected at runtime. This helps reason
2776       // about the callees: versions requiring features which we already know
2777       // are unavailable on this target can be ruled out.
2778       if (TTI.isMultiversionedFunction(*Caller)) {
2779         // If the feature set of the caller implies the feature set of the
2780         // highest priority candidate, then it shall be picked. If the two
2781         // sets are identical, also advance the candidate index by one.
2782         if (CallerBits == CalleeBits)
2783           ++I;
2784         else if (!implies(CallerBits, CalleeBits)) {
2785           // Keep advancing the candidate index as long as the caller's
2786           // features are a subset of the current candidate's.
2787           while (implies(CalleeBits, CallerBits)) {
2788             if (++I == Callees.size())
2789               break;
2790             CalleeBits = FeatureMask[Callees[I]];
2791           }
2792           continue;
2793         }
2794       } else {
2795         // We can't reason much about non-FMV callers. Just pick the highest
2796         // priority callee if it matches, otherwise bail.
2797         if (!OptimizeNonFMVCallers || I > 0 || !implies(CallerBits, CalleeBits))
2798           continue;
2799       }
2800       auto &Calls = CallSites[Caller];
2801       for (CallBase *CS : Calls) {
2802         LLVM_DEBUG(dbgs() << "Redirecting call " << Caller->getName() << " -> "
2803                           << Callee->getName() << "\n");
2804         CS->setCalledOperand(Callee);
2805       }
2806       Changed = true;
2807     }
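    // If the only remaining uses of the ifunc are global aliases (or there
    // are none at all), all calls to it have been redirected; account for
    // that in the statistics.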
2808     if (IF.use_empty() ||
2809         all_of(IF.users(), [](User *U) { return isa<GlobalAlias>(U); }))
2810       NumIFuncsResolved++;
2811   }
2812   return Changed;
2813 }
2814 
2815 static bool
2816 optimizeGlobalsInModule(Module &M, const DataLayout &DL,
2817                         function_ref<TargetLibraryInfo &(Function &)> GetTLI,
2818                         function_ref<TargetTransformInfo &(Function &)> GetTTI,
2819                         function_ref<BlockFrequencyInfo &(Function &)> GetBFI,
2820                         function_ref<DominatorTree &(Function &)> LookupDomTree,
2821                         function_ref<void(Function &F)> ChangedCFGCallback,
2822                         function_ref<void(Function &F)> DeleteFnCallback) {
2823   SmallPtrSet<const Comdat *, 8> NotDiscardableComdats;
2824   bool Changed = false;
2825   bool LocalChange = true;
2826   std::optional<uint32_t> FirstNotFullyEvaluatedPriority;
2827 
2828   while (LocalChange) {
2829     LocalChange = false;
2830 
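    // Recompute the set of comdats that still have a live member; globals
    // belonging to such comdats must not be discarded by the passes below.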
2831     NotDiscardableComdats.clear();
2832     for (const GlobalVariable &GV : M.globals())
2833       if (const Comdat *C = GV.getComdat())
2834         if (!GV.isDiscardableIfUnused() || !GV.use_empty())
2835           NotDiscardableComdats.insert(C);
2836     for (Function &F : M)
2837       if (const Comdat *C = F.getComdat())
2838         if (!F.isDefTriviallyDead())
2839           NotDiscardableComdats.insert(C);
2840     for (GlobalAlias &GA : M.aliases())
2841       if (const Comdat *C = GA.getComdat())
2842         if (!GA.isDiscardableIfUnused() || !GA.use_empty())
2843           NotDiscardableComdats.insert(C);
2844 
2845     // Delete trivially dead functions; convert ccc -> fastcc where possible.
2846     LocalChange |= OptimizeFunctions(M, GetTLI, GetTTI, GetBFI, LookupDomTree,
2847                                      NotDiscardableComdats, ChangedCFGCallback,
2848                                      DeleteFnCallback);
2849 
2850     // Optimize global_ctors list.
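    // Constructors are evaluated in priority order; once a constructor of
    // some priority fails to evaluate fully, constructors of any other
    // priority are no longer attempted, as they may depend on its effects.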
2851     LocalChange |=
2852         optimizeGlobalCtorsList(M, [&](uint32_t Priority, Function *F) {
2853           if (FirstNotFullyEvaluatedPriority &&
2854               *FirstNotFullyEvaluatedPriority != Priority)
2855             return false;
2856           bool Evaluated = EvaluateStaticConstructor(F, DL, &GetTLI(*F));
2857           if (!Evaluated)
2858             FirstNotFullyEvaluatedPriority = Priority;
2859           return Evaluated;
2860         });
2861 
2862     // Optimize non-address-taken globals.
2863     LocalChange |= OptimizeGlobalVars(M, GetTTI, GetTLI, LookupDomTree,
2864                                       NotDiscardableComdats);
2865 
2866     // Resolve aliases, when possible.
2867     LocalChange |= OptimizeGlobalAliases(M, NotDiscardableComdats);
2868 
2869     // Try to remove trivial global destructors if they have not already
2870     // been removed.
2871     if (Function *CXAAtExitFn =
2872             FindAtExitLibFunc(M, GetTLI, LibFunc_cxa_atexit))
2873       LocalChange |= OptimizeEmptyGlobalAtExitDtors(CXAAtExitFn, true);
2874 
2875     if (Function *AtExitFn = FindAtExitLibFunc(M, GetTLI, LibFunc_atexit))
2876       LocalChange |= OptimizeEmptyGlobalAtExitDtors(AtExitFn, false);
2877 
2878     // Optimize IFuncs whose callees are statically known.
2879     LocalChange |= OptimizeStaticIFuncs(M);
2880 
2881     // Optimize IFuncs based on the target features of the caller.
2882     LocalChange |= OptimizeNonTrivialIFuncs(M, GetTTI);
2883 
2884     // Remove any IFuncs that are now dead.
2885     LocalChange |= DeleteDeadIFuncs(M, NotDiscardableComdats);
2886 
2887     Changed |= LocalChange;
2888   }
2889 
2890   // TODO: Move all global ctors functions to the end of the module for code
2891   // layout.
2892 
2893   return Changed;
2894 }
2895 
2896 PreservedAnalyses GlobalOptPass::run(Module &M, ModuleAnalysisManager &AM) {
2897   auto &DL = M.getDataLayout();
2898   auto &FAM =
2899       AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
2900   auto LookupDomTree = [&FAM](Function &F) -> DominatorTree & {
2901     return FAM.getResult<DominatorTreeAnalysis>(F);
2902   };
2903   auto GetTLI = [&FAM](Function &F) -> TargetLibraryInfo & {
2904     return FAM.getResult<TargetLibraryAnalysis>(F);
2905   };
2906   auto GetTTI = [&FAM](Function &F) -> TargetTransformInfo & {
2907     return FAM.getResult<TargetIRAnalysis>(F);
2908   };
2909 
2910   auto GetBFI = [&FAM](Function &F) -> BlockFrequencyInfo & {
2911     return FAM.getResult<BlockFrequencyAnalysis>(F);
2912   };
2913   auto ChangedCFGCallback = [&FAM](Function &F) {
2914     FAM.invalidate(F, PreservedAnalyses::none());
2915   };
2916   auto DeleteFnCallback = [&FAM](Function &F) { FAM.clear(F, F.getName()); };
2917 
2918   if (!optimizeGlobalsInModule(M, DL, GetTLI, GetTTI, GetBFI, LookupDomTree,
2919                                ChangedCFGCallback, DeleteFnCallback))
2920     return PreservedAnalyses::all();
2921 
2922   PreservedAnalyses PA = PreservedAnalyses::none();
2923   // We made sure to clear analyses for deleted functions.
2924   PA.preserve<FunctionAnalysisManagerModuleProxy>();
2925   // The only place we modify the CFG is when calling
2926   // removeUnreachableBlocks(), but there we make sure to invalidate analyses
2927   // for modified functions.
2928   PA.preserveSet<CFGAnalyses>();
2929   return PA;
2930 }
2931