//===- GlobalMerge.cpp - Internal globals merging -------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass merges globals with internal linkage into one. This way all the
// merged globals can be addressed using offsets from the same base pointer
// (there is no need for a separate base pointer for each global). Such a
// transformation can significantly reduce register pressure when many globals
// are involved.
//
// For example, consider the code which touches several global variables at
// once:
//
// static int foo[N], bar[N], baz[N];
//
// for (i = 0; i < N; ++i) {
//    foo[i] = bar[i] * baz[i];
// }
//
//  On ARM the addresses of the 3 arrays must be kept in registers, so this
//  code has quite high register pressure (loop body):
//
//  ldr     r1, [r5], #4
//  ldr     r2, [r6], #4
//  mul     r1, r2, r1
//  str     r1, [r0], #4
//
//  The pass converts the code to something like:
//
//  static struct {
//    int foo[N];
//    int bar[N];
//    int baz[N];
//  } merged;
//
//  for (i = 0; i < N; ++i) {
//    merged.foo[i] = merged.bar[i] * merged.baz[i];
//  }
//
//  and in ARM code this becomes:
//
//  ldr     r0, [r5, #40]
//  ldr     r1, [r5, #80]
//  mul     r0, r1, r0
//  str     r0, [r5], #4
//
//  note that we saved 2 registers here almost "for free".
//
// However, merging globals can have tradeoffs:
// - it confuses debuggers, tools, and users
// - it makes linker optimizations less useful (order files, LOHs, ...)
// - it forces usage of indexed addressing (which isn't necessarily "free")
// - it can increase register pressure when the uses are disparate enough.
//
// We use heuristics to discover the best global grouping we can (cf cl::opts).
//
//===---------------------------------------------------------------------===//

#include "llvm/CodeGen/GlobalMerge.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/InitializePasses.h"
#include "llvm/MC/SectionKind.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/TargetParser/Triple.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <string>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "global-merge"

// FIXME: This is only useful as a last-resort way to disable the pass.
static cl::opt<bool>
EnableGlobalMerge("enable-global-merge", cl::Hidden,
                  cl::desc("Enable the global merge pass"),
                  cl::init(true));

static cl::opt<unsigned>
GlobalMergeMaxOffset("global-merge-max-offset", cl::Hidden,
                     cl::desc("Set maximum offset for global merge pass"),
                     cl::init(0));

static cl::opt<bool> GlobalMergeGroupByUse(
    "global-merge-group-by-use", cl::Hidden,
    cl::desc("Improve global merge pass to look at uses"), cl::init(true));

static cl::opt<bool> GlobalMergeIgnoreSingleUse(
    "global-merge-ignore-single-use", cl::Hidden,
    cl::desc("Improve global merge pass to ignore globals only used alone"),
    cl::init(true));

static cl::opt<bool>
EnableGlobalMergeOnConst("global-merge-on-const", cl::Hidden,
                         cl::desc("Enable global merge pass on constants"),
                         cl::init(false));

// FIXME: this could be a transitional option; we should remove it once we are
// sure this optimization always benefits all targets.
static cl::opt<cl::boolOrDefault>
EnableGlobalMergeOnExternal("global-merge-on-external", cl::Hidden,
     cl::desc("Enable global merge pass on external linkage"));

static cl::opt<unsigned>
    GlobalMergeMinDataSize("global-merge-min-data-size",
                           cl::desc("The minimum size in bytes of each global "
                                    "that should be considered for merging."),
                           cl::init(0), cl::Hidden);

STATISTIC(NumMerged, "Number of globals merged");

namespace {

class GlobalMergeImpl {
  const TargetMachine *TM = nullptr;
  GlobalMergeOptions Opt;
  bool IsMachO = false;

private:
  bool doMerge(SmallVectorImpl<GlobalVariable *> &Globals, Module &M,
               bool isConst, unsigned AddrSpace) const;

  /// Merge everything in \p Globals for which the corresponding bit
  /// in \p GlobalSet is set.
  bool doMerge(const SmallVectorImpl<GlobalVariable *> &Globals,
               const BitVector &GlobalSet, Module &M, bool isConst,
               unsigned AddrSpace) const;

  /// Check whether the given variable has been identified as one that must
  /// be kept.
  /// \pre setMustKeepGlobalVariables must have been called on the Module that
  ///      contains GV
  bool isMustKeepGlobalVariable(const GlobalVariable *GV) const {
    return MustKeepGlobalVariables.count(GV);
  }

  /// Collect every variable marked as "used" or used in a landing pad
  /// instruction for this Module.
  void setMustKeepGlobalVariables(Module &M);

  /// Collect every variable marked as "used"
  void collectUsedGlobalVariables(Module &M, StringRef Name);

  /// Keep track of the GlobalVariables that must not be merged away
  SmallSetVector<const GlobalVariable *, 16> MustKeepGlobalVariables;

public:
  GlobalMergeImpl(const TargetMachine *TM, GlobalMergeOptions Opt)
      : TM(TM), Opt(Opt) {}
  bool run(Module &M);
};

class GlobalMerge : public FunctionPass {
  const TargetMachine *TM = nullptr;
  GlobalMergeOptions Opt;

public:
  static char ID; // Pass identification, replacement for typeid.

  explicit GlobalMerge() : FunctionPass(ID) {
    Opt.MaxOffset = GlobalMergeMaxOffset;
    initializeGlobalMergePass(*PassRegistry::getPassRegistry());
  }

  explicit GlobalMerge(const TargetMachine *TM, unsigned MaximalOffset,
                       bool OnlyOptimizeForSize, bool MergeExternalGlobals)
      : FunctionPass(ID), TM(TM) {
    Opt.MaxOffset = MaximalOffset;
    Opt.SizeOnly = OnlyOptimizeForSize;
    Opt.MergeExternal = MergeExternalGlobals;
    initializeGlobalMergePass(*PassRegistry::getPassRegistry());
  }

  bool doInitialization(Module &M) override {
    auto GetSmallDataLimit = [](Module &M) -> std::optional<uint64_t> {
      Metadata *SDL = M.getModuleFlag("SmallDataLimit");
      if (!SDL)
        return std::nullopt;
      return mdconst::extract<ConstantInt>(SDL)->getZExtValue();
    };
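    // Respect an explicit -global-merge-min-data-size if one was given;
    // otherwise, if the module records a SmallDataLimit, only consider
    // globals strictly larger than the small-data limit for merging.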
    if (GlobalMergeMinDataSize.getNumOccurrences())
      Opt.MinSize = GlobalMergeMinDataSize;
    else if (auto SDL = GetSmallDataLimit(M); SDL && *SDL > 0)
      Opt.MinSize = *SDL + 1;
    else
      Opt.MinSize = 0;

    GlobalMergeImpl P(TM, Opt);
    return P.run(M);
  }
  bool runOnFunction(Function &F) override { return false; }

  StringRef getPassName() const override { return "Merge internal globals"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    FunctionPass::getAnalysisUsage(AU);
  }
};

} // end anonymous namespace

PreservedAnalyses GlobalMergePass::run(Module &M, ModuleAnalysisManager &) {
  GlobalMergeImpl P(TM, Options);
  bool Changed = P.run(M);
  if (!Changed)
    return PreservedAnalyses::all();

  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  return PA;
}

char GlobalMerge::ID = 0;

INITIALIZE_PASS(GlobalMerge, DEBUG_TYPE, "Merge global variables", false, false)

bool GlobalMergeImpl::doMerge(SmallVectorImpl<GlobalVariable *> &Globals,
                              Module &M, bool isConst,
                              unsigned AddrSpace) const {
  auto &DL = M.getDataLayout();
  // FIXME: Find better heuristics
  llvm::stable_sort(
      Globals, [&DL](const GlobalVariable *GV1, const GlobalVariable *GV2) {
        // We don't support scalable global variables.
        return DL.getTypeAllocSize(GV1->getValueType()).getFixedValue() <
               DL.getTypeAllocSize(GV2->getValueType()).getFixedValue();
      });
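  // The globals stay sorted by increasing alloc size from here on; the
  // bit-vector doMerge() below walks them in this order when packing merged
  // structs that must fit within Opt.MaxOffset.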

  // If we want to just blindly group all globals together, do so.
  if (!GlobalMergeGroupByUse) {
    BitVector AllGlobals(Globals.size());
    AllGlobals.set();
    return doMerge(Globals, AllGlobals, M, isConst, AddrSpace);
  }

  // If we want to be smarter, look at all uses of each global, to try to
  // discover all sets of globals used together, and how many times each of
  // these sets occurred.
  //
  // Keep this reasonably efficient, by having an append-only list of all sets
  // discovered so far (UsedGlobalSet), and mapping each "together-ness" unit of
  // code (currently, a Function) to the set of globals seen so far that are
  // used together in that unit (GlobalUsesByFunction).
  //
  // When we look at the Nth global, we know that any new set is either:
  // - the singleton set {N}, containing this global only, or
  // - the union of {N} and a previously-discovered set, containing some
  //   combination of the previous N-1 globals.
  // Using that knowledge, when looking at the Nth global, we can keep:
  // - a reference to the singleton set {N} (CurGVOnlySetIdx)
  // - a list mapping each previous set to its union with {N} (EncounteredUGS),
  //   if it actually occurs.
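  //
  // For example, with globals A, B, C (in sorted order) and two functions,
  // f() using A and B, and g() using B and C, the discovered sets end up
  // being {A}, {A,B}, {B}, and {B,C}: while visiting B, f()'s set {A} is
  // expanded into {A,B}; while visiting C, g()'s set {B} is expanded into
  // {B,C}.  Only {A,B} and {B,C} keep a non-zero usage count.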

  // We keep track of the sets of globals used together "close enough".
  struct UsedGlobalSet {
    BitVector Globals;
    unsigned UsageCount = 1;

    UsedGlobalSet(size_t Size) : Globals(Size) {}
  };

  // Each set is unique in UsedGlobalSets.
  std::vector<UsedGlobalSet> UsedGlobalSets;

  // Avoid repeating the create-global-set pattern.
  auto CreateGlobalSet = [&]() -> UsedGlobalSet & {
    UsedGlobalSets.emplace_back(Globals.size());
    return UsedGlobalSets.back();
  };

  // The first set is the empty set.
  CreateGlobalSet().UsageCount = 0;

  // We define "close enough" to be "in the same function".
  // FIXME: Grouping uses by function is way too aggressive, so we should have
  // a better metric for distance between uses.
  // The obvious alternative would be to group by BasicBlock, but that's in
  // turn too conservative.
  // Anything in between wouldn't be trivial to compute, so just stick with
  // per-function grouping.

  // The value type is an index into UsedGlobalSets.
  // The default (0) conveniently points to the empty set.
  DenseMap<Function *, size_t /*UsedGlobalSetIdx*/> GlobalUsesByFunction;

  // Now, look at each merge-eligible global in turn.

  // Keep track of the sets we already encountered to which we added the
  // current global.
  // Each element matches the same-index element in UsedGlobalSets.
  // This lets us efficiently tell whether a set has already been expanded to
  // include the current global.
  std::vector<size_t> EncounteredUGS;

  for (size_t GI = 0, GE = Globals.size(); GI != GE; ++GI) {
    GlobalVariable *GV = Globals[GI];

    // Reset the encountered sets for this global, and grow the list in case
    // we created new sets for the previous global.
    EncounteredUGS.assign(UsedGlobalSets.size(), 0);

    // We might need to create a set that only consists of the current global.
    // Keep track of its index into UsedGlobalSets.
    size_t CurGVOnlySetIdx = 0;

    // For each global, look at all its Uses.
    for (auto &U : GV->uses()) {
      // This Use might be a ConstantExpr.  We're interested in Instruction
      // users, so look through ConstantExpr...
      Use *UI, *UE;
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(U.getUser())) {
        if (CE->use_empty())
          continue;
        UI = &*CE->use_begin();
        UE = nullptr;
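        // A null end marker means the loop below walks the ConstantExpr's
        // entire use list; Use::getNext() returns nullptr past the last use.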
      } else if (isa<Instruction>(U.getUser())) {
        UI = &U;
        UE = UI->getNext();
      } else {
        continue;
      }

      // ...to iterate on all the instruction users of the global.
      // Note that we iterate on Uses and not on Users to be able to getNext().
      for (; UI != UE; UI = UI->getNext()) {
        Instruction *I = dyn_cast<Instruction>(UI->getUser());
        if (!I)
          continue;

        Function *ParentFn = I->getParent()->getParent();

        // If we're only optimizing for size, ignore non-minsize functions.
        if (Opt.SizeOnly && !ParentFn->hasMinSize())
          continue;

        size_t UGSIdx = GlobalUsesByFunction[ParentFn];

        // If this is the first global the function uses, map it to the set
        // consisting of this global only.
        if (!UGSIdx) {
          // If that set doesn't exist yet, create it.
          if (!CurGVOnlySetIdx) {
            CurGVOnlySetIdx = UsedGlobalSets.size();
            CreateGlobalSet().Globals.set(GI);
          } else {
            ++UsedGlobalSets[CurGVOnlySetIdx].UsageCount;
          }

          GlobalUsesByFunction[ParentFn] = CurGVOnlySetIdx;
          continue;
        }

        // If the function's set already contains this global, just increment
        // its counter.
        if (UsedGlobalSets[UGSIdx].Globals.test(GI)) {
          ++UsedGlobalSets[UGSIdx].UsageCount;
          continue;
        }

        // If not, the previous set wasn't actually used in this function.
        --UsedGlobalSets[UGSIdx].UsageCount;

        // If we already expanded the previous set to include this global, just
        // reuse that expanded set.
        if (size_t ExpandedIdx = EncounteredUGS[UGSIdx]) {
          ++UsedGlobalSets[ExpandedIdx].UsageCount;
          GlobalUsesByFunction[ParentFn] = ExpandedIdx;
          continue;
        }

        // If not, create a new set consisting of the union of the previous set
        // and this global.  Mark it as encountered, so we can reuse it later.
        GlobalUsesByFunction[ParentFn] = EncounteredUGS[UGSIdx] =
            UsedGlobalSets.size();

        UsedGlobalSet &NewUGS = CreateGlobalSet();
        NewUGS.Globals.set(GI);
        NewUGS.Globals |= UsedGlobalSets[UGSIdx].Globals;
      }
    }
  }

  // Now we have found a bunch of sets of globals used together.  We have also
  // accumulated the number of times we encountered each set (i.e., the number
  // of functions that use that exact set of globals).
  //
  // Multiply that by the size of the set to give us a crude profitability
  // metric.
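  //
  // For example, a set of 3 globals used together in 4 functions scores 12
  // and outranks a pair of globals used together in 5 functions (score 10).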
  llvm::stable_sort(UsedGlobalSets,
                    [](const UsedGlobalSet &UGS1, const UsedGlobalSet &UGS2) {
                      return UGS1.Globals.count() * UGS1.UsageCount <
                             UGS2.Globals.count() * UGS2.UsageCount;
                    });

  // We can choose to merge all globals together, but ignore globals never used
  // with another global.  This catches the obviously non-profitable cases of
  // having a single global, but is aggressive enough for any other case.
  if (GlobalMergeIgnoreSingleUse) {
    BitVector AllGlobals(Globals.size());
    for (const UsedGlobalSet &UGS : llvm::reverse(UsedGlobalSets)) {
      if (UGS.UsageCount == 0)
        continue;
      if (UGS.Globals.count() > 1)
        AllGlobals |= UGS.Globals;
    }
    return doMerge(Globals, AllGlobals, M, isConst, AddrSpace);
  }

  // Starting from the sets with the best (=biggest) profitability, find a
  // good combination.
  // The ideal (and expensive) solution can only be found by trying all
  // combinations, looking for the one with the best profitability.
  // Don't be smart about it, and just pick the first compatible combination,
  // starting with the sets with the best profitability.
  BitVector PickedGlobals(Globals.size());
  bool Changed = false;

  for (const UsedGlobalSet &UGS : llvm::reverse(UsedGlobalSets)) {
    if (UGS.UsageCount == 0)
      continue;
    if (PickedGlobals.anyCommon(UGS.Globals))
      continue;
    PickedGlobals |= UGS.Globals;
    // If the set only contains one global, there's no point in merging.
    // Ignore the global for inclusion in other sets though, so keep it in
    // PickedGlobals.
    if (UGS.Globals.count() < 2)
      continue;
    Changed |= doMerge(Globals, UGS.Globals, M, isConst, AddrSpace);
  }

  return Changed;
}

bool GlobalMergeImpl::doMerge(const SmallVectorImpl<GlobalVariable *> &Globals,
                              const BitVector &GlobalSet, Module &M,
                              bool isConst, unsigned AddrSpace) const {
  assert(Globals.size() > 1);

  Type *Int32Ty = Type::getInt32Ty(M.getContext());
  Type *Int8Ty = Type::getInt8Ty(M.getContext());
  auto &DL = M.getDataLayout();

  LLVM_DEBUG(dbgs() << " Trying to merge set, starts with #"
                    << GlobalSet.find_first() << "\n");

  bool Changed = false;
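  // Walk the chosen globals in order, carving them into consecutive chunks
  // whose combined (padded) size stays within Opt.MaxOffset; each chunk
  // becomes one merged struct.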
  ssize_t i = GlobalSet.find_first();
  while (i != -1) {
    ssize_t j = 0;
    uint64_t MergedSize = 0;
    std::vector<Type*> Tys;
    std::vector<Constant*> Inits;
    std::vector<unsigned> StructIdxs;

    bool HasExternal = false;
    StringRef FirstExternalName;
    Align MaxAlign;
    unsigned CurIdx = 0;
    for (j = i; j != -1; j = GlobalSet.find_next(j)) {
      Type *Ty = Globals[j]->getValueType();

      // Make sure we use the same alignment AsmPrinter would use.
      Align Alignment = DL.getPreferredAlign(Globals[j]);
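      // Pad so this global starts at an offset satisfying its preferred
      // alignment, e.g. 2 bytes of padding if MergedSize is 6 and the
      // alignment is 4.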
      unsigned Padding = alignTo(MergedSize, Alignment) - MergedSize;
      MergedSize += Padding;
      MergedSize += DL.getTypeAllocSize(Ty);
      if (MergedSize > Opt.MaxOffset) {
        break;
      }
      if (Padding) {
        Tys.push_back(ArrayType::get(Int8Ty, Padding));
        Inits.push_back(ConstantAggregateZero::get(Tys.back()));
        ++CurIdx;
      }
      Tys.push_back(Ty);
      Inits.push_back(Globals[j]->getInitializer());
      StructIdxs.push_back(CurIdx++);

      MaxAlign = std::max(MaxAlign, Alignment);

      if (Globals[j]->hasExternalLinkage() && !HasExternal) {
        HasExternal = true;
        FirstExternalName = Globals[j]->getName();
      }
    }

    // Skip this chunk if it contains fewer than two globals to merge.
    if (Tys.size() < 2) {
      i = j;
      continue;
    }

    // If none of the merged variables has external linkage, we don't need to
    // expose the symbol after merging.
    GlobalValue::LinkageTypes Linkage = HasExternal
                                            ? GlobalValue::ExternalLinkage
                                            : GlobalValue::InternalLinkage;
    // Use a packed struct so we can control alignment.
    StructType *MergedTy = StructType::get(M.getContext(), Tys, true);
    Constant *MergedInit = ConstantStruct::get(MergedTy, Inits);

    // On Darwin external linkage needs to be preserved, otherwise
    // dsymutil cannot preserve the debug info for the merged
    // variables.  If they have external linkage, use the symbol name
    // of the first variable merged as the suffix of global symbol
    // name.  This avoids a link-time naming conflict for the
    // _MergedGlobals symbols.
    Twine MergedName =
        (IsMachO && HasExternal)
            ? "_MergedGlobals_" + FirstExternalName
            : "_MergedGlobals";
    auto MergedLinkage = IsMachO ? Linkage : GlobalValue::PrivateLinkage;
    auto *MergedGV = new GlobalVariable(
        M, MergedTy, isConst, MergedLinkage, MergedInit, MergedName, nullptr,
        GlobalVariable::NotThreadLocal, AddrSpace);

    MergedGV->setAlignment(MaxAlign);
    MergedGV->setSection(Globals[i]->getSection());

    const StructLayout *MergedLayout = DL.getStructLayout(MergedTy);
    for (ssize_t k = i, idx = 0; k != j; k = GlobalSet.find_next(k), ++idx) {
      GlobalValue::LinkageTypes Linkage = Globals[k]->getLinkage();
      std::string Name(Globals[k]->getName());
      GlobalValue::VisibilityTypes Visibility = Globals[k]->getVisibility();
      GlobalValue::DLLStorageClassTypes DLLStorage =
          Globals[k]->getDLLStorageClass();

      // Copy metadata while adjusting any debug info metadata by the original
      // global's offset within the merged global.
      MergedGV->copyMetadata(Globals[k],
                             MergedLayout->getElementOffset(StructIdxs[idx]));

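      // Rewrite all uses of the original global as a constant GEP into the
      // merged struct, i.e. the address of field StructIdxs[idx] of MergedGV.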
      Constant *Idx[2] = {
          ConstantInt::get(Int32Ty, 0),
          ConstantInt::get(Int32Ty, StructIdxs[idx]),
      };
      Constant *GEP =
          ConstantExpr::getInBoundsGetElementPtr(MergedTy, MergedGV, Idx);
      Globals[k]->replaceAllUsesWith(GEP);
      Globals[k]->eraseFromParent();

      // When the linkage is not internal we must emit an alias for the original
      // variable name as it may be accessed from another object. On non-Mach-O
      // we can also emit an alias for internal linkage as it's safe to do so.
      // It's not safe on Mach-O as the alias (and thus the portion of the
      // MergedGlobals variable) may be dead stripped at link time.
      if (Linkage != GlobalValue::InternalLinkage || !IsMachO) {
        GlobalAlias *GA = GlobalAlias::create(Tys[StructIdxs[idx]], AddrSpace,
                                              Linkage, Name, GEP, &M);
        GA->setVisibility(Visibility);
        GA->setDLLStorageClass(DLLStorage);
      }

      NumMerged++;
    }
    Changed = true;
    i = j;
  }

  return Changed;
}

void GlobalMergeImpl::collectUsedGlobalVariables(Module &M, StringRef Name) {
  // Extract global variables from the given "used" array (llvm.used or
  // llvm.compiler.used).
  const GlobalVariable *GV = M.getGlobalVariable(Name);
  if (!GV || !GV->hasInitializer()) return;

  // Should be an array of 'i8*'.
  const ConstantArray *InitList = cast<ConstantArray>(GV->getInitializer());

  for (unsigned i = 0, e = InitList->getNumOperands(); i != e; ++i)
    if (const GlobalVariable *G =
        dyn_cast<GlobalVariable>(InitList->getOperand(i)->stripPointerCasts()))
      MustKeepGlobalVariables.insert(G);
}

void GlobalMergeImpl::setMustKeepGlobalVariables(Module &M) {
  collectUsedGlobalVariables(M, "llvm.used");
  collectUsedGlobalVariables(M, "llvm.compiler.used");

  for (Function &F : M) {
    for (BasicBlock &BB : F) {
      Instruction *Pad = BB.getFirstNonPHI();
      if (!Pad->isEHPad())
        continue;

      // Keep globals used by landingpads and catchpads.
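      // (Typically these are the typeinfo globals referenced by catch
      // clauses, or constant arrays of typeinfos used by filter clauses.)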
      for (const Use &U : Pad->operands()) {
        if (const GlobalVariable *GV =
                dyn_cast<GlobalVariable>(U->stripPointerCasts()))
          MustKeepGlobalVariables.insert(GV);
        else if (const ConstantArray *CA = dyn_cast<ConstantArray>(U->stripPointerCasts())) {
          for (const Use &Elt : CA->operands()) {
            if (const GlobalVariable *GV =
                    dyn_cast<GlobalVariable>(Elt->stripPointerCasts()))
              MustKeepGlobalVariables.insert(GV);
          }
        }
      }
    }
  }
}

bool GlobalMergeImpl::run(Module &M) {
  if (!EnableGlobalMerge)
    return false;

  IsMachO = Triple(M.getTargetTriple()).isOSBinFormatMachO();

  auto &DL = M.getDataLayout();
  DenseMap<std::pair<unsigned, StringRef>, SmallVector<GlobalVariable *, 16>>
      Globals, ConstGlobals, BSSGlobals;
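  // Candidate globals are binned by (address space, explicit section) so that
  // only globals destined for the same place are merged together; BSS,
  // constant, and other data globals are additionally kept in separate bins.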
  bool Changed = false;
  setMustKeepGlobalVariables(M);

  LLVM_DEBUG({
      dbgs() << "Number of GV that must be kept:  " <<
                MustKeepGlobalVariables.size() << "\n";
      for (const GlobalVariable *KeptGV : MustKeepGlobalVariables)
        dbgs() << "Kept: " << *KeptGV << "\n";
  });
  // Gather all merge candidates.
  for (auto &GV : M.globals()) {
    // Merge is safe for "normal" internal or external globals only
    if (GV.isDeclaration() || GV.isThreadLocal() || GV.hasImplicitSection())
      continue;

    // It's not safe to merge globals that may be preempted
    if (TM && !TM->shouldAssumeDSOLocal(&GV))
      continue;

    if (!(Opt.MergeExternal && GV.hasExternalLinkage()) &&
        !GV.hasInternalLinkage())
      continue;

    PointerType *PT = dyn_cast<PointerType>(GV.getType());
    assert(PT && "Global variable is not a pointer!");

    unsigned AddressSpace = PT->getAddressSpace();
    StringRef Section = GV.getSection();

    // Ignore all 'special' globals.
    if (GV.getName().starts_with("llvm.") || GV.getName().starts_with(".llvm."))
      continue;

    // Ignore all "required" globals:
    if (isMustKeepGlobalVariable(&GV))
      continue;

    // Don't merge tagged globals, as each global should have its own unique
    // memory tag at runtime. TODO(hctim): This can be relaxed: constant globals
    // with compatible alignment and the same contents may be merged as long as
    // the globals occupy the same number of tag granules (i.e. `size_a / 16 ==
    // size_b / 16`).
    if (GV.isTagged())
      continue;

    Type *Ty = GV.getValueType();
    TypeSize AllocSize = DL.getTypeAllocSize(Ty);
    if (AllocSize < Opt.MaxOffset && AllocSize >= Opt.MinSize) {
      if (TM &&
          TargetLoweringObjectFile::getKindForGlobal(&GV, *TM).isBSS())
        BSSGlobals[{AddressSpace, Section}].push_back(&GV);
      else if (GV.isConstant())
        ConstGlobals[{AddressSpace, Section}].push_back(&GV);
      else
        Globals[{AddressSpace, Section}].push_back(&GV);
    }
  }

  for (auto &P : Globals)
    if (P.second.size() > 1)
      Changed |= doMerge(P.second, M, false, P.first.first);

  for (auto &P : BSSGlobals)
    if (P.second.size() > 1)
      Changed |= doMerge(P.second, M, false, P.first.first);

  if (EnableGlobalMergeOnConst)
    for (auto &P : ConstGlobals)
      if (P.second.size() > 1)
        Changed |= doMerge(P.second, M, true, P.first.first);

  return Changed;
}

Pass *llvm::createGlobalMergePass(const TargetMachine *TM, unsigned Offset,
                                  bool OnlyOptimizeForSize,
                                  bool MergeExternalByDefault) {
  bool MergeExternal = (EnableGlobalMergeOnExternal == cl::BOU_UNSET) ?
    MergeExternalByDefault : (EnableGlobalMergeOnExternal == cl::BOU_TRUE);
  return new GlobalMerge(TM, Offset, OnlyOptimizeForSize, MergeExternal);
}