xref: /llvm-project/llvm/lib/CodeGen/GlobalMerge.cpp (revision 820f7548a14b9a53e77ada91ef345ba3b91b16e8)
1 //===-- GlobalMerge.cpp - Internal globals merging  -----------------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 // This pass merges globals with internal linkage into one. This way all the
10 // globals which were merged into a biggest one can be addressed using offsets
11 // from the same base pointer (no need for separate base pointer for each of the
12 // global). Such a transformation can significantly reduce the register pressure
13 // when many globals are involved.
14 //
15 // For example, consider the code which touches several global variables at
16 // once:
17 //
18 // static int foo[N], bar[N], baz[N];
19 //
20 // for (i = 0; i < N; ++i) {
21 //    foo[i] = bar[i] * baz[i];
22 // }
23 //
24 //  On ARM the addresses of 3 arrays should be kept in the registers, thus
25 //  this code has quite large register pressure (loop body):
26 //
27 //  ldr     r1, [r5], #4
28 //  ldr     r2, [r6], #4
29 //  mul     r1, r2, r1
30 //  str     r1, [r0], #4
31 //
32 //  Pass converts the code to something like:
33 //
34 //  static struct {
35 //    int foo[N];
36 //    int bar[N];
37 //    int baz[N];
38 //  } merged;
39 //
40 //  for (i = 0; i < N; ++i) {
41 //    merged.foo[i] = merged.bar[i] * merged.baz[i];
42 //  }
43 //
44 //  and in ARM code this becomes:
45 //
46 //  ldr     r0, [r5, #40]
47 //  ldr     r1, [r5, #80]
48 //  mul     r0, r1, r0
49 //  str     r0, [r5], #4
50 //
51 //  note that we saved 2 registers here almost "for free".
52 //
53 // However, merging globals can have tradeoffs:
54 // - it confuses debuggers, tools, and users
55 // - it makes linker optimizations less useful (order files, LOHs, ...)
56 // - it forces usage of indexed addressing (which isn't necessarily "free")
57 // - it can increase register pressure when the uses are disparate enough.
58 //
59 // We use heuristics to discover the best global grouping we can (cf cl::opts).
60 // ===---------------------------------------------------------------------===//
61 
62 #include "llvm/ADT/DenseMap.h"
63 #include "llvm/ADT/SmallBitVector.h"
64 #include "llvm/ADT/SmallPtrSet.h"
65 #include "llvm/ADT/Statistic.h"
66 #include "llvm/CodeGen/Passes.h"
67 #include "llvm/IR/Attributes.h"
68 #include "llvm/IR/Constants.h"
69 #include "llvm/IR/DataLayout.h"
70 #include "llvm/IR/DerivedTypes.h"
71 #include "llvm/IR/Function.h"
72 #include "llvm/IR/GlobalVariable.h"
73 #include "llvm/IR/Instructions.h"
74 #include "llvm/IR/Intrinsics.h"
75 #include "llvm/IR/Module.h"
76 #include "llvm/Pass.h"
77 #include "llvm/Support/CommandLine.h"
78 #include "llvm/Support/Debug.h"
79 #include "llvm/Support/raw_ostream.h"
80 #include "llvm/Target/TargetLowering.h"
81 #include "llvm/Target/TargetLoweringObjectFile.h"
82 #include "llvm/Target/TargetSubtargetInfo.h"
83 #include <algorithm>
84 using namespace llvm;
85 
86 #define DEBUG_TYPE "global-merge"
87 
// FIXME: This is only useful as a last-resort way to disable the pass.
static cl::opt<bool>
EnableGlobalMerge("enable-global-merge", cl::Hidden,
                  cl::desc("Enable the global merge pass"),
                  cl::init(true));

// When set, candidates are grouped by how they are used together (per
// function) instead of blindly merging every eligible global in an
// address space.  See the use-set discovery logic in doMerge.
static cl::opt<bool> GlobalMergeGroupByUse(
    "global-merge-group-by-use", cl::Hidden,
    cl::desc("Improve global merge pass to look at uses"), cl::init(true));

// When set, a global that is never used together with another global is
// excluded from merging (merging it can only hurt).
static cl::opt<bool> GlobalMergeIgnoreSingleUse(
    "global-merge-ignore-single-use", cl::Hidden,
    cl::desc("Improve global merge pass to ignore globals only used alone"),
    cl::init(true));

// Constant (read-only) globals are only considered when this is enabled;
// see the ConstGlobals handling in doInitialization.
static cl::opt<bool>
EnableGlobalMergeOnConst("global-merge-on-const", cl::Hidden,
                         cl::desc("Enable global merge pass on constants"),
                         cl::init(false));

// FIXME: this could be a transitional option, and we probably need to remove
// it if only we are sure this optimization could always benefit all targets.
// Tri-state: when left unset on the command line, the target's default
// (MergeExternalByDefault in createGlobalMergePass) wins.
static cl::opt<cl::boolOrDefault>
EnableGlobalMergeOnExternal("global-merge-on-external", cl::Hidden,
     cl::desc("Enable global merge pass on external linkage"));

STATISTIC(NumMerged, "Number of globals merged");
namespace {
  // Module-level pass that merges eligible global variables into a single
  // aggregate ("_MergedGlobals") per address space, so they can be addressed
  // via offsets from one base pointer.  The real work happens in
  // doInitialization/doFinalization; runOnFunction is a no-op.
  class GlobalMerge : public FunctionPass {
    // Target we are compiling for; used for the triple (Mach-O alias rules)
    // and section-kind queries.  May be null when default-constructed.
    const TargetMachine *TM;
    // FIXME: Infer the maximum possible offset depending on the actual users
    // (these max offsets are different for the users inside Thumb or ARM
    // functions), see the code that passes in the offset in the ARM backend
    // for more information.
    unsigned MaxOffset;

    /// Whether we should try to optimize for size only.
    /// Currently, this applies a dead simple heuristic: only consider globals
    /// used in minsize functions for merging.
    /// FIXME: This could learn about optsize, and be used in the cost model.
    bool OnlyOptimizeForSize;

    /// Whether we should merge global variables that have external linkage.
    bool MergeExternalGlobals;

    /// Partition \p Globals (all in address space \p AddrSpace) into use-sets
    /// and merge the profitable ones, delegating to the BitVector overload.
    bool doMerge(SmallVectorImpl<GlobalVariable*> &Globals,
                 Module &M, bool isConst, unsigned AddrSpace) const;
    /// \brief Merge everything in \p Globals for which the corresponding bit
    /// in \p GlobalSet is set.
    bool doMerge(const SmallVectorImpl<GlobalVariable *> &Globals,
                 const BitVector &GlobalSet, Module &M, bool isConst,
                 unsigned AddrSpace) const;

    /// \brief Check if the given variable has been identified as must keep
    /// \pre setMustKeepGlobalVariables must have been called on the Module that
    ///      contains GV
    bool isMustKeepGlobalVariable(const GlobalVariable *GV) const {
      return MustKeepGlobalVariables.count(GV);
    }

    /// Collect every variables marked as "used" or used in a landing pad
    /// instruction for this Module.
    void setMustKeepGlobalVariables(Module &M);

    /// Collect every variables marked as "used"
    void collectUsedGlobalVariables(Module &M);

    /// Keep track of the GlobalVariable that must not be merged away
    SmallPtrSet<const GlobalVariable *, 16> MustKeepGlobalVariables;

  public:
    static char ID;             // Pass identification, replacement for typeid.
    explicit GlobalMerge(const TargetMachine *TM = nullptr,
                         unsigned MaximalOffset = 0,
                         bool OnlyOptimizeForSize = false,
                         bool MergeExternalGlobals = false)
        : FunctionPass(ID), TM(TM), MaxOffset(MaximalOffset),
          OnlyOptimizeForSize(OnlyOptimizeForSize),
          MergeExternalGlobals(MergeExternalGlobals) {
      initializeGlobalMergePass(*PassRegistry::getPassRegistry());
    }

    bool doInitialization(Module &M) override;
    bool runOnFunction(Function &F) override;
    bool doFinalization(Module &M) override;

    const char *getPassName() const override {
      return "Merge internal globals";
    }

    void getAnalysisUsage(AnalysisUsage &AU) const override {
      AU.setPreservesCFG();
      FunctionPass::getAnalysisUsage(AU);
    }
  };
} // end anonymous namespace
184 
char GlobalMerge::ID = 0;
// Register the pass under the "global-merge" name so it can be requested on
// the command line and initialized from the GlobalMerge constructor.
INITIALIZE_PASS_BEGIN(GlobalMerge, "global-merge", "Merge global variables",
                      false, false)
INITIALIZE_PASS_END(GlobalMerge, "global-merge", "Merge global variables",
                    false, false)
190 
// Top-level merging driver for one (address space, constness) bucket.
// Sorts the candidates by size, discovers which globals are used together,
// and hands profitable subsets to the BitVector overload of doMerge.
// Returns true if any merge was performed.
bool GlobalMerge::doMerge(SmallVectorImpl<GlobalVariable*> &Globals,
                          Module &M, bool isConst, unsigned AddrSpace) const {
  auto &DL = M.getDataLayout();
  // Sort by increasing alloc size so small globals pack tightly before
  // MaxOffset is reached; stable_sort keeps the original order for ties.
  // FIXME: Find better heuristics
  std::stable_sort(Globals.begin(), Globals.end(),
                   [&DL](const GlobalVariable *GV1, const GlobalVariable *GV2) {
                     return DL.getTypeAllocSize(GV1->getValueType()) <
                            DL.getTypeAllocSize(GV2->getValueType());
                   });

  // If we want to just blindly group all globals together, do so.
  if (!GlobalMergeGroupByUse) {
    BitVector AllGlobals(Globals.size());
    AllGlobals.set();
    return doMerge(Globals, AllGlobals, M, isConst, AddrSpace);
  }

  // If we want to be smarter, look at all uses of each global, to try to
  // discover all sets of globals used together, and how many times each of
  // these sets occurred.
  //
  // Keep this reasonably efficient, by having an append-only list of all sets
  // discovered so far (UsedGlobalSet), and mapping each "together-ness" unit of
  // code (currently, a Function) to the set of globals seen so far that are
  // used together in that unit (GlobalUsesByFunction).
  //
  // When we look at the Nth global, we know that any new set is either:
  // - the singleton set {N}, containing this global only, or
  // - the union of {N} and a previously-discovered set, containing some
  //   combination of the previous N-1 globals.
  // Using that knowledge, when looking at the Nth global, we can keep:
  // - a reference to the singleton set {N} (CurGVOnlySetIdx)
  // - a list mapping each previous set to its union with {N} (EncounteredUGS),
  //   if it actually occurs.

  // We keep track of the sets of globals used together "close enough".
  struct UsedGlobalSet {
    // Starts at UsageCount 1: a set is only created on its first use.
    UsedGlobalSet(size_t Size) : Globals(Size), UsageCount(1) {}
    BitVector Globals;    // Bit i set <=> Globals[i] is in this set.
    unsigned UsageCount;  // Number of functions whose use-set is exactly this.
  };

  // Each set is unique in UsedGlobalSets.
  std::vector<UsedGlobalSet> UsedGlobalSets;

  // Avoid repeating the create-global-set pattern.
  // NOTE: the returned reference is invalidated by the next call (it points
  // into the vector); callers use it immediately and only keep indices.
  auto CreateGlobalSet = [&]() -> UsedGlobalSet & {
    UsedGlobalSets.emplace_back(Globals.size());
    return UsedGlobalSets.back();
  };

  // The first set is the empty set: index 0 is the DenseMap default value.
  CreateGlobalSet().UsageCount = 0;

  // We define "close enough" to be "in the same function".
  // FIXME: Grouping uses by function is way too aggressive, so we should have
  // a better metric for distance between uses.
  // The obvious alternative would be to group by BasicBlock, but that's in
  // turn too conservative..
  // Anything in between wouldn't be trivial to compute, so just stick with
  // per-function grouping.

  // The value type is an index into UsedGlobalSets.
  // The default (0) conveniently points to the empty set.
  DenseMap<Function *, size_t /*UsedGlobalSetIdx*/> GlobalUsesByFunction;

  // Now, look at each merge-eligible global in turn.

  // Keep track of the sets we already encountered to which we added the
  // current global.
  // Each element matches the same-index element in UsedGlobalSets.
  // This lets us efficiently tell whether a set has already been expanded to
  // include the current global.
  std::vector<size_t> EncounteredUGS;

  for (size_t GI = 0, GE = Globals.size(); GI != GE; ++GI) {
    GlobalVariable *GV = Globals[GI];

    // Reset the encountered sets for this global...
    std::fill(EncounteredUGS.begin(), EncounteredUGS.end(), 0);
    // ...and grow it in case we created new sets for the previous global.
    EncounteredUGS.resize(UsedGlobalSets.size());

    // We might need to create a set that only consists of the current global.
    // Keep track of its index into UsedGlobalSets.
    size_t CurGVOnlySetIdx = 0;

    // For each global, look at all its Uses.
    for (auto &U : GV->uses()) {
      // This Use might be a ConstantExpr.  We're interested in Instruction
      // users, so look through ConstantExpr...
      Use *UI, *UE;
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(U.getUser())) {
        if (CE->use_empty())
          continue;
        // Walk the ConstantExpr's whole use list (UE == nullptr terminates
        // the getNext() chain at the end of that list).
        UI = &*CE->use_begin();
        UE = nullptr;
      } else if (isa<Instruction>(U.getUser())) {
        // A direct instruction use: visit just this single Use.
        UI = &U;
        UE = UI->getNext();
      } else {
        continue;
      }

      // ...to iterate on all the instruction users of the global.
      // Note that we iterate on Uses and not on Users to be able to getNext().
      for (; UI != UE; UI = UI->getNext()) {
        Instruction *I = dyn_cast<Instruction>(UI->getUser());
        if (!I)
          continue;

        Function *ParentFn = I->getParent()->getParent();

        // If we're only optimizing for size, ignore non-minsize functions.
        if (OnlyOptimizeForSize && !ParentFn->optForMinSize())
          continue;

        size_t UGSIdx = GlobalUsesByFunction[ParentFn];

        // If this is the first global this function uses (index 0 is the
        // empty set), map the function to the set consisting of this global
        // only.
        if (!UGSIdx) {
          // If that set doesn't exist yet, create it.
          if (!CurGVOnlySetIdx) {
            CurGVOnlySetIdx = UsedGlobalSets.size();
            CreateGlobalSet().Globals.set(GI);
          } else {
            ++UsedGlobalSets[CurGVOnlySetIdx].UsageCount;
          }

          GlobalUsesByFunction[ParentFn] = CurGVOnlySetIdx;
          continue;
        }

        // If the function's set already contains this global, just increment
        // the counter.
        if (UsedGlobalSets[UGSIdx].Globals.test(GI)) {
          ++UsedGlobalSets[UGSIdx].UsageCount;
          continue;
        }

        // If not, the previous set wasn't actually used in this function.
        --UsedGlobalSets[UGSIdx].UsageCount;

        // If we already expanded the previous set to include this global, just
        // reuse that expanded set.
        if (size_t ExpandedIdx = EncounteredUGS[UGSIdx]) {
          ++UsedGlobalSets[ExpandedIdx].UsageCount;
          GlobalUsesByFunction[ParentFn] = ExpandedIdx;
          continue;
        }

        // If not, create a new set consisting of the union of the previous set
        // and this global.  Mark it as encountered, so we can reuse it later.
        GlobalUsesByFunction[ParentFn] = EncounteredUGS[UGSIdx] =
            UsedGlobalSets.size();

        UsedGlobalSet &NewUGS = CreateGlobalSet();
        NewUGS.Globals.set(GI);
        NewUGS.Globals |= UsedGlobalSets[UGSIdx].Globals;
      }
    }
  }

  // Now we found a bunch of sets of globals used together.  We accumulated
  // the number of times we encountered the sets (i.e., the number of functions
  // that use that exact set of globals).
  //
  // Multiply that by the size of the set to give us a crude profitability
  // metric.  Sorted ascending; the loops below walk it in reverse, so the
  // most profitable sets are considered first.
  std::sort(UsedGlobalSets.begin(), UsedGlobalSets.end(),
            [](const UsedGlobalSet &UGS1, const UsedGlobalSet &UGS2) {
              return UGS1.Globals.count() * UGS1.UsageCount <
                     UGS2.Globals.count() * UGS2.UsageCount;
            });

  // We can choose to merge all globals together, but ignore globals never used
  // with another global.  This catches the obviously non-profitable cases of
  // having a single global, but is aggressive enough for any other case.
  if (GlobalMergeIgnoreSingleUse) {
    BitVector AllGlobals(Globals.size());
    for (size_t i = 0, e = UsedGlobalSets.size(); i != e; ++i) {
      const UsedGlobalSet &UGS = UsedGlobalSets[e - i - 1];
      if (UGS.UsageCount == 0)
        continue;
      if (UGS.Globals.count() > 1)
        AllGlobals |= UGS.Globals;
    }
    return doMerge(Globals, AllGlobals, M, isConst, AddrSpace);
  }

  // Starting from the sets with the best (=biggest) profitability, find a
  // good combination.
  // The ideal (and expensive) solution can only be found by trying all
  // combinations, looking for the one with the best profitability.
  // Don't be smart about it, and just pick the first compatible combination,
  // starting with the sets with the best profitability.
  BitVector PickedGlobals(Globals.size());
  bool Changed = false;

  for (size_t i = 0, e = UsedGlobalSets.size(); i != e; ++i) {
    const UsedGlobalSet &UGS = UsedGlobalSets[e - i - 1];
    if (UGS.UsageCount == 0)
      continue;
    if (PickedGlobals.anyCommon(UGS.Globals))
      continue;
    PickedGlobals |= UGS.Globals;
    // If the set only contains one global, there's no point in merging.
    // Ignore the global for inclusion in other sets though, so keep it in
    // PickedGlobals.
    if (UGS.Globals.count() < 2)
      continue;
    Changed |= doMerge(Globals, UGS.Globals, M, isConst, AddrSpace);
  }

  return Changed;
}
407 
// Merge the globals selected by GlobalSet into one or more "_MergedGlobals"
// structs, each at most MaxOffset bytes, replacing every original global with
// a GEP into the struct.  The original globals are erased; aliases preserve
// externally-visible names where that is safe.  Always returns true (callers
// only invoke this when a merge will actually happen).
bool GlobalMerge::doMerge(const SmallVectorImpl<GlobalVariable *> &Globals,
                          const BitVector &GlobalSet, Module &M, bool isConst,
                          unsigned AddrSpace) const {
  assert(Globals.size() > 1);

  Type *Int32Ty = Type::getInt32Ty(M.getContext());
  auto &DL = M.getDataLayout();

  DEBUG(dbgs() << " Trying to merge set, starts with #"
               << GlobalSet.find_first() << "\n");

  // i walks the set bits of GlobalSet; each outer iteration emits one merged
  // struct covering globals [i, j).  -1 means no more set bits.
  ssize_t i = GlobalSet.find_first();
  while (i != -1) {
    ssize_t j = 0;
    uint64_t MergedSize = 0;
    std::vector<Type*> Tys;
    std::vector<Constant*> Inits;

    // Greedily take consecutive selected globals until the struct would
    // exceed MaxOffset; j is left at the first global NOT taken.
    for (j = i; j != -1; j = GlobalSet.find_next(j)) {
      Type *Ty = Globals[j]->getValueType();
      MergedSize += DL.getTypeAllocSize(Ty);
      if (MergedSize > MaxOffset) {
        break;
      }
      Tys.push_back(Ty);
      Inits.push_back(Globals[j]->getInitializer());
    }

    StructType *MergedTy = StructType::get(M.getContext(), Tys);
    Constant *MergedInit = ConstantStruct::get(MergedTy, Inits);

    GlobalVariable *MergedGV = new GlobalVariable(
        M, MergedTy, isConst, GlobalValue::PrivateLinkage, MergedInit,
        "_MergedGlobals", nullptr, GlobalVariable::NotThreadLocal, AddrSpace);

    // Rewrite each merged global as a GEP to its slot in the struct.
    for (ssize_t k = i, idx = 0; k != j; k = GlobalSet.find_next(k), ++idx) {
      GlobalValue::LinkageTypes Linkage = Globals[k]->getLinkage();
      // Copy the name before erasing the global (getName() is a view into it).
      std::string Name = Globals[k]->getName();

      Constant *Idx[2] = {
        ConstantInt::get(Int32Ty, 0),
        ConstantInt::get(Int32Ty, idx),
      };
      Constant *GEP =
          ConstantExpr::getInBoundsGetElementPtr(MergedTy, MergedGV, Idx);
      Globals[k]->replaceAllUsesWith(GEP);
      Globals[k]->eraseFromParent();

      // When the linkage is not internal we must emit an alias for the original
      // variable name as it may be accessed from another object. On non-Mach-O
      // we can also emit an alias for internal linkage as it's safe to do so.
      // It's not safe on Mach-O as the alias (and thus the portion of the
      // MergedGlobals variable) may be dead stripped at link time.
      if (Linkage != GlobalValue::InternalLinkage ||
          !TM->getTargetTriple().isOSBinFormatMachO()) {
        GlobalAlias::create(Tys[idx], AddrSpace, Linkage, Name, GEP, &M);
      }

      NumMerged++;
    }
    // Continue with the first global that didn't fit into this struct.
    i = j;
  }

  return true;
}
473 
474 void GlobalMerge::collectUsedGlobalVariables(Module &M) {
475   // Extract global variables from llvm.used array
476   const GlobalVariable *GV = M.getGlobalVariable("llvm.used");
477   if (!GV || !GV->hasInitializer()) return;
478 
479   // Should be an array of 'i8*'.
480   const ConstantArray *InitList = cast<ConstantArray>(GV->getInitializer());
481 
482   for (unsigned i = 0, e = InitList->getNumOperands(); i != e; ++i)
483     if (const GlobalVariable *G =
484         dyn_cast<GlobalVariable>(InitList->getOperand(i)->stripPointerCasts()))
485       MustKeepGlobalVariables.insert(G);
486 }
487 
488 void GlobalMerge::setMustKeepGlobalVariables(Module &M) {
489   collectUsedGlobalVariables(M);
490 
491   for (Module::iterator IFn = M.begin(), IEndFn = M.end(); IFn != IEndFn;
492        ++IFn) {
493     for (Function::iterator IBB = IFn->begin(), IEndBB = IFn->end();
494          IBB != IEndBB; ++IBB) {
495       // Follow the invoke link to find the landing pad instruction
496       const InvokeInst *II = dyn_cast<InvokeInst>(IBB->getTerminator());
497       if (!II) continue;
498 
499       const LandingPadInst *LPInst = II->getUnwindDest()->getLandingPadInst();
500       // Look for globals in the clauses of the landing pad instruction
501       for (unsigned Idx = 0, NumClauses = LPInst->getNumClauses();
502            Idx != NumClauses; ++Idx)
503         if (const GlobalVariable *GV =
504             dyn_cast<GlobalVariable>(LPInst->getClause(Idx)
505                                      ->stripPointerCasts()))
506           MustKeepGlobalVariables.insert(GV);
507     }
508   }
509 }
510 
511 bool GlobalMerge::doInitialization(Module &M) {
512   if (!EnableGlobalMerge)
513     return false;
514 
515   auto &DL = M.getDataLayout();
516   DenseMap<unsigned, SmallVector<GlobalVariable*, 16> > Globals, ConstGlobals,
517                                                         BSSGlobals;
518   bool Changed = false;
519   setMustKeepGlobalVariables(M);
520 
521   // Grab all non-const globals.
522   for (auto &GV : M.globals()) {
523     // Merge is safe for "normal" internal or external globals only
524     if (GV.isDeclaration() || GV.isThreadLocal() || GV.hasSection())
525       continue;
526 
527     if (!(MergeExternalGlobals && GV.hasExternalLinkage()) &&
528         !GV.hasInternalLinkage())
529       continue;
530 
531     PointerType *PT = dyn_cast<PointerType>(GV.getType());
532     assert(PT && "Global variable is not a pointer!");
533 
534     unsigned AddressSpace = PT->getAddressSpace();
535 
536     // Ignore fancy-aligned globals for now.
537     unsigned Alignment = DL.getPreferredAlignment(&GV);
538     Type *Ty = GV.getValueType();
539     if (Alignment > DL.getABITypeAlignment(Ty))
540       continue;
541 
542     // Ignore all 'special' globals.
543     if (GV.getName().startswith("llvm.") ||
544         GV.getName().startswith(".llvm."))
545       continue;
546 
547     // Ignore all "required" globals:
548     if (isMustKeepGlobalVariable(&GV))
549       continue;
550 
551     if (DL.getTypeAllocSize(Ty) < MaxOffset) {
552       if (TargetLoweringObjectFile::getKindForGlobal(&GV, *TM).isBSSLocal())
553         BSSGlobals[AddressSpace].push_back(&GV);
554       else if (GV.isConstant())
555         ConstGlobals[AddressSpace].push_back(&GV);
556       else
557         Globals[AddressSpace].push_back(&GV);
558     }
559   }
560 
561   for (auto &P : Globals)
562     if (P.second.size() > 1)
563       Changed |= doMerge(P.second, M, false, P.first);
564 
565   for (auto &P : BSSGlobals)
566     if (P.second.size() > 1)
567       Changed |= doMerge(P.second, M, false, P.first);
568 
569   if (EnableGlobalMergeOnConst)
570     for (auto &P : ConstGlobals)
571       if (P.second.size() > 1)
572         Changed |= doMerge(P.second, M, true, P.first);
573 
574   return Changed;
575 }
576 
// All merging happens in doInitialization(); nothing is done per function,
// so the IR is never modified here.
bool GlobalMerge::runOnFunction(Function &F) {
  return false;
}
580 
// Release per-module state so the pass instance can be reused on another
// module.  The IR is not modified here.
bool GlobalMerge::doFinalization(Module &M) {
  MustKeepGlobalVariables.clear();
  return false;
}
585 
586 Pass *llvm::createGlobalMergePass(const TargetMachine *TM, unsigned Offset,
587                                   bool OnlyOptimizeForSize,
588                                   bool MergeExternalByDefault) {
589   bool MergeExternal = (EnableGlobalMergeOnExternal == cl::BOU_UNSET) ?
590     MergeExternalByDefault : (EnableGlobalMergeOnExternal == cl::BOU_TRUE);
591   return new GlobalMerge(TM, Offset, OnlyOptimizeForSize, MergeExternal);
592 }
593