//===- GlobalMerge.cpp - Internal globals merging -------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass merges globals with internal linkage into one. This way, all the
// globals that were merged into the biggest one can be addressed using offsets
// from the same base pointer (there is no need for a separate base pointer for
// each of them). Such a transformation can significantly reduce the register
// pressure when many globals are involved.
//
// For example, consider the code which touches several global variables at
// once:
//
//   static int foo[N], bar[N], baz[N];
//
//   for (i = 0; i < N; ++i) {
//     foo[i] = bar[i] * baz[i];
//   }
//
// On ARM, the addresses of all three arrays must be kept in registers, so this
// code has quite high register pressure (loop body):
//
//   ldr     r1, [r5], #4
//   ldr     r2, [r6], #4
//   mul     r1, r2, r1
//   str     r1, [r0], #4
//
// The pass converts the code to something like:
//
//   static struct {
//     int foo[N];
//     int bar[N];
//     int baz[N];
//   } merged;
//
//   for (i = 0; i < N; ++i) {
//     merged.foo[i] = merged.bar[i] * merged.baz[i];
//   }
//
// and in ARM code this becomes:
//
//   ldr     r0, [r5, #40]
//   ldr     r1, [r5, #80]
//   mul     r0, r1, r0
//   str     r0, [r5], #4
//
// Note that we saved two registers here almost "for free".
//
// However, merging globals can have tradeoffs:
// - it confuses debuggers, tools, and users
// - it makes linker optimizations less useful (order files, LOHs, ...)
// - it forces usage of indexed addressing (which isn't necessarily "free")
// - it can increase register pressure when the uses are disparate enough.
//
// We use heuristics to discover the best global grouping we can (cf cl::opts).
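//
// As a rough IR-level sketch (the names, types, and initializers here are only
// illustrative; the exact layout, padding, alignment, and linkage depend on
// the target and on the options below), merging two internal globals
//
//   @a = internal global i32 1
//   @b = internal global i32 2
//
// produces a single packed struct plus aliases for the original names:
//
//   @_MergedGlobals = private global <{ i32, i32 }> <{ i32 1, i32 2 }>
//   @a = internal alias i32, ptr getelementptr inbounds
//            (<{ i32, i32 }>, ptr @_MergedGlobals, i32 0, i32 0)
//   @b = internal alias i32, ptr getelementptr inbounds
//            (<{ i32, i32 }>, ptr @_MergedGlobals, i32 0, i32 1)
//
// and every use of @a and @b is rewritten to the corresponding getelementptr
// constant expression.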
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/GlobalMerge.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/InitializePasses.h"
#include "llvm/MC/SectionKind.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/TargetParser/Triple.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <string>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "global-merge"

// FIXME: This is only useful as a last-resort way to disable the pass.
static cl::opt<bool>
    EnableGlobalMerge("enable-global-merge", cl::Hidden,
                      cl::desc("Enable the global merge pass"),
                      cl::init(true));

static cl::opt<unsigned>
    GlobalMergeMaxOffset("global-merge-max-offset", cl::Hidden,
                         cl::desc("Set maximum offset for global merge pass"),
                         cl::init(0));

static cl::opt<bool> GlobalMergeGroupByUse(
    "global-merge-group-by-use", cl::Hidden,
    cl::desc("Improve global merge pass to look at uses"), cl::init(true));

static cl::opt<bool> GlobalMergeAllConst(
    "global-merge-all-const", cl::Hidden,
    cl::desc("Merge all const globals without looking at uses"),
    cl::init(false));

static cl::opt<bool> GlobalMergeIgnoreSingleUse(
    "global-merge-ignore-single-use", cl::Hidden,
    cl::desc("Improve global merge pass to ignore globals only used alone"),
    cl::init(true));

static cl::opt<bool>
    EnableGlobalMergeOnConst("global-merge-on-const", cl::Hidden,
                             cl::desc("Enable global merge pass on constants"),
                             cl::init(false));

// FIXME: This could be a transitional option; we should remove it once we are
// sure this optimization always benefits all targets.
static cl::opt<cl::boolOrDefault>
    EnableGlobalMergeOnExternal("global-merge-on-external", cl::Hidden,
                                cl::desc("Enable global merge pass on "
                                         "external linkage"));

static cl::opt<unsigned>
    GlobalMergeMinDataSize("global-merge-min-data-size",
                           cl::desc("The minimum size in bytes of each global "
                                    "that should be considered in merging."),
                           cl::init(0), cl::Hidden);

STATISTIC(NumMerged, "Number of globals merged");

namespace {

class GlobalMergeImpl {
  const TargetMachine *TM = nullptr;
  GlobalMergeOptions Opt;
  bool IsMachO = false;

private:
  bool doMerge(SmallVectorImpl<GlobalVariable *> &Globals, Module &M,
               bool isConst, unsigned AddrSpace) const;

  /// Merge everything in \p Globals for which the corresponding bit
  /// in \p GlobalSet is set.
  bool doMerge(const SmallVectorImpl<GlobalVariable *> &Globals,
               const BitVector &GlobalSet, Module &M, bool isConst,
               unsigned AddrSpace) const;

  /// Check whether the given variable has been identified as one that must be
  /// kept.
  /// \pre setMustKeepGlobalVariables must have been called on the Module that
  /// contains GV.
  bool isMustKeepGlobalVariable(const GlobalVariable *GV) const {
    return MustKeepGlobalVariables.count(GV);
  }

  /// Collect every variable marked as "used" or used in a landing pad
  /// instruction for this Module.
  void setMustKeepGlobalVariables(Module &M);

  /// Collect every variable marked as "used".
  void collectUsedGlobalVariables(Module &M, StringRef Name);

  /// Keep track of the GlobalVariables that must not be merged away.
  SmallSetVector<const GlobalVariable *, 16> MustKeepGlobalVariables;

public:
  GlobalMergeImpl(const TargetMachine *TM, GlobalMergeOptions Opt)
      : TM(TM), Opt(Opt) {}
  bool run(Module &M);
};

class GlobalMerge : public FunctionPass {
  const TargetMachine *TM = nullptr;
  GlobalMergeOptions Opt;

public:
  static char ID; // Pass identification, replacement for typeid.

  explicit GlobalMerge() : FunctionPass(ID) {
    Opt.MaxOffset = GlobalMergeMaxOffset;
    initializeGlobalMergePass(*PassRegistry::getPassRegistry());
  }

  explicit GlobalMerge(const TargetMachine *TM, unsigned MaximalOffset,
                       bool OnlyOptimizeForSize, bool MergeExternalGlobals,
                       bool MergeConstantGlobals, bool MergeConstAggressive)
      : FunctionPass(ID), TM(TM) {
    Opt.MaxOffset = MaximalOffset;
    Opt.SizeOnly = OnlyOptimizeForSize;
    Opt.MergeExternal = MergeExternalGlobals;
    Opt.MergeConstantGlobals = MergeConstantGlobals;
    Opt.MergeConstAggressive = MergeConstAggressive;
    initializeGlobalMergePass(*PassRegistry::getPassRegistry());
  }

  bool doInitialization(Module &M) override {
    auto GetSmallDataLimit = [](Module &M) -> std::optional<uint64_t> {
      Metadata *SDL = M.getModuleFlag("SmallDataLimit");
      if (!SDL)
        return std::nullopt;
      return mdconst::extract<ConstantInt>(SDL)->getZExtValue();
    };
    if (GlobalMergeMinDataSize.getNumOccurrences())
      Opt.MinSize = GlobalMergeMinDataSize;
    else if (auto SDL = GetSmallDataLimit(M); SDL && *SDL > 0)
      Opt.MinSize = *SDL + 1;
    else
      Opt.MinSize = 0;

    GlobalMergeImpl P(TM, Opt);
    return P.run(M);
  }
  bool runOnFunction(Function &F) override { return false; }

  StringRef getPassName() const override { return "Merge internal globals"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    FunctionPass::getAnalysisUsage(AU);
  }
};

} // end anonymous namespace

PreservedAnalyses GlobalMergePass::run(Module &M, ModuleAnalysisManager &) {
  GlobalMergeImpl P(TM, Options);
  bool Changed = P.run(M);
  if (!Changed)
    return PreservedAnalyses::all();

  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  return PA;
}

char GlobalMerge::ID = 0;

INITIALIZE_PASS(GlobalMerge, DEBUG_TYPE, "Merge global variables", false, false)

bool GlobalMergeImpl::doMerge(SmallVectorImpl<GlobalVariable *> &Globals,
                              Module &M, bool isConst,
                              unsigned AddrSpace) const {
  auto &DL = M.getDataLayout();
  // FIXME: Find better heuristics.
  llvm::stable_sort(
      Globals, [&DL](const GlobalVariable *GV1, const GlobalVariable *GV2) {
        // We don't support scalable global variables.
        return DL.getTypeAllocSize(GV1->getValueType()).getFixedValue() <
               DL.getTypeAllocSize(GV2->getValueType()).getFixedValue();
      });

  // If we want to just blindly group all globals together, do so.
  if (!GlobalMergeGroupByUse || (Opt.MergeConstAggressive && isConst)) {
    BitVector AllGlobals(Globals.size());
    AllGlobals.set();
    return doMerge(Globals, AllGlobals, M, isConst, AddrSpace);
  }

  // If we want to be smarter, look at all uses of each global, to try to
  // discover all sets of globals used together, and how many times each of
  // these sets occurred.
  //
  // Keep this reasonably efficient, by having an append-only list of all sets
  // discovered so far (UsedGlobalSets), and mapping each "together-ness" unit
  // of code (currently, a Function) to the set of globals seen so far that are
  // used together in that unit (GlobalUsesByFunction).
  //
  // When we look at the Nth global, we know that any new set is either:
  // - the singleton set {N}, containing this global only, or
  // - the union of {N} and a previously-discovered set, containing some
  //   combination of the previous N-1 globals.
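  //
  // As a hypothetical illustration: when looking at global #2, if a function
  // has so far been mapped to the set {0, 1}, a use of #2 in that function
  // yields the union {0, 1, 2}, while a use of #2 in a function that has not
  // used any merge-eligible global yet yields the singleton {2}.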
  // Using that knowledge, when looking at the Nth global, we can keep:
  // - a reference to the singleton set {N} (CurGVOnlySetIdx)
  // - a list mapping each previous set to its union with {N} (EncounteredUGS),
  //   if it actually occurs.

  // We keep track of the sets of globals used together "close enough".
  struct UsedGlobalSet {
    BitVector Globals;
    unsigned UsageCount = 1;

    UsedGlobalSet(size_t Size) : Globals(Size) {}
  };

  // Each set is unique in UsedGlobalSets.
  std::vector<UsedGlobalSet> UsedGlobalSets;

  // Avoid repeating the create-global-set pattern.
  auto CreateGlobalSet = [&]() -> UsedGlobalSet & {
    UsedGlobalSets.emplace_back(Globals.size());
    return UsedGlobalSets.back();
  };

  // The first set is the empty set.
  CreateGlobalSet().UsageCount = 0;

  // We define "close enough" to be "in the same function".
  // FIXME: Grouping uses by function is way too aggressive, so we should have
  // a better metric for distance between uses.
  // The obvious alternative would be to group by BasicBlock, but that's in
  // turn too conservative.
  // Anything in between wouldn't be trivial to compute, so just stick with
  // per-function grouping.

  // The value type is an index into UsedGlobalSets.
  // The default (0) conveniently points to the empty set.
  DenseMap<Function *, size_t /*UsedGlobalSetIdx*/> GlobalUsesByFunction;

  // Now, look at each merge-eligible global in turn.

  // Keep track of the sets we already encountered to which we added the
  // current global.
  // Each element matches the same-index element in UsedGlobalSets.
  // This lets us efficiently tell whether a set has already been expanded to
  // include the current global.
  std::vector<size_t> EncounteredUGS;

  for (size_t GI = 0, GE = Globals.size(); GI != GE; ++GI) {
    GlobalVariable *GV = Globals[GI];

    // Reset the encountered sets for this global, and grow the vector in case
    // we created new sets for the previous global.
    EncounteredUGS.assign(UsedGlobalSets.size(), 0);

    // We might need to create a set that only consists of the current global.
    // Keep track of its index into UsedGlobalSets.
    size_t CurGVOnlySetIdx = 0;

    // For each global, look at all its Uses.
    for (auto &U : GV->uses()) {
      // This Use might be a ConstantExpr. We're interested in Instruction
      // users, so look through ConstantExpr...
      Use *UI, *UE;
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(U.getUser())) {
        if (CE->use_empty())
          continue;
        UI = &*CE->use_begin();
        UE = nullptr;
      } else if (isa<Instruction>(U.getUser())) {
        UI = &U;
        UE = UI->getNext();
      } else {
        continue;
      }

      // ...to iterate on all the instruction users of the global.
      // Note that we iterate on Uses and not on Users to be able to getNext().
      for (; UI != UE; UI = UI->getNext()) {
        Instruction *I = dyn_cast<Instruction>(UI->getUser());
        if (!I)
          continue;

        Function *ParentFn = I->getParent()->getParent();

        // If we're only optimizing for size, ignore non-minsize functions.
        if (Opt.SizeOnly && !ParentFn->hasMinSize())
          continue;

        size_t UGSIdx = GlobalUsesByFunction[ParentFn];

        // If this is the first global the function uses, map it to the set
        // consisting of this global only.
        if (!UGSIdx) {
          // If that set doesn't exist yet, create it.
          if (!CurGVOnlySetIdx) {
            CurGVOnlySetIdx = UsedGlobalSets.size();
            CreateGlobalSet().Globals.set(GI);
          } else {
            ++UsedGlobalSets[CurGVOnlySetIdx].UsageCount;
          }

          GlobalUsesByFunction[ParentFn] = CurGVOnlySetIdx;
          continue;
        }

        // If we already encountered a use of this global in this function,
        // just increment the counter.
        if (UsedGlobalSets[UGSIdx].Globals.test(GI)) {
          ++UsedGlobalSets[UGSIdx].UsageCount;
          continue;
        }

        // If not, the previous set wasn't actually used in this function.
        --UsedGlobalSets[UGSIdx].UsageCount;

        // If we already expanded the previous set to include this global, just
        // reuse that expanded set.
        if (size_t ExpandedIdx = EncounteredUGS[UGSIdx]) {
          ++UsedGlobalSets[ExpandedIdx].UsageCount;
          GlobalUsesByFunction[ParentFn] = ExpandedIdx;
          continue;
        }

        // If not, create a new set consisting of the union of the previous set
        // and this global. Mark it as encountered, so we can reuse it later.
        GlobalUsesByFunction[ParentFn] = EncounteredUGS[UGSIdx] =
            UsedGlobalSets.size();

        UsedGlobalSet &NewUGS = CreateGlobalSet();
        NewUGS.Globals.set(GI);
        NewUGS.Globals |= UsedGlobalSets[UGSIdx].Globals;
      }
    }
  }

  // We can choose to merge all globals together, but ignore globals never used
  // with another global. This catches the obviously non-profitable cases of
  // having a single global, but is aggressive enough for any other case.
  if (GlobalMergeIgnoreSingleUse) {
    BitVector AllGlobals(Globals.size());
    for (const UsedGlobalSet &UGS : UsedGlobalSets) {
      if (UGS.UsageCount == 0)
        continue;
      if (UGS.Globals.count() > 1)
        AllGlobals |= UGS.Globals;
    }
    return doMerge(Globals, AllGlobals, M, isConst, AddrSpace);
  }

  // Now we have found a bunch of sets of globals used together. We accumulated
  // the number of times we encountered the sets (i.e., the number of functions
  // that use that exact set of globals).
  //
  // Multiply that by the size of the set to give us a crude profitability
  // metric.
  llvm::stable_sort(UsedGlobalSets,
                    [](const UsedGlobalSet &UGS1, const UsedGlobalSet &UGS2) {
                      return UGS1.Globals.count() * UGS1.UsageCount <
                             UGS2.Globals.count() * UGS2.UsageCount;
                    });

  // Starting from the sets with the best (=biggest) profitability, find a
  // good combination.
  // The ideal (and expensive) solution can only be found by trying all
  // combinations, looking for the one with the best profitability.
  // Don't be smart about it, and just pick the first compatible combination,
  // starting with the sets with the best profitability.
  BitVector PickedGlobals(Globals.size());
  bool Changed = false;

  for (const UsedGlobalSet &UGS : llvm::reverse(UsedGlobalSets)) {
    if (UGS.UsageCount == 0)
      continue;
    if (PickedGlobals.anyCommon(UGS.Globals))
      continue;
    PickedGlobals |= UGS.Globals;
    // If the set only contains one global, there's no point in merging.
    // Ignore the global for inclusion in other sets though, so keep it in
    // PickedGlobals.
    if (UGS.Globals.count() < 2)
      continue;
    Changed |= doMerge(Globals, UGS.Globals, M, isConst, AddrSpace);
  }

  return Changed;
}

bool GlobalMergeImpl::doMerge(const SmallVectorImpl<GlobalVariable *> &Globals,
                              const BitVector &GlobalSet, Module &M,
                              bool isConst, unsigned AddrSpace) const {
  assert(Globals.size() > 1);

  Type *Int32Ty = Type::getInt32Ty(M.getContext());
  Type *Int8Ty = Type::getInt8Ty(M.getContext());
  auto &DL = M.getDataLayout();

  LLVM_DEBUG(dbgs() << " Trying to merge set, starts with #"
                    << GlobalSet.find_first() << ", total of "
                    << Globals.size() << "\n");

  bool Changed = false;
  ssize_t i = GlobalSet.find_first();
  while (i != -1) {
    ssize_t j = 0;
    uint64_t MergedSize = 0;
    std::vector<Type *> Tys;
    std::vector<Constant *> Inits;
    std::vector<unsigned> StructIdxs;

    bool HasExternal = false;
    StringRef FirstExternalName;
    Align MaxAlign;
    unsigned CurIdx = 0;
    for (j = i; j != -1; j = GlobalSet.find_next(j)) {
      Type *Ty = Globals[j]->getValueType();

      // Make sure we use the same alignment AsmPrinter would use.
      Align Alignment = DL.getPreferredAlign(Globals[j]);
      unsigned Padding = alignTo(MergedSize, Alignment) - MergedSize;
      MergedSize += Padding;
      MergedSize += DL.getTypeAllocSize(Ty);
      if (MergedSize > Opt.MaxOffset) {
        break;
      }
      if (Padding) {
        Tys.push_back(ArrayType::get(Int8Ty, Padding));
        Inits.push_back(ConstantAggregateZero::get(Tys.back()));
        ++CurIdx;
      }
      Tys.push_back(Ty);
      Inits.push_back(Globals[j]->getInitializer());
      StructIdxs.push_back(CurIdx++);

      MaxAlign = std::max(MaxAlign, Alignment);

      if (Globals[j]->hasExternalLinkage() && !HasExternal) {
        HasExternal = true;
        FirstExternalName = Globals[j]->getName();
      }
    }

    // Exit early if there is only one global to merge.
    if (Tys.size() < 2) {
      i = j;
      continue;
    }

    // If the merged variables don't have external linkage, we don't need to
    // expose the symbol after merging.
    GlobalValue::LinkageTypes Linkage = HasExternal
                                            ? GlobalValue::ExternalLinkage
                                            : GlobalValue::InternalLinkage;
    // Use a packed struct so we can control alignment.
    StructType *MergedTy = StructType::get(M.getContext(), Tys, true);
    Constant *MergedInit = ConstantStruct::get(MergedTy, Inits);

    // On Darwin, external linkage needs to be preserved, otherwise
    // dsymutil cannot preserve the debug info for the merged
    // variables. If they have external linkage, use the symbol name
    // of the first variable merged as the suffix of the global symbol
    // name. This avoids a link-time naming conflict for the
    // _MergedGlobals symbols.
    Twine MergedName =
        (IsMachO && HasExternal)
            ? "_MergedGlobals_" + FirstExternalName
            : "_MergedGlobals";
    auto MergedLinkage = IsMachO ? Linkage : GlobalValue::PrivateLinkage;
    auto *MergedGV = new GlobalVariable(
        M, MergedTy, isConst, MergedLinkage, MergedInit, MergedName, nullptr,
        GlobalVariable::NotThreadLocal, AddrSpace);

    MergedGV->setAlignment(MaxAlign);
    MergedGV->setSection(Globals[i]->getSection());

    LLVM_DEBUG(dbgs() << "MergedGV: " << *MergedGV << "\n");

    const StructLayout *MergedLayout = DL.getStructLayout(MergedTy);
    for (ssize_t k = i, idx = 0; k != j; k = GlobalSet.find_next(k), ++idx) {
      GlobalValue::LinkageTypes Linkage = Globals[k]->getLinkage();
      std::string Name(Globals[k]->getName());
      GlobalValue::VisibilityTypes Visibility = Globals[k]->getVisibility();
      GlobalValue::DLLStorageClassTypes DLLStorage =
          Globals[k]->getDLLStorageClass();

      // Copy metadata while adjusting any debug info metadata by the original
      // global's offset within the merged global.
      MergedGV->copyMetadata(Globals[k],
                             MergedLayout->getElementOffset(StructIdxs[idx]));

      Constant *Idx[2] = {
          ConstantInt::get(Int32Ty, 0),
          ConstantInt::get(Int32Ty, StructIdxs[idx]),
      };
      Constant *GEP =
          ConstantExpr::getInBoundsGetElementPtr(MergedTy, MergedGV, Idx);
      Globals[k]->replaceAllUsesWith(GEP);
      Globals[k]->eraseFromParent();

      // Emit an alias for the original variable name. This is necessary for an
      // external symbol, as it may be accessed from another object. For
      // internal symbols, it's not strictly required, but it's useful.
      //
      // This _should_ also work on Mach-O ever since '.alt_entry' support was
      // added in 2016. Unfortunately, there's a bug in ld-prime (present at
      // least from Xcode 15.0 through Xcode 16.0), in which -dead_strip doesn't
      // always honor alt_entry. To work around this issue, we don't emit
      // aliases on Mach-O. Except, we _must_ do so for external symbols. That
      // means MergeExternal is broken with that linker. (That option is
      // currently off by default on Mach-O.)
      if (!IsMachO || Linkage == GlobalValue::ExternalLinkage) {
        GlobalAlias *GA = GlobalAlias::create(Tys[StructIdxs[idx]], AddrSpace,
                                              Linkage, Name, GEP, &M);
        GA->setVisibility(Visibility);
        GA->setDLLStorageClass(DLLStorage);
      }

      NumMerged++;
    }
    Changed = true;
    i = j;
  }

  return Changed;
}

void GlobalMergeImpl::collectUsedGlobalVariables(Module &M, StringRef Name) {
  // Extract global variables from the llvm.used array.
  const GlobalVariable *GV = M.getGlobalVariable(Name);
  if (!GV || !GV->hasInitializer()) return;

  // Should be an array of 'i8*'.
  const ConstantArray *InitList = cast<ConstantArray>(GV->getInitializer());

  for (unsigned i = 0, e = InitList->getNumOperands(); i != e; ++i)
    if (const GlobalVariable *G = dyn_cast<GlobalVariable>(
            InitList->getOperand(i)->stripPointerCasts()))
      MustKeepGlobalVariables.insert(G);
}

void GlobalMergeImpl::setMustKeepGlobalVariables(Module &M) {
  collectUsedGlobalVariables(M, "llvm.used");
  collectUsedGlobalVariables(M, "llvm.compiler.used");

  for (Function &F : M) {
    for (BasicBlock &BB : F) {
      BasicBlock::iterator Pad = BB.getFirstNonPHIIt();
      auto *II = dyn_cast<IntrinsicInst>(Pad);
      if (!Pad->isEHPad() &&
          !(II && II->getIntrinsicID() == Intrinsic::eh_typeid_for))
        continue;

      // Keep globals used by landingpads, catchpads,
      // or intrinsics that require a plain global.
      for (const Use &U : Pad->operands()) {
        if (const GlobalVariable *GV =
                dyn_cast<GlobalVariable>(U->stripPointerCasts()))
          MustKeepGlobalVariables.insert(GV);
        else if (const ConstantArray *CA =
                     dyn_cast<ConstantArray>(U->stripPointerCasts())) {
          for (const Use &Elt : CA->operands()) {
            if (const GlobalVariable *GV =
                    dyn_cast<GlobalVariable>(Elt->stripPointerCasts()))
              MustKeepGlobalVariables.insert(GV);
          }
        }
      }
    }
  }
}

// This function returns true if the given data Section name has custom
// subsection-splitting semantics in Mach-O (such as splitting by a fixed
// size).
//
// See also ObjFile::parseSections and getRecordSize in
// lld/MachO/InputFiles.cpp.
static bool isSpecialMachOSection(StringRef Section) {
  // Uses starts_with, since section attributes can appear at the end of the
  // name.
  return Section.starts_with("__DATA,__cfstring") ||
         Section.starts_with("__DATA,__objc_classrefs") ||
         Section.starts_with("__DATA,__objc_selrefs");
}

bool GlobalMergeImpl::run(Module &M) {
  if (!EnableGlobalMerge)
    return false;

  IsMachO = Triple(M.getTargetTriple()).isOSBinFormatMachO();

  auto &DL = M.getDataLayout();
  MapVector<std::pair<unsigned, StringRef>, SmallVector<GlobalVariable *, 0>>
      Globals, ConstGlobals, BSSGlobals;
  bool Changed = false;
  setMustKeepGlobalVariables(M);

  LLVM_DEBUG({
    dbgs() << "Number of GV that must be kept: "
           << MustKeepGlobalVariables.size() << "\n";
    for (const GlobalVariable *KeptGV : MustKeepGlobalVariables)
      dbgs() << "Kept: " << *KeptGV << "\n";
  });
  // Grab all non-const globals.
  for (auto &GV : M.globals()) {
    // Merge is safe for "normal" internal or external globals only.
    if (GV.isDeclaration() || GV.isThreadLocal() || GV.hasImplicitSection())
      continue;

    // It's not safe to merge globals that may be preempted.
    if (TM && !TM->shouldAssumeDSOLocal(&GV))
      continue;

    if (!(Opt.MergeExternal && GV.hasExternalLinkage()) &&
        !GV.hasLocalLinkage())
      continue;

    PointerType *PT = dyn_cast<PointerType>(GV.getType());
    assert(PT && "Global variable is not a pointer!");

    unsigned AddressSpace = PT->getAddressSpace();
    StringRef Section = GV.getSection();

    // On Mach-O, some section names have special semantics. Don't merge these.
    if (IsMachO && isSpecialMachOSection(Section))
      continue;

    // Ignore all 'special' globals.
    if (GV.getName().starts_with("llvm.") ||
        GV.getName().starts_with(".llvm."))
      continue;

    // Ignore all "required" globals:
    if (isMustKeepGlobalVariable(&GV))
      continue;

    // Don't merge tagged globals, as each global should have its own unique
    // memory tag at runtime. TODO(hctim): This can be relaxed: constant globals
    // with compatible alignment and the same contents may be merged as long as
    // the globals occupy the same number of tag granules (i.e. `size_a / 16 ==
    // size_b / 16`).
    if (GV.isTagged())
      continue;

    Type *Ty = GV.getValueType();
    TypeSize AllocSize = DL.getTypeAllocSize(Ty);
    if (AllocSize < Opt.MaxOffset && AllocSize >= Opt.MinSize) {
      if (TM &&
          TargetLoweringObjectFile::getKindForGlobal(&GV, *TM).isBSS())
        BSSGlobals[{AddressSpace, Section}].push_back(&GV);
      else if (GV.isConstant())
        ConstGlobals[{AddressSpace, Section}].push_back(&GV);
      else
        Globals[{AddressSpace, Section}].push_back(&GV);
    }
    LLVM_DEBUG(dbgs() << "GV "
                      << ((DL.getTypeAllocSize(Ty) < Opt.MaxOffset)
                              ? "to merge: "
                              : "not to merge: ")
                      << GV << "\n");
  }

  for (auto &P : Globals)
    if (P.second.size() > 1)
      Changed |= doMerge(P.second, M, false, P.first.first);

  for (auto &P : BSSGlobals)
    if (P.second.size() > 1)
      Changed |= doMerge(P.second, M, false, P.first.first);

  if (Opt.MergeConstantGlobals)
    for (auto &P : ConstGlobals)
      if (P.second.size() > 1)
        Changed |= doMerge(P.second, M, true, P.first.first);

  return Changed;
}

Pass *llvm::createGlobalMergePass(const TargetMachine *TM, unsigned Offset,
                                  bool OnlyOptimizeForSize,
                                  bool MergeExternalByDefault,
                                  bool MergeConstantByDefault,
                                  bool MergeConstAggressiveByDefault) {
  bool MergeExternal = (EnableGlobalMergeOnExternal == cl::BOU_UNSET)
                           ? MergeExternalByDefault
                           : (EnableGlobalMergeOnExternal == cl::BOU_TRUE);
  bool MergeConstant = EnableGlobalMergeOnConst || MergeConstantByDefault;
  bool MergeConstAggressive = GlobalMergeAllConst.getNumOccurrences() > 0
                                  ? GlobalMergeAllConst
                                  : MergeConstAggressiveByDefault;
  return new GlobalMerge(TM, Offset, OnlyOptimizeForSize, MergeExternal,
                         MergeConstant, MergeConstAggressive);
}