//===- GlobalMerge.cpp - Internal globals merging -------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass merges globals with internal linkage into one. This way all the
// merged globals can be addressed using offsets from the same base pointer
// (there is no need for a separate base pointer for each global). Such a
// transformation can significantly reduce the register pressure when many
// globals are involved.
//
// For example, consider the code which touches several global variables at
// once:
//
//   static int foo[N], bar[N], baz[N];
//
//   for (i = 0; i < N; ++i) {
//     foo[i] = bar[i] * baz[i];
//   }
//
// On ARM the addresses of all 3 arrays must be kept in registers, so this
// code has quite high register pressure (loop body):
//
//   ldr     r1, [r5], #4
//   ldr     r2, [r6], #4
//   mul     r1, r2, r1
//   str     r1, [r0], #4
//
// The pass converts the code to something like:
//
//   static struct {
//     int foo[N];
//     int bar[N];
//     int baz[N];
//   } merged;
//
//   for (i = 0; i < N; ++i) {
//     merged.foo[i] = merged.bar[i] * merged.baz[i];
//   }
//
// and in ARM code this becomes:
//
//   ldr     r0, [r5, #40]
//   ldr     r1, [r5, #80]
//   mul     r0, r1, r0
//   str     r0, [r5], #4
//
// Note that we saved 2 registers here almost "for free".
//
// However, merging globals can have tradeoffs:
// - it confuses debuggers, tools, and users
// - it makes linker optimizations less useful (order files, LOHs, ...)
// - it forces usage of indexed addressing (which isn't necessarily "free")
// - it can increase register pressure when the uses are disparate enough.
//
// We use heuristics to discover the best global grouping we can (cf cl::opts).
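//
// At the IR level, the result of the merge is (roughly) a single struct-typed
// global named "_MergedGlobals" (see doMerge below) whose fields are the
// original initializers plus any padding needed for alignment; every use of an
// original global is rewritten into a constant inbounds GEP into the merged
// global, and an alias to the original name is emitted where the object format
// allows it.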
//
// ===---------------------------------------------------------------------===//

#include "llvm/CodeGen/GlobalMerge.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/InitializePasses.h"
#include "llvm/MC/SectionKind.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/TargetParser/Triple.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <string>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "global-merge"

// FIXME: This is only useful as a last-resort way to disable the pass.
static cl::opt<bool>
    EnableGlobalMerge("enable-global-merge", cl::Hidden,
                      cl::desc("Enable the global merge pass"),
                      cl::init(true));

static cl::opt<unsigned>
    GlobalMergeMaxOffset("global-merge-max-offset", cl::Hidden,
                         cl::desc("Set maximum offset for global merge pass"),
                         cl::init(0));

static cl::opt<bool> GlobalMergeGroupByUse(
    "global-merge-group-by-use", cl::Hidden,
    cl::desc("Improve global merge pass to look at uses"), cl::init(true));

static cl::opt<bool> GlobalMergeAllConst(
    "global-merge-all-const", cl::Hidden,
    cl::desc("Merge all const globals without looking at uses"),
    cl::init(false));

static cl::opt<bool> GlobalMergeIgnoreSingleUse(
    "global-merge-ignore-single-use", cl::Hidden,
    cl::desc("Improve global merge pass to ignore globals only used alone"),
    cl::init(true));

static cl::opt<bool>
    EnableGlobalMergeOnConst("global-merge-on-const", cl::Hidden,
                             cl::desc("Enable global merge pass on constants"),
                             cl::init(false));

// FIXME: this could be a transitional option, and we probably need to remove
// it once we are sure this optimization always benefits all targets.
static cl::opt<cl::boolOrDefault>
    EnableGlobalMergeOnExternal("global-merge-on-external", cl::Hidden,
                                cl::desc("Enable global merge pass on external linkage"));

static cl::opt<unsigned>
    GlobalMergeMinDataSize("global-merge-min-data-size",
                           cl::desc("The minimum size in bytes of each global "
                                    "that should be considered in merging."),
                           cl::init(0), cl::Hidden);

STATISTIC(NumMerged, "Number of globals merged");

namespace {

class GlobalMergeImpl {
  const TargetMachine *TM = nullptr;
  GlobalMergeOptions Opt;
  bool IsMachO = false;

private:
  bool doMerge(SmallVectorImpl<GlobalVariable *> &Globals, Module &M,
               bool isConst, unsigned AddrSpace) const;

  /// Merge everything in \p Globals for which the corresponding bit
  /// in \p GlobalSet is set.
  bool doMerge(const SmallVectorImpl<GlobalVariable *> &Globals,
               const BitVector &GlobalSet, Module &M, bool isConst,
               unsigned AddrSpace) const;

  /// Check whether the given variable has been identified as one that must be
  /// kept.
  /// \pre setMustKeepGlobalVariables must have been called on the Module that
  /// contains GV.
  bool isMustKeepGlobalVariable(const GlobalVariable *GV) const {
    return MustKeepGlobalVariables.count(GV);
  }

  /// Collect every variable marked as "used" or used in a landing pad
  /// instruction for this Module.
  void setMustKeepGlobalVariables(Module &M);

  /// Collect every variable marked as "used".
  void collectUsedGlobalVariables(Module &M, StringRef Name);

  /// Keep track of the GlobalVariables that must not be merged away.
  SmallSetVector<const GlobalVariable *, 16> MustKeepGlobalVariables;

public:
  GlobalMergeImpl(const TargetMachine *TM, GlobalMergeOptions Opt)
      : TM(TM), Opt(Opt) {}
  bool run(Module &M);
};

class GlobalMerge : public FunctionPass {
  const TargetMachine *TM = nullptr;
  GlobalMergeOptions Opt;

public:
  static char ID; // Pass identification, replacement for typeid.

  explicit GlobalMerge() : FunctionPass(ID) {
    Opt.MaxOffset = GlobalMergeMaxOffset;
    initializeGlobalMergePass(*PassRegistry::getPassRegistry());
  }

  explicit GlobalMerge(const TargetMachine *TM, unsigned MaximalOffset,
                       bool OnlyOptimizeForSize, bool MergeExternalGlobals,
                       bool MergeConstantGlobals, bool MergeConstAggressive)
      : FunctionPass(ID), TM(TM) {
    Opt.MaxOffset = MaximalOffset;
    Opt.SizeOnly = OnlyOptimizeForSize;
    Opt.MergeExternal = MergeExternalGlobals;
    Opt.MergeConstantGlobals = MergeConstantGlobals;
    Opt.MergeConstAggressive = MergeConstAggressive;
    initializeGlobalMergePass(*PassRegistry::getPassRegistry());
  }

  bool doInitialization(Module &M) override {
    auto GetSmallDataLimit = [](Module &M) -> std::optional<uint64_t> {
      Metadata *SDL = M.getModuleFlag("SmallDataLimit");
      if (!SDL)
        return std::nullopt;
      return mdconst::extract<ConstantInt>(SDL)->getZExtValue();
    };
    if (GlobalMergeMinDataSize.getNumOccurrences())
      Opt.MinSize = GlobalMergeMinDataSize;
    else if (auto SDL = GetSmallDataLimit(M); SDL && *SDL > 0)
      Opt.MinSize = *SDL + 1;
    else
      Opt.MinSize = 0;

    GlobalMergeImpl P(TM, Opt);
    return P.run(M);
  }
  bool runOnFunction(Function &F) override { return false; }

  StringRef getPassName() const override { return "Merge internal globals"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    FunctionPass::getAnalysisUsage(AU);
  }
};

} // end anonymous namespace

PreservedAnalyses GlobalMergePass::run(Module &M, ModuleAnalysisManager &) {
  GlobalMergeImpl P(TM, Options);
  bool Changed = P.run(M);
  if (!Changed)
    return PreservedAnalyses::all();

  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  return PA;
}

char GlobalMerge::ID = 0;

INITIALIZE_PASS(GlobalMerge, DEBUG_TYPE, "Merge global variables", false, false)

bool GlobalMergeImpl::doMerge(SmallVectorImpl<GlobalVariable *> &Globals,
                              Module &M, bool isConst,
                              unsigned AddrSpace) const {
  auto &DL = M.getDataLayout();
  // FIXME: Find better heuristics
  llvm::stable_sort(
      Globals, [&DL](const GlobalVariable *GV1, const GlobalVariable *GV2) {
        // We don't support scalable global variables.
        return DL.getTypeAllocSize(GV1->getValueType()).getFixedValue() <
               DL.getTypeAllocSize(GV2->getValueType()).getFixedValue();
      });

  // If we want to just blindly group all globals together, do so.
  if (!GlobalMergeGroupByUse || (Opt.MergeConstAggressive && isConst)) {
    BitVector AllGlobals(Globals.size(), true);
    return doMerge(Globals, AllGlobals, M, isConst, AddrSpace);
  }

  // If we want to be smarter, look at all uses of each global, to try to
  // discover all sets of globals used together, and how many times each of
  // these sets occurred.
  //
  // Keep this reasonably efficient, by having an append-only list of all sets
  // discovered so far (UsedGlobalSet), and mapping each "together-ness" unit
  // of code (currently, a Function) to the set of globals seen so far that
  // are used together in that unit (GlobalUsesByFunction).
  //
  // When we look at the Nth global, we know that any new set is either:
  // - the singleton set {N}, containing this global only, or
  // - the union of {N} and a previously-discovered set, containing some
  //   combination of the previous N-1 globals.
  // Using that knowledge, when looking at the Nth global, we can keep:
  // - a reference to the singleton set {N} (CurGVOnlySetIdx)
  // - a list mapping each previous set to its union with {N} (EncounteredUGS),
  //   if it actually occurs.

  // We keep track of the sets of globals used together "close enough".
  struct UsedGlobalSet {
    BitVector Globals;
    unsigned UsageCount = 1;

    UsedGlobalSet(size_t Size) : Globals(Size) {}
  };

  // Each set is unique in UsedGlobalSets.
  std::vector<UsedGlobalSet> UsedGlobalSets;

  // Avoid repeating the create-global-set pattern.
  auto CreateGlobalSet = [&]() -> UsedGlobalSet & {
    UsedGlobalSets.emplace_back(Globals.size());
    return UsedGlobalSets.back();
  };

  // The first set is the empty set.
  CreateGlobalSet().UsageCount = 0;

  // We define "close enough" to be "in the same function".
  // FIXME: Grouping uses by function is way too aggressive, so we should have
  // a better metric for distance between uses.
  // The obvious alternative would be to group by BasicBlock, but that's in
  // turn too conservative.
  // Anything in between wouldn't be trivial to compute, so just stick with
  // per-function grouping.

  // The value type is an index into UsedGlobalSets.
  // The default (0) conveniently points to the empty set.
  DenseMap<Function *, size_t /*UsedGlobalSetIdx*/> GlobalUsesByFunction;

  // Now, look at each merge-eligible global in turn.

  // Keep track of the sets we already encountered to which we added the
  // current global.
  // Each element matches the same-index element in UsedGlobalSets.
  // This lets us efficiently tell whether a set has already been expanded to
  // include the current global.
  std::vector<size_t> EncounteredUGS;

  for (size_t GI = 0, GE = Globals.size(); GI != GE; ++GI) {
    GlobalVariable *GV = Globals[GI];

    // Reset the encountered sets for this global, and grow the list in case
    // we created new sets for the previous global.
    EncounteredUGS.assign(UsedGlobalSets.size(), 0);

    // We might need to create a set that only consists of the current global.
    // Keep track of its index into UsedGlobalSets.
    size_t CurGVOnlySetIdx = 0;

    // For each global, look at all its Uses.
    for (auto &U : GV->uses()) {
      // This Use might be a ConstantExpr. We're interested in Instruction
      // users, so look through ConstantExpr...
      Use *UI, *UE;
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(U.getUser())) {
        if (CE->use_empty())
          continue;
        UI = &*CE->use_begin();
        UE = nullptr;
      } else if (isa<Instruction>(U.getUser())) {
        UI = &U;
        UE = UI->getNext();
      } else {
        continue;
      }

      // ...to iterate on all the instruction users of the global.
      // Note that we iterate on Uses and not on Users to be able to getNext().
      for (; UI != UE; UI = UI->getNext()) {
        Instruction *I = dyn_cast<Instruction>(UI->getUser());
        if (!I)
          continue;

        Function *ParentFn = I->getParent()->getParent();

        // If we're only optimizing for size, ignore non-minsize functions.
        if (Opt.SizeOnly && !ParentFn->hasMinSize())
          continue;

        size_t UGSIdx = GlobalUsesByFunction[ParentFn];

        // If this is the first global the function uses, map it to the set
        // consisting of this global only.
        if (!UGSIdx) {
          // If that set doesn't exist yet, create it.
          if (!CurGVOnlySetIdx) {
            CurGVOnlySetIdx = UsedGlobalSets.size();
            CreateGlobalSet().Globals.set(GI);
          } else {
            ++UsedGlobalSets[CurGVOnlySetIdx].UsageCount;
          }

          GlobalUsesByFunction[ParentFn] = CurGVOnlySetIdx;
          continue;
        }

        // If we already encountered a use of this global in this function,
        // just increment the counter.
        if (UsedGlobalSets[UGSIdx].Globals.test(GI)) {
          ++UsedGlobalSets[UGSIdx].UsageCount;
          continue;
        }

        // If not, the previous set wasn't actually used in this function.
        --UsedGlobalSets[UGSIdx].UsageCount;

        // If we already expanded the previous set to include this global, just
        // reuse that expanded set.
        if (size_t ExpandedIdx = EncounteredUGS[UGSIdx]) {
          ++UsedGlobalSets[ExpandedIdx].UsageCount;
          GlobalUsesByFunction[ParentFn] = ExpandedIdx;
          continue;
        }

        // If not, create a new set consisting of the union of the previous set
        // and this global. Mark it as encountered, so we can reuse it later.
        GlobalUsesByFunction[ParentFn] = EncounteredUGS[UGSIdx] =
            UsedGlobalSets.size();

        UsedGlobalSet &NewUGS = CreateGlobalSet();
        NewUGS.Globals.set(GI);
        NewUGS.Globals |= UsedGlobalSets[UGSIdx].Globals;
      }
    }
  }

  // We can choose to merge all globals together, but ignore globals never used
  // with another global. This catches the obviously non-profitable cases of
  // having a single global, but is aggressive enough for any other case.
  if (GlobalMergeIgnoreSingleUse) {
    BitVector AllGlobals(Globals.size());
    for (const UsedGlobalSet &UGS : UsedGlobalSets) {
      if (UGS.UsageCount == 0)
        continue;
      if (UGS.Globals.count() > 1)
        AllGlobals |= UGS.Globals;
    }
    return doMerge(Globals, AllGlobals, M, isConst, AddrSpace);
  }

  // Now we found a bunch of sets of globals used together. We accumulated
  // the number of times we encountered the sets (i.e., the number of functions
  // that use that exact set of globals).
  //
  // Multiply that by the size of the set to give us a crude profitability
  // metric.
  llvm::stable_sort(UsedGlobalSets,
                    [](const UsedGlobalSet &UGS1, const UsedGlobalSet &UGS2) {
                      return UGS1.Globals.count() * UGS1.UsageCount <
                             UGS2.Globals.count() * UGS2.UsageCount;
                    });

  // Starting from the sets with the best (=biggest) profitability, find a
  // good combination.
  // The ideal (and expensive) solution can only be found by trying all
  // combinations, looking for the one with the best profitability.
  // Don't be smart about it, and just pick the first compatible combination,
  // starting with the sets with the best profitability.
  BitVector PickedGlobals(Globals.size());
  bool Changed = false;

  for (const UsedGlobalSet &UGS : llvm::reverse(UsedGlobalSets)) {
    if (UGS.UsageCount == 0)
      continue;
    if (PickedGlobals.anyCommon(UGS.Globals))
      continue;
    PickedGlobals |= UGS.Globals;
    // If the set only contains one global, there's no point in merging.
    // Ignore the global for inclusion in other sets though, so keep it in
    // PickedGlobals.
    if (UGS.Globals.count() < 2)
      continue;
    Changed |= doMerge(Globals, UGS.Globals, M, isConst, AddrSpace);
  }

  return Changed;
}

bool GlobalMergeImpl::doMerge(const SmallVectorImpl<GlobalVariable *> &Globals,
                              const BitVector &GlobalSet, Module &M,
                              bool isConst, unsigned AddrSpace) const {
  assert(Globals.size() > 1);

  Type *Int32Ty = Type::getInt32Ty(M.getContext());
  Type *Int8Ty = Type::getInt8Ty(M.getContext());
  auto &DL = M.getDataLayout();

  LLVM_DEBUG(dbgs() << " Trying to merge set, starts with #"
                    << GlobalSet.find_first() << ", total of " << Globals.size()
                    << "\n");

  bool Changed = false;
  ssize_t i = GlobalSet.find_first();
  while (i != -1) {
    ssize_t j = 0;
    uint64_t MergedSize = 0;
    std::vector<Type *> Tys;
    std::vector<Constant *> Inits;
    std::vector<unsigned> StructIdxs;

    bool HasExternal = false;
    StringRef FirstExternalName;
    Align MaxAlign;
    unsigned CurIdx = 0;
    for (j = i; j != -1; j = GlobalSet.find_next(j)) {
      Type *Ty = Globals[j]->getValueType();

      // Make sure we use the same alignment AsmPrinter would use.
      Align Alignment = DL.getPreferredAlign(Globals[j]);
      unsigned Padding = alignTo(MergedSize, Alignment) - MergedSize;
      MergedSize += Padding;
      MergedSize += DL.getTypeAllocSize(Ty);
      if (MergedSize > Opt.MaxOffset) {
        break;
      }
      if (Padding) {
        Tys.push_back(ArrayType::get(Int8Ty, Padding));
        Inits.push_back(ConstantAggregateZero::get(Tys.back()));
        ++CurIdx;
      }
      Tys.push_back(Ty);
      Inits.push_back(Globals[j]->getInitializer());
      StructIdxs.push_back(CurIdx++);

      MaxAlign = std::max(MaxAlign, Alignment);

      if (Globals[j]->hasExternalLinkage() && !HasExternal) {
        HasExternal = true;
        FirstExternalName = Globals[j]->getName();
      }
    }

    // Exit early if there is only one global to merge.
    if (Tys.size() < 2) {
      i = j;
      continue;
    }

    // If the merged variables don't have external linkage, we don't need to
    // expose the symbol after merging.
    GlobalValue::LinkageTypes Linkage = HasExternal
                                            ? GlobalValue::ExternalLinkage
                                            : GlobalValue::InternalLinkage;
    // Use a packed struct so we can control alignment.
    StructType *MergedTy = StructType::get(M.getContext(), Tys, true);
    Constant *MergedInit = ConstantStruct::get(MergedTy, Inits);

    // On Darwin external linkage needs to be preserved, otherwise
    // dsymutil cannot preserve the debug info for the merged
    // variables. If they have external linkage, use the symbol name
    // of the first variable merged as the suffix of global symbol
    // name. This avoids a link-time naming conflict for the
    // _MergedGlobals symbols.
    Twine MergedName =
        (IsMachO && HasExternal)
            ? "_MergedGlobals_" + FirstExternalName
            : "_MergedGlobals";
    auto MergedLinkage = IsMachO ?
        Linkage : GlobalValue::PrivateLinkage;
    auto *MergedGV = new GlobalVariable(
        M, MergedTy, isConst, MergedLinkage, MergedInit, MergedName, nullptr,
        GlobalVariable::NotThreadLocal, AddrSpace);

    MergedGV->setAlignment(MaxAlign);
    MergedGV->setSection(Globals[i]->getSection());

    LLVM_DEBUG(dbgs() << "MergedGV: " << *MergedGV << "\n");

    const StructLayout *MergedLayout = DL.getStructLayout(MergedTy);
    for (ssize_t k = i, idx = 0; k != j; k = GlobalSet.find_next(k), ++idx) {
      GlobalValue::LinkageTypes Linkage = Globals[k]->getLinkage();
      std::string Name(Globals[k]->getName());
      GlobalValue::VisibilityTypes Visibility = Globals[k]->getVisibility();
      GlobalValue::DLLStorageClassTypes DLLStorage =
          Globals[k]->getDLLStorageClass();

      // Copy metadata while adjusting any debug info metadata by the original
      // global's offset within the merged global.
      MergedGV->copyMetadata(Globals[k],
                             MergedLayout->getElementOffset(StructIdxs[idx]));

      Constant *Idx[2] = {
          ConstantInt::get(Int32Ty, 0),
          ConstantInt::get(Int32Ty, StructIdxs[idx]),
      };
      Constant *GEP =
          ConstantExpr::getInBoundsGetElementPtr(MergedTy, MergedGV, Idx);
      Globals[k]->replaceAllUsesWith(GEP);
      Globals[k]->eraseFromParent();

      // Emit an alias for the original variable name. This is necessary for an
      // external symbol, as it may be accessed from another object. For
      // internal symbols, it's not strictly required, but it's useful.
      //
      // This _should_ also work on Mach-O ever since '.alt_entry' support was
      // added in 2016. Unfortunately, there's a bug in ld-prime (present at
      // least from Xcode 15.0 through Xcode 16.0), in which -dead_strip doesn't
      // always honor alt_entry. To work around this issue, we don't emit
      // aliases on Mach-O. Except, we _must_ do so for external symbols. That
      // means MergeExternal is broken with that linker. (That option is
      // currently off by default on Mach-O.)
      if (!IsMachO || Linkage == GlobalValue::ExternalLinkage) {
        GlobalAlias *GA = GlobalAlias::create(Tys[StructIdxs[idx]], AddrSpace,
                                              Linkage, Name, GEP, &M);
        GA->setVisibility(Visibility);
        GA->setDLLStorageClass(DLLStorage);
      }

      NumMerged++;
    }
    Changed = true;
    i = j;
  }

  return Changed;
}

void GlobalMergeImpl::collectUsedGlobalVariables(Module &M, StringRef Name) {
  // Extract global variables from llvm.used array
  const GlobalVariable *GV = M.getGlobalVariable(Name);
  if (!GV || !GV->hasInitializer()) return;

  // Should be an array of 'i8*'.
  const ConstantArray *InitList = cast<ConstantArray>(GV->getInitializer());

  for (unsigned i = 0, e = InitList->getNumOperands(); i != e; ++i)
    if (const GlobalVariable *G = dyn_cast<GlobalVariable>(
            InitList->getOperand(i)->stripPointerCasts()))
      MustKeepGlobalVariables.insert(G);
}

void GlobalMergeImpl::setMustKeepGlobalVariables(Module &M) {
  collectUsedGlobalVariables(M, "llvm.used");
  collectUsedGlobalVariables(M, "llvm.compiler.used");

  for (Function &F : M) {
    for (BasicBlock &BB : F) {
      BasicBlock::iterator Pad = BB.getFirstNonPHIIt();
      auto *II = dyn_cast<IntrinsicInst>(Pad);
      if (!Pad->isEHPad() &&
          !(II && II->getIntrinsicID() == Intrinsic::eh_typeid_for))
        continue;

      // Keep globals used by landingpads, catchpads,
      // or intrinsics that require a plain global.
      for (const Use &U : Pad->operands()) {
        if (const GlobalVariable *GV =
                dyn_cast<GlobalVariable>(U->stripPointerCasts()))
          MustKeepGlobalVariables.insert(GV);
        else if (const ConstantArray *CA =
                     dyn_cast<ConstantArray>(U->stripPointerCasts())) {
          for (const Use &Elt : CA->operands()) {
            if (const GlobalVariable *GV =
                    dyn_cast<GlobalVariable>(Elt->stripPointerCasts()))
              MustKeepGlobalVariables.insert(GV);
          }
        }
      }
    }
  }
}

// This function returns true if the given data Section name has custom
// subsection-splitting semantics in Mach-O (such as splitting by a fixed
// size).
//
// See also ObjFile::parseSections and getRecordSize in lld/MachO/InputFiles.cpp
static bool isSpecialMachOSection(StringRef Section) {
  // Uses starts_with, since section attributes can appear at the end of the
  // name.
  return Section.starts_with("__DATA,__cfstring") ||
         Section.starts_with("__DATA,__objc_classrefs") ||
         Section.starts_with("__DATA,__objc_selrefs");
}

bool GlobalMergeImpl::run(Module &M) {
  if (!EnableGlobalMerge)
    return false;

  IsMachO = Triple(M.getTargetTriple()).isOSBinFormatMachO();

  auto &DL = M.getDataLayout();
  MapVector<std::pair<unsigned, StringRef>, SmallVector<GlobalVariable *, 0>>
      Globals, ConstGlobals, BSSGlobals;
  bool Changed = false;
  setMustKeepGlobalVariables(M);

  LLVM_DEBUG({
    dbgs() << "Number of GV that must be kept: "
           << MustKeepGlobalVariables.size() << "\n";
    for (const GlobalVariable *KeptGV : MustKeepGlobalVariables)
      dbgs() << "Kept: " << *KeptGV << "\n";
  });
  // Grab all candidate globals and bucket them by address space and section.
  for (auto &GV : M.globals()) {
    // Merge is safe for "normal" internal or external globals only.
    if (GV.isDeclaration() || GV.isThreadLocal() || GV.hasImplicitSection())
      continue;

    // It's not safe to merge globals that may be preempted.
    if (TM && !TM->shouldAssumeDSOLocal(&GV))
      continue;

    if (!(Opt.MergeExternal && GV.hasExternalLinkage()) &&
        !GV.hasLocalLinkage())
      continue;

    PointerType *PT = dyn_cast<PointerType>(GV.getType());
    assert(PT && "Global variable is not a pointer!");

    unsigned AddressSpace = PT->getAddressSpace();
    StringRef Section = GV.getSection();

    // On Mach-O, some section names have special semantics. Don't merge these.
    if (IsMachO && isSpecialMachOSection(Section))
      continue;

    // Ignore all 'special' globals.
    if (GV.getName().starts_with("llvm.") ||
        GV.getName().starts_with(".llvm."))
      continue;

    // Ignore all "required" globals:
    if (isMustKeepGlobalVariable(&GV))
      continue;

    // Don't merge tagged globals, as each global should have its own unique
    // memory tag at runtime. TODO(hctim): This can be relaxed: constant globals
    // with compatible alignment and the same contents may be merged as long as
    // the globals occupy the same number of tag granules (i.e. `size_a / 16 ==
    // size_b / 16`).
    if (GV.isTagged())
      continue;

    Type *Ty = GV.getValueType();
    TypeSize AllocSize = DL.getTypeAllocSize(Ty);
    bool CanMerge = AllocSize < Opt.MaxOffset && AllocSize >= Opt.MinSize;
    if (CanMerge) {
      if (TM &&
          TargetLoweringObjectFile::getKindForGlobal(&GV, *TM).isBSS())
        BSSGlobals[{AddressSpace, Section}].push_back(&GV);
      else if (GV.isConstant())
        ConstGlobals[{AddressSpace, Section}].push_back(&GV);
      else
        Globals[{AddressSpace, Section}].push_back(&GV);
    }
    LLVM_DEBUG(dbgs() << "GV " << (CanMerge ? "" : "not ") << "to merge: " << GV
                      << "\n");
  }

  for (auto &P : Globals)
    if (P.second.size() > 1)
      Changed |= doMerge(P.second, M, false, P.first.first);

  for (auto &P : BSSGlobals)
    if (P.second.size() > 1)
      Changed |= doMerge(P.second, M, false, P.first.first);

  if (Opt.MergeConstantGlobals)
    for (auto &P : ConstGlobals)
      if (P.second.size() > 1)
        Changed |= doMerge(P.second, M, true, P.first.first);

  return Changed;
}

Pass *llvm::createGlobalMergePass(const TargetMachine *TM, unsigned Offset,
                                  bool OnlyOptimizeForSize,
                                  bool MergeExternalByDefault,
                                  bool MergeConstantByDefault,
                                  bool MergeConstAggressiveByDefault) {
  bool MergeExternal = (EnableGlobalMergeOnExternal == cl::BOU_UNSET)
                           ? MergeExternalByDefault
                           : (EnableGlobalMergeOnExternal == cl::BOU_TRUE);
  bool MergeConstant = EnableGlobalMergeOnConst || MergeConstantByDefault;
  bool MergeConstAggressive = GlobalMergeAllConst.getNumOccurrences() > 0
                                  ? GlobalMergeAllConst
                                  : MergeConstAggressiveByDefault;
  return new GlobalMerge(TM, Offset, OnlyOptimizeForSize, MergeExternal,
                         MergeConstant, MergeConstAggressive);
}
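
// A minimal usage sketch (hypothetical target code, not part of this file):
// a backend that wants this optimization would typically schedule it from its
// TargetPassConfig, picking a max offset that its addressing modes can reach
// (the 4095 below is purely illustrative), e.g.:
//
//   void MyTargetPassConfig::addIRPasses() {
//     TargetPassConfig::addIRPasses();
//     addPass(createGlobalMergePass(TM, /*Offset=*/4095,
//                                   /*OnlyOptimizeForSize=*/false,
//                                   /*MergeExternalByDefault=*/true,
//                                   /*MergeConstantByDefault=*/false,
//                                   /*MergeConstAggressiveByDefault=*/false));
//   }
//
// With the new pass manager, the equivalent entry point is GlobalMergePass
// (see GlobalMergePass::run above), constructed from a GlobalMergeOptions.
// The cl::opts at the top of this file (e.g. -global-merge-max-offset,
// -global-merge-group-by-use) can be used to experiment with the heuristics.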