//===-- GlobalMerge.cpp - Internal globals merging -----------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
// This pass merges globals with internal linkage into one. This way all of
// the merged globals can be addressed using offsets from the same base
// pointer (there is no need for a separate base pointer for each of them).
// Such a transformation can significantly reduce the register pressure when
// many globals are involved.
//
// For example, consider the code which touches several global variables at
// once:
//
// static int foo[N], bar[N], baz[N];
//
// for (i = 0; i < N; ++i) {
//    foo[i] = bar[i] * baz[i];
// }
//
// On ARM the addresses of all 3 arrays have to be kept in registers, so this
// code has quite high register pressure (loop body):
//
// ldr     r1, [r5], #4
// ldr     r2, [r6], #4
// mul     r1, r2, r1
// str     r1, [r0], #4
//
// The pass converts the code to something like:
//
// static struct {
//   int foo[N];
//   int bar[N];
//   int baz[N];
// } merged;
//
// for (i = 0; i < N; ++i) {
//    merged.foo[i] = merged.bar[i] * merged.baz[i];
// }
//
// and in ARM code this becomes:
//
// ldr     r0, [r5, #40]
// ldr     r1, [r5, #80]
// mul     r0, r1, r0
// str     r0, [r5], #4
//
// Note that we saved 2 registers here almost "for free".
//
// However, merging globals can have tradeoffs:
//  - it confuses debuggers, tools, and users
//  - it makes linker optimizations less useful (order files, LOHs, ...)
//  - it forces usage of indexed addressing (which isn't necessarily "free")
//  - it can increase register pressure when the uses are disparate enough.
//
// We use heuristics to discover the best global grouping we can (cf. cl::opts).
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <algorithm>
using namespace llvm;

#define DEBUG_TYPE "global-merge"

// FIXME: This is only useful as a last-resort way to disable the pass.
static cl::opt<bool>
EnableGlobalMerge("enable-global-merge", cl::Hidden,
                  cl::desc("Enable the global merge pass"),
                  cl::init(true));

static cl::opt<bool> GlobalMergeGroupByUse(
    "global-merge-group-by-use", cl::Hidden,
    cl::desc("Improve global merge pass to look at uses"), cl::init(true));

static cl::opt<bool> GlobalMergeIgnoreSingleUse(
    "global-merge-ignore-single-use", cl::Hidden,
    cl::desc("Improve global merge pass to ignore globals only used alone"),
    cl::init(true));

static cl::opt<bool>
EnableGlobalMergeOnConst("global-merge-on-const", cl::Hidden,
                         cl::desc("Enable global merge pass on constants"),
                         cl::init(false));

// FIXME: this could be a transitional option, and we probably need to remove
// it once we are sure this optimization could always benefit all targets.
static cl::opt<bool>
EnableGlobalMergeOnExternal("global-merge-on-external", cl::Hidden,
                            cl::desc("Enable global merge pass on external linkage"),
                            cl::init(false));

STATISTIC(NumMerged, "Number of globals merged");

namespace {
class GlobalMerge : public FunctionPass {
  const TargetMachine *TM;
  const DataLayout *DL;
  // FIXME: Infer the maximum possible offset depending on the actual users
  // (these max offsets are different for the users inside Thumb or ARM
  // functions), see the code that passes in the offset in the ARM backend
  // for more information.
  unsigned MaxOffset;

  /// Whether we should try to optimize for size only.
  /// Currently, this applies a dead simple heuristic: only consider globals
  /// used in minsize functions for merging.
  /// FIXME: This could learn about optsize, and be used in the cost model.
  bool OnlyOptimizeForSize;

  bool doMerge(SmallVectorImpl<GlobalVariable*> &Globals,
               Module &M, bool isConst, unsigned AddrSpace) const;
  /// \brief Merge everything in \p Globals for which the corresponding bit
  /// in \p GlobalSet is set.
  bool doMerge(SmallVectorImpl<GlobalVariable *> &Globals,
               const BitVector &GlobalSet, Module &M, bool isConst,
               unsigned AddrSpace) const;

  /// \brief Check if the given variable has been identified as "must keep".
  /// \pre setMustKeepGlobalVariables must have been called on the Module that
  /// contains GV.
  bool isMustKeepGlobalVariable(const GlobalVariable *GV) const {
    return MustKeepGlobalVariables.count(GV);
  }

  /// Collect every variable marked as "used" or used in a landing pad
  /// instruction for this Module.
  void setMustKeepGlobalVariables(Module &M);

  /// Collect every variable marked as "used".
  void collectUsedGlobalVariables(Module &M);

  /// Keep track of the GlobalVariables that must not be merged away.
  SmallPtrSet<const GlobalVariable *, 16> MustKeepGlobalVariables;

public:
  static char ID;             // Pass identification, replacement for typeid.
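  /// Construct the pass.  \p MaximalOffset bounds the combined allocation
  /// size of any merged struct (see MaxOffset above), and
  /// \p OnlyOptimizeForSize restricts merging to globals used in minsize
  /// functions.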
  explicit GlobalMerge(const TargetMachine *TM = nullptr,
                       unsigned MaximalOffset = 0,
                       bool OnlyOptimizeForSize = false)
      : FunctionPass(ID), TM(TM), DL(TM->getDataLayout()),
        MaxOffset(MaximalOffset), OnlyOptimizeForSize(OnlyOptimizeForSize) {
    initializeGlobalMergePass(*PassRegistry::getPassRegistry());
  }

  bool doInitialization(Module &M) override;
  bool runOnFunction(Function &F) override;
  bool doFinalization(Module &M) override;

  const char *getPassName() const override {
    return "Merge internal globals";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    FunctionPass::getAnalysisUsage(AU);
  }
};
} // end anonymous namespace

char GlobalMerge::ID = 0;

INITIALIZE_PASS_BEGIN(GlobalMerge, "global-merge", "Merge global variables",
                      false, false)
INITIALIZE_PASS_END(GlobalMerge, "global-merge", "Merge global variables",
                    false, false)

bool GlobalMerge::doMerge(SmallVectorImpl<GlobalVariable*> &Globals,
                          Module &M, bool isConst, unsigned AddrSpace) const {
  // FIXME: Find better heuristics
  std::stable_sort(
      Globals.begin(), Globals.end(),
      [this](const GlobalVariable *GV1, const GlobalVariable *GV2) {
        Type *Ty1 = cast<PointerType>(GV1->getType())->getElementType();
        Type *Ty2 = cast<PointerType>(GV2->getType())->getElementType();

        return (DL->getTypeAllocSize(Ty1) < DL->getTypeAllocSize(Ty2));
      });

  // If we want to just blindly group all globals together, do so.
  if (!GlobalMergeGroupByUse) {
    BitVector AllGlobals(Globals.size());
    AllGlobals.set();
    return doMerge(Globals, AllGlobals, M, isConst, AddrSpace);
  }

  // If we want to be smarter, look at all uses of each global, to try to
  // discover all sets of globals used together, and how many times each of
  // these sets occurred.
  //
  // Keep this reasonably efficient, by having an append-only list of all sets
  // discovered so far (UsedGlobalSets), and mapping each "together-ness" unit
  // of code (currently, a Function) to the set of globals seen so far that
  // are used together in that unit (GlobalUsesByFunction).
  //
  // When we look at the Nth global, we know that any new set is either:
  // - the singleton set {N}, containing this global only, or
  // - the union of {N} and a previously-discovered set, containing some
  //   combination of the previous N-1 globals.
  // Using that knowledge, when looking at the Nth global, we can keep:
  // - a reference to the singleton set {N} (CurGVOnlySetIdx)
  // - a list mapping each previous set to its union with {N} (EncounteredUGS),
  //   if it actually occurs.

  // We keep track of the sets of globals used together "close enough".
  struct UsedGlobalSet {
    UsedGlobalSet(size_t Size) : Globals(Size), UsageCount(1) {}
    BitVector Globals;
    unsigned UsageCount;
  };

  // Each set is unique in UsedGlobalSets.
  std::vector<UsedGlobalSet> UsedGlobalSets;

  // Avoid repeating the create-global-set pattern.
  auto CreateGlobalSet = [&]() -> UsedGlobalSet & {
    UsedGlobalSets.emplace_back(Globals.size());
    return UsedGlobalSets.back();
  };

  // The first set is the empty set.
  CreateGlobalSet().UsageCount = 0;

  // We define "close enough" to be "in the same function".
  // FIXME: Grouping uses by function is way too aggressive, so we should have
  // a better metric for distance between uses.
  // The obvious alternative would be to group by BasicBlock, but that's in
  // turn too conservative.
  // Anything in between wouldn't be trivial to compute, so just stick with
  // per-function grouping.

  // The value type is an index into UsedGlobalSets.
  // The default (0) conveniently points to the empty set.
  DenseMap<Function *, size_t /*UsedGlobalSetIdx*/> GlobalUsesByFunction;

  // Now, look at each merge-eligible global in turn.

  // Keep track of the sets we already encountered to which we added the
  // current global.
  // Each element matches the same-index element in UsedGlobalSets.
  // This lets us efficiently tell whether a set has already been expanded to
  // include the current global.
  std::vector<size_t> EncounteredUGS;

  for (size_t GI = 0, GE = Globals.size(); GI != GE; ++GI) {
    GlobalVariable *GV = Globals[GI];

    // Reset the encountered sets for this global...
    std::fill(EncounteredUGS.begin(), EncounteredUGS.end(), 0);
    // ...and grow it in case we created new sets for the previous global.
    EncounteredUGS.resize(UsedGlobalSets.size());

    // We might need to create a set that only consists of the current global.
    // Keep track of its index into UsedGlobalSets.
    size_t CurGVOnlySetIdx = 0;

    // For each global, look at all its Uses.
    for (auto &U : GV->uses()) {
      // This Use might be a ConstantExpr.  We're interested in Instruction
      // users, so look through ConstantExpr...
      Use *UI, *UE;
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(U.getUser())) {
        if (CE->use_empty())
          continue;
        UI = &*CE->use_begin();
        UE = nullptr;
      } else if (isa<Instruction>(U.getUser())) {
        UI = &U;
        UE = UI->getNext();
      } else {
        continue;
      }

      // ...to iterate on all the instruction users of the global.
      // Note that we iterate on Uses and not on Users to be able to getNext().
      for (; UI != UE; UI = UI->getNext()) {
        Instruction *I = dyn_cast<Instruction>(UI->getUser());
        if (!I)
          continue;

        Function *ParentFn = I->getParent()->getParent();

        // If we're only optimizing for size, ignore non-minsize functions.
        if (OnlyOptimizeForSize &&
            !ParentFn->hasFnAttribute(Attribute::MinSize))
          continue;

        size_t UGSIdx = GlobalUsesByFunction[ParentFn];

        // If this is the first global this function uses, map the function to
        // the set consisting of this global only.
        if (!UGSIdx) {
          // If that set doesn't exist yet, create it.
          if (!CurGVOnlySetIdx) {
            CurGVOnlySetIdx = UsedGlobalSets.size();
            CreateGlobalSet().Globals.set(GI);
          } else {
            ++UsedGlobalSets[CurGVOnlySetIdx].UsageCount;
          }

          GlobalUsesByFunction[ParentFn] = CurGVOnlySetIdx;
          continue;
        }

        // If the function's current set already contains this global, just
        // increment the counter.
        if (UsedGlobalSets[UGSIdx].Globals.test(GI)) {
          ++UsedGlobalSets[UGSIdx].UsageCount;
          continue;
        }

        // If not, the previous set wasn't actually used in this function.
        --UsedGlobalSets[UGSIdx].UsageCount;

        // If we already expanded the previous set to include this global, just
        // reuse that expanded set.
        if (size_t ExpandedIdx = EncounteredUGS[UGSIdx]) {
          ++UsedGlobalSets[ExpandedIdx].UsageCount;
          GlobalUsesByFunction[ParentFn] = ExpandedIdx;
          continue;
        }

        // If not, create a new set consisting of the union of the previous set
        // and this global.  Mark it as encountered, so we can reuse it later.
        GlobalUsesByFunction[ParentFn] = EncounteredUGS[UGSIdx] =
            UsedGlobalSets.size();

        UsedGlobalSet &NewUGS = CreateGlobalSet();
        NewUGS.Globals.set(GI);
        NewUGS.Globals |= UsedGlobalSets[UGSIdx].Globals;
      }
    }
  }

  // We have now found a bunch of sets of globals used together.  We
  // accumulated the number of times we encountered each set (i.e., the number
  // of functions that use that exact set of globals).
  //
  // Multiply that by the size of the set to give us a crude profitability
  // metric.
  std::sort(UsedGlobalSets.begin(), UsedGlobalSets.end(),
            [](const UsedGlobalSet &UGS1, const UsedGlobalSet &UGS2) {
              return UGS1.Globals.count() * UGS1.UsageCount <
                     UGS2.Globals.count() * UGS2.UsageCount;
            });

  // We can choose to merge all globals together, but ignore globals never used
  // with another global.  This catches the obviously non-profitable cases of
  // having a single global, but is aggressive enough for any other case.
  if (GlobalMergeIgnoreSingleUse) {
    BitVector AllGlobals(Globals.size());
    for (size_t i = 0, e = UsedGlobalSets.size(); i != e; ++i) {
      const UsedGlobalSet &UGS = UsedGlobalSets[e - i - 1];
      if (UGS.UsageCount == 0)
        continue;
      if (UGS.Globals.count() > 1)
        AllGlobals |= UGS.Globals;
    }
    return doMerge(Globals, AllGlobals, M, isConst, AddrSpace);
  }

  // Starting from the sets with the best (=biggest) profitability, find a
  // good combination.
  // The ideal (and expensive) solution can only be found by trying all
  // combinations, looking for the one with the best profitability.
  // Don't be smart about it, and just pick the first compatible combination,
  // starting with the sets with the best profitability.
  BitVector PickedGlobals(Globals.size());
  bool Changed = false;

  for (size_t i = 0, e = UsedGlobalSets.size(); i != e; ++i) {
    const UsedGlobalSet &UGS = UsedGlobalSets[e - i - 1];
    if (UGS.UsageCount == 0)
      continue;
    if (PickedGlobals.anyCommon(UGS.Globals))
      continue;
    PickedGlobals |= UGS.Globals;
    // If the set only contains one global, there's no point in merging.
    // Ignore the global for inclusion in other sets though, so keep it in
    // PickedGlobals.
    if (UGS.Globals.count() < 2)
      continue;
    Changed |= doMerge(Globals, UGS.Globals, M, isConst, AddrSpace);
  }

  return Changed;
}

bool GlobalMerge::doMerge(SmallVectorImpl<GlobalVariable *> &Globals,
                          const BitVector &GlobalSet, Module &M, bool isConst,
                          unsigned AddrSpace) const {

  Type *Int32Ty = Type::getInt32Ty(M.getContext());

  assert(Globals.size() > 1);

  DEBUG(dbgs() << " Trying to merge set, starts with #"
               << GlobalSet.find_first() << "\n");

  ssize_t i = GlobalSet.find_first();
  while (i != -1) {
    ssize_t j = 0;
    uint64_t MergedSize = 0;
    std::vector<Type*> Tys;
    std::vector<Constant*> Inits;

    bool HasExternal = false;
    GlobalVariable *TheFirstExternal = nullptr;
    for (j = i; j != -1; j = GlobalSet.find_next(j)) {
      Type *Ty = Globals[j]->getType()->getElementType();
      MergedSize += DL->getTypeAllocSize(Ty);
      if (MergedSize > MaxOffset) {
        break;
      }
      Tys.push_back(Ty);
      Inits.push_back(Globals[j]->getInitializer());

      if (Globals[j]->hasExternalLinkage() && !HasExternal) {
        HasExternal = true;
        TheFirstExternal = Globals[j];
      }
    }

    // If none of the merged variables has external linkage, we don't need to
    // expose the merged symbol.
    GlobalValue::LinkageTypes Linkage = HasExternal
                                            ? GlobalValue::ExternalLinkage
                                            : GlobalValue::InternalLinkage;

    StructType *MergedTy = StructType::get(M.getContext(), Tys);
    Constant *MergedInit = ConstantStruct::get(MergedTy, Inits);

    // If the merged variables have external linkage, use the symbol name of
    // the first merged variable as the suffix of the merged symbol name.
    // This avoids link-time naming conflicts for the merged global symbols.
    GlobalVariable *MergedGV = new GlobalVariable(
        M, MergedTy, isConst, Linkage, MergedInit,
        HasExternal ? "_MergedGlobals_" + TheFirstExternal->getName()
                    : "_MergedGlobals",
        nullptr, GlobalVariable::NotThreadLocal, AddrSpace);

    for (ssize_t k = i, idx = 0; k != j; k = GlobalSet.find_next(k)) {
      GlobalValue::LinkageTypes Linkage = Globals[k]->getLinkage();
      std::string Name = Globals[k]->getName();

      Constant *Idx[2] = {
        ConstantInt::get(Int32Ty, 0),
        ConstantInt::get(Int32Ty, idx++)
      };
      Constant *GEP =
          ConstantExpr::getInBoundsGetElementPtr(MergedTy, MergedGV, Idx);
      Globals[k]->replaceAllUsesWith(GEP);
      Globals[k]->eraseFromParent();

      if (Linkage != GlobalValue::InternalLinkage) {
        // Generate a new alias...
        auto *PTy = cast<PointerType>(GEP->getType());
        GlobalAlias::create(PTy, Linkage, Name, GEP, &M);
      }

      NumMerged++;
    }
    i = j;
  }

  return true;
}

void GlobalMerge::collectUsedGlobalVariables(Module &M) {
  // Extract global variables from the llvm.used array.
  const GlobalVariable *GV = M.getGlobalVariable("llvm.used");
  if (!GV || !GV->hasInitializer()) return;

  // Should be an array of 'i8*'.
  const ConstantArray *InitList = cast<ConstantArray>(GV->getInitializer());

  for (unsigned i = 0, e = InitList->getNumOperands(); i != e; ++i)
    if (const GlobalVariable *G =
            dyn_cast<GlobalVariable>(
                InitList->getOperand(i)->stripPointerCasts()))
      MustKeepGlobalVariables.insert(G);
}

void GlobalMerge::setMustKeepGlobalVariables(Module &M) {
  collectUsedGlobalVariables(M);

  for (Module::iterator IFn = M.begin(), IEndFn = M.end(); IFn != IEndFn;
       ++IFn) {
    for (Function::iterator IBB = IFn->begin(), IEndBB = IFn->end();
         IBB != IEndBB; ++IBB) {
      // Follow the invoke link to find the landing pad instruction.
      const InvokeInst *II = dyn_cast<InvokeInst>(IBB->getTerminator());
      if (!II) continue;

      const LandingPadInst *LPInst = II->getUnwindDest()->getLandingPadInst();
      // Look for globals in the clauses of the landing pad instruction.
      for (unsigned Idx = 0, NumClauses = LPInst->getNumClauses();
           Idx != NumClauses; ++Idx)
        if (const GlobalVariable *GV =
                dyn_cast<GlobalVariable>(LPInst->getClause(Idx)
                                             ->stripPointerCasts()))
          MustKeepGlobalVariables.insert(GV);
    }
  }
}

bool GlobalMerge::doInitialization(Module &M) {
  if (!EnableGlobalMerge)
    return false;

  DenseMap<unsigned, SmallVector<GlobalVariable*, 16> > Globals, ConstGlobals,
                                                        BSSGlobals;
  bool Changed = false;
  setMustKeepGlobalVariables(M);

  // Grab all non-const globals.
  for (Module::global_iterator I = M.global_begin(),
         E = M.global_end(); I != E; ++I) {
    // Merge is safe for "normal" internal or external globals only.
    if (I->isDeclaration() || I->isThreadLocal() || I->hasSection())
      continue;

    if (!(EnableGlobalMergeOnExternal && I->hasExternalLinkage()) &&
        !I->hasInternalLinkage())
      continue;

    PointerType *PT = dyn_cast<PointerType>(I->getType());
    assert(PT && "Global variable is not a pointer!");

    unsigned AddressSpace = PT->getAddressSpace();

    // Ignore fancy-aligned globals for now.
    unsigned Alignment = DL->getPreferredAlignment(I);
    Type *Ty = I->getType()->getElementType();
    if (Alignment > DL->getABITypeAlignment(Ty))
      continue;

    // Ignore all 'special' globals.
    if (I->getName().startswith("llvm.") ||
        I->getName().startswith(".llvm."))
      continue;

    // Ignore all "required" globals.
    if (isMustKeepGlobalVariable(I))
      continue;

    if (DL->getTypeAllocSize(Ty) < MaxOffset) {
      if (TargetLoweringObjectFile::getKindForGlobal(I, *TM).isBSSLocal())
        BSSGlobals[AddressSpace].push_back(I);
      else if (I->isConstant())
        ConstGlobals[AddressSpace].push_back(I);
      else
        Globals[AddressSpace].push_back(I);
    }
  }

  for (DenseMap<unsigned, SmallVector<GlobalVariable*, 16> >::iterator
       I = Globals.begin(), E = Globals.end(); I != E; ++I)
    if (I->second.size() > 1)
      Changed |= doMerge(I->second, M, false, I->first);

  for (DenseMap<unsigned, SmallVector<GlobalVariable*, 16> >::iterator
       I = BSSGlobals.begin(), E = BSSGlobals.end(); I != E; ++I)
    if (I->second.size() > 1)
      Changed |= doMerge(I->second, M, false, I->first);

  if (EnableGlobalMergeOnConst)
    for (DenseMap<unsigned, SmallVector<GlobalVariable*, 16> >::iterator
         I = ConstGlobals.begin(), E = ConstGlobals.end(); I != E; ++I)
      if (I->second.size() > 1)
        Changed |= doMerge(I->second, M, true, I->first);

  return Changed;
}

bool GlobalMerge::runOnFunction(Function &F) {
  return false;
}

bool GlobalMerge::doFinalization(Module &M) {
  MustKeepGlobalVariables.clear();
  return false;
}

Pass *llvm::createGlobalMergePass(const TargetMachine *TM, unsigned Offset,
                                  bool OnlyOptimizeForSize) {
  return new GlobalMerge(TM, Offset, OnlyOptimizeForSize);
}
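
// Usage sketch (illustrative only, not part of this pass): a target backend
// typically schedules this pass in its IR pass pipeline with a
// target-specific maximum offset, e.g.:
//
//   // 4095 is a hypothetical value; the real bound comes from the target's
//   // load/store immediate-offset range (see the ARM backend for an example).
//   PM.add(createGlobalMergePass(TM, 4095, /*OnlyOptimizeForSize=*/false));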