//===--- Passes/FrameAnalysis.cpp -----------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the FrameAnalysis class.
//
//===----------------------------------------------------------------------===//

#include "bolt/Passes/FrameAnalysis.h"
#include "bolt/Core/ParallelUtilities.h"
#include "bolt/Passes/CallGraphWalker.h"
#include "llvm/Support/Timer.h"
#include <fstream>
#include <stack>

#define DEBUG_TYPE "fa"

using namespace llvm;

namespace opts {
extern cl::OptionCategory BoltOptCategory;
extern cl::opt<unsigned> Verbosity;

static cl::list<std::string>
    FrameOptFunctionNames("funcs-fop", cl::CommaSeparated,
                          cl::desc("list of functions to apply frame opts"),
                          cl::value_desc("func1,func2,func3,..."));

static cl::opt<std::string> FrameOptFunctionNamesFile(
    "funcs-file-fop",
    cl::desc("file with list of functions to frame optimize"));

static cl::opt<bool>
    TimeFA("time-fa",
           cl::desc("time frame analysis steps"),
           cl::ReallyHidden,
           cl::ZeroOrMore,
           cl::cat(BoltOptCategory));

bool shouldFrameOptimize(const llvm::bolt::BinaryFunction &Function) {
  if (Function.hasUnknownControlFlow())
    return false;

  if (!FrameOptFunctionNamesFile.empty()) {
    assert(!FrameOptFunctionNamesFile.empty() && "unexpected empty file name");
    std::ifstream FuncsFile(FrameOptFunctionNamesFile, std::ios::in);
    std::string FuncName;
    while (std::getline(FuncsFile, FuncName))
      FrameOptFunctionNames.push_back(FuncName);
    FrameOptFunctionNamesFile = "";
  }

  bool IsValid = true;
  if (!FrameOptFunctionNames.empty()) {
    IsValid = false;
    for (std::string &Name : FrameOptFunctionNames) {
      if (Function.hasName(Name)) {
        IsValid = true;
        break;
      }
    }
  }

  return IsValid;
}
} // namespace opts

namespace llvm {
namespace bolt {

raw_ostream &operator<<(raw_ostream &OS, const FrameIndexEntry &FIE) {
  OS << "FrameIndexEntry<IsLoad: " << FIE.IsLoad << ", IsStore: " << FIE.IsStore
     << ", IsStoreFromReg: " << FIE.IsStoreFromReg
     << ", RegOrImm: " << FIE.RegOrImm << ", StackOffset: ";
  if (FIE.StackOffset < 0)
    OS << "-" << Twine::utohexstr(-FIE.StackOffset);
  else
    OS << "+" << Twine::utohexstr(FIE.StackOffset);
  OS << ", Size: " << static_cast<int>(FIE.Size)
     << ", IsSimple: " << FIE.IsSimple << ">";
  return OS;
}

namespace {

/// This class should be used to iterate through basic blocks in layout order
/// to analyze instructions for frame accesses. The user should call
/// enterNewBB() whenever starting to analyze a new BB and doNext() for each
/// instruction. After doNext(), if isValidAccess() returns true, the current
/// instruction accesses the frame and getFIE() may be used to obtain details
/// about this access.
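///
/// A minimal usage sketch (illustrative only; it assumes BF is a
/// BinaryFunction with a CFG and SPT is a StackPointerTracking instance that
/// has already been run on BF):
/// \code
///   FrameAccessAnalysis FAA(BF, SPT);
///   for (BinaryBasicBlock *BB : BF.layout()) {
///     FAA.enterNewBB();
///     for (MCInst &Inst : *BB) {
///       if (!FAA.doNext(*BB, Inst))
///         break; // Analysis failed; give up on this function.
///       if (FAA.isValidAccess()) {
///         const FrameIndexEntry &FIE = FAA.getFIE();
///         // ... use FIE.StackOffset, FIE.Size, etc. ...
///       }
///     }
///   }
/// \endcode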
class FrameAccessAnalysis {
  /// We depend on Stack Pointer Tracking to figure out the current SP offset
  /// value at a given program point.
  StackPointerTracking &SPT;

  /// Context vars
  const BinaryContext &BC;
  const BinaryFunction &BF;
  /// Vars used for storing useful CFI info to give us a hint about how the
  /// stack is used in this function.
  int SPOffset{0};
  int FPOffset{0};
  int64_t CfaOffset{-8};
  uint16_t CfaReg{7};
  std::stack<std::pair<int64_t, uint16_t>> CFIStack;
  /// Our pointer to access SPT info
  const MCInst *Prev{nullptr};
  /// Info about the last frame access
  bool IsValidAccess{false};
  FrameIndexEntry FIE;

  bool decodeFrameAccess(const MCInst &Inst) {
    int32_t SrcImm = 0;
    MCPhysReg Reg = 0;
    int64_t StackOffset = 0;
    bool IsIndexed = false;
    if (!BC.MIB->isStackAccess(
            Inst, FIE.IsLoad, FIE.IsStore, FIE.IsStoreFromReg, Reg, SrcImm,
            FIE.StackPtrReg, StackOffset, FIE.Size, FIE.IsSimple, IsIndexed))
      return true;

    if (IsIndexed || FIE.Size == 0) {
      LLVM_DEBUG(dbgs() << "Giving up on indexed memory access/unknown size\n");
      LLVM_DEBUG(dbgs() << "Blame insn: ");
      LLVM_DEBUG(Inst.dump());
      return false;
    }

    assert(FIE.Size != 0);

    FIE.RegOrImm = SrcImm;
    if (FIE.IsLoad || FIE.IsStoreFromReg)
      FIE.RegOrImm = Reg;

    if (FIE.StackPtrReg == BC.MIB->getStackPointer() && SPOffset != SPT.EMPTY &&
        SPOffset != SPT.SUPERPOSITION) {
      LLVM_DEBUG(
          dbgs() << "Adding access via SP while CFA reg is another one\n");
      FIE.StackOffset = SPOffset + StackOffset;
    } else if (FIE.StackPtrReg == BC.MIB->getFramePointer() &&
               FPOffset != SPT.EMPTY && FPOffset != SPT.SUPERPOSITION) {
      LLVM_DEBUG(
          dbgs() << "Adding access via FP while CFA reg is another one\n");
      FIE.StackOffset = FPOffset + StackOffset;
    } else if (FIE.StackPtrReg ==
               *BC.MRI->getLLVMRegNum(CfaReg, /*isEH=*/false)) {
      FIE.StackOffset = CfaOffset + StackOffset;
    } else {
      LLVM_DEBUG(
          dbgs() << "Found stack access with reg different than cfa reg.\n");
      LLVM_DEBUG(dbgs() << "\tCurrent CFA reg: " << CfaReg
                        << "\n\tStack access reg: " << FIE.StackPtrReg << "\n");
      LLVM_DEBUG(dbgs() << "Blame insn: ");
      LLVM_DEBUG(Inst.dump());
      return false;
    }
    IsValidAccess = true;
    return true;
  }

public:
  FrameAccessAnalysis(BinaryFunction &BF, StackPointerTracking &SPT)
      : SPT(SPT), BC(BF.getBinaryContext()), BF(BF) {}

  void enterNewBB() { Prev = nullptr; }
  const FrameIndexEntry &getFIE() const { return FIE; }
  int getSPOffset() const { return SPOffset; }
  bool isValidAccess() const { return IsValidAccess; }

  bool doNext(const BinaryBasicBlock &BB, const MCInst &Inst) {
    IsValidAccess = false;
    std::tie(SPOffset, FPOffset) =
        Prev ? *SPT.getStateAt(*Prev) : *SPT.getStateAt(BB);
    Prev = &Inst;
    // Use CFI information to keep track of which register is being used to
    // access the frame.
    if (BC.MIB->isCFI(Inst)) {
      const MCCFIInstruction *CFI = BF.getCFIFor(Inst);
      switch (CFI->getOperation()) {
      case MCCFIInstruction::OpDefCfa:
        CfaOffset = CFI->getOffset();
        LLVM_FALLTHROUGH;
      case MCCFIInstruction::OpDefCfaRegister:
        CfaReg = CFI->getRegister();
        break;
      case MCCFIInstruction::OpDefCfaOffset:
        CfaOffset = CFI->getOffset();
        break;
      case MCCFIInstruction::OpRememberState:
        CFIStack.push(std::make_pair(CfaOffset, CfaReg));
        break;
      case MCCFIInstruction::OpRestoreState: {
        if (CFIStack.empty())
          dbgs() << "Assertion is about to fail: " << BF.getPrintName() << "\n";
        assert(!CFIStack.empty() && "Corrupt CFI stack");
        std::pair<int64_t, uint16_t> &Elem = CFIStack.top();
        CFIStack.pop();
        CfaOffset = Elem.first;
        CfaReg = Elem.second;
        break;
      }
      case MCCFIInstruction::OpAdjustCfaOffset:
        llvm_unreachable("Unhandled AdjustCfaOffset");
        break;
      default:
        break;
      }
      return true;
    }

    if (BC.MIB->escapesVariable(Inst, SPT.HasFramePointer)) {
      LLVM_DEBUG(
          dbgs() << "Leaked stack address, giving up on this function.\n");
      LLVM_DEBUG(dbgs() << "Blame insn: ");
      LLVM_DEBUG(Inst.dump());
      return false;
    }

    return decodeFrameAccess(Inst);
  }
};

} // end anonymous namespace

void FrameAnalysis::addArgAccessesFor(MCInst &Inst, ArgAccesses &&AA) {
  if (ErrorOr<ArgAccesses &> OldAA = getArgAccessesFor(Inst)) {
    if (OldAA->AssumeEverything)
      return;
    *OldAA = std::move(AA);
    return;
  }
  if (AA.AssumeEverything) {
    // Index 0 in ArgAccessesVector represents an "assume everything" entry.
    BC.MIB->addAnnotation(Inst, "ArgAccessEntry", 0U);
    return;
  }
  BC.MIB->addAnnotation(Inst, "ArgAccessEntry",
                        (unsigned)ArgAccessesVector.size());
  ArgAccessesVector.emplace_back(std::move(AA));
}

void FrameAnalysis::addArgInStackAccessFor(MCInst &Inst,
                                           const ArgInStackAccess &Arg) {
  ErrorOr<ArgAccesses &> AA = getArgAccessesFor(Inst);
  if (!AA) {
    addArgAccessesFor(Inst, ArgAccesses(false));
    AA = getArgAccessesFor(Inst);
    assert(AA && "Object setup failed");
  }
  std::set<ArgInStackAccess> &Set = AA->Set;
  assert(!AA->AssumeEverything && "Adding arg to AssumeEverything set");
  Set.emplace(Arg);
}

void FrameAnalysis::addFIEFor(MCInst &Inst, const FrameIndexEntry &FIE) {
  BC.MIB->addAnnotation(Inst, "FrameAccessEntry", (unsigned)FIEVector.size());
  FIEVector.emplace_back(FIE);
}

ErrorOr<ArgAccesses &> FrameAnalysis::getArgAccessesFor(const MCInst &Inst) {
  if (auto Idx = BC.MIB->tryGetAnnotationAs<unsigned>(Inst, "ArgAccessEntry")) {
    assert(ArgAccessesVector.size() > *Idx && "Out of bounds");
    return ArgAccessesVector[*Idx];
  }
  return make_error_code(errc::result_out_of_range);
}

ErrorOr<const ArgAccesses &>
FrameAnalysis::getArgAccessesFor(const MCInst &Inst) const {
  if (auto Idx = BC.MIB->tryGetAnnotationAs<unsigned>(Inst, "ArgAccessEntry")) {
    assert(ArgAccessesVector.size() > *Idx && "Out of bounds");
    return ArgAccessesVector[*Idx];
  }
  return make_error_code(errc::result_out_of_range);
}

ErrorOr<const FrameIndexEntry &>
FrameAnalysis::getFIEFor(const MCInst &Inst) const {
  if (auto Idx =
          BC.MIB->tryGetAnnotationAs<unsigned>(Inst, "FrameAccessEntry")) {
    assert(FIEVector.size() > *Idx && "Out of bounds");
    return FIEVector[*Idx];
  }
  return make_error_code(errc::result_out_of_range);
}

void FrameAnalysis::traverseCG(BinaryFunctionCallGraph &CG) {
  CallGraphWalker CGWalker(CG);

  CGWalker.registerVisitor([&](BinaryFunction *Func) -> bool {
    return computeArgsAccessed(*Func);
  });

  CGWalker.walk();

  DEBUG_WITH_TYPE("ra", {
    for (auto &MapEntry : ArgsTouchedMap) {
      const BinaryFunction *Func = MapEntry.first;
      const auto &Set = MapEntry.second;
      dbgs() << "Args accessed for " << Func->getPrintName() << ": ";
      if (!Set.empty() && Set.count(std::make_pair(-1, 0))) {
        dbgs() << "assume everything";
      } else {
        for (const std::pair<int64_t, uint8_t> &Entry : Set)
          dbgs() << "[" << Entry.first << ", " << (int)Entry.second << "] ";
      }
      dbgs() << "\n";
    }
  });
}

bool FrameAnalysis::updateArgsTouchedFor(const BinaryFunction &BF, MCInst &Inst,
                                         int CurOffset) {
  if (!BC.MIB->isCall(Inst))
    return false;

  std::set<int64_t> Res;
  const MCSymbol *TargetSymbol = BC.MIB->getTargetSymbol(Inst);
  // If indirect call, we conservatively assume it accesses all stack
  // positions.
  if (TargetSymbol == nullptr) {
    addArgAccessesFor(Inst, ArgAccesses(/*AssumeEverything=*/true));
    if (!FunctionsRequireAlignment.count(&BF)) {
      FunctionsRequireAlignment.insert(&BF);
      return true;
    }
    return false;
  }

  const BinaryFunction *Function = BC.getFunctionForSymbol(TargetSymbol);
  // Call to a function without a BinaryFunction object. Conservatively assume
  // it accesses all stack positions.
  if (Function == nullptr) {
    addArgAccessesFor(Inst, ArgAccesses(/*AssumeEverything=*/true));
    if (!FunctionsRequireAlignment.count(&BF)) {
      FunctionsRequireAlignment.insert(&BF);
      return true;
    }
    return false;
  }

  auto Iter = ArgsTouchedMap.find(Function);

  bool Changed = false;
  if (BC.MIB->isTailCall(Inst) && Iter != ArgsTouchedMap.end()) {
    // Ignore checking CurOffset because we can't always reliably determine the
    // offset, especially after an epilogue, where tail calls happen. It should
    // be -8.
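    // Propagate the callee's touched-args entries into this function's own
    // set (a tail call reuses the caller's frame, so the callee's accesses to
    // incoming stack arguments are also visible to this function's caller)
    // and record whether anything new was added.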
    for (std::pair<int64_t, uint8_t> Elem : Iter->second) {
      if (ArgsTouchedMap[&BF].find(Elem) == ArgsTouchedMap[&BF].end()) {
        ArgsTouchedMap[&BF].emplace(Elem);
        Changed = true;
      }
    }
  }
  if (FunctionsRequireAlignment.count(Function) &&
      !FunctionsRequireAlignment.count(&BF)) {
    Changed = true;
    FunctionsRequireAlignment.insert(&BF);
  }
  if (Iter == ArgsTouchedMap.end())
    return Changed;

  if (CurOffset == StackPointerTracking::EMPTY ||
      CurOffset == StackPointerTracking::SUPERPOSITION) {
    addArgAccessesFor(Inst, ArgAccesses(/*AssumeEverything=*/true));
    return Changed;
  }

  for (std::pair<int64_t, uint8_t> Elem : Iter->second) {
    if (Elem.first == -1) {
      addArgAccessesFor(Inst, ArgAccesses(/*AssumeEverything=*/true));
      break;
    }
    LLVM_DEBUG(dbgs() << "Added arg in stack access annotation "
                      << CurOffset + Elem.first << "\n");
    addArgInStackAccessFor(
        Inst, ArgInStackAccess{/*StackOffset=*/CurOffset + Elem.first,
                               /*Size=*/Elem.second});
  }
  return Changed;
}

bool FrameAnalysis::computeArgsAccessed(BinaryFunction &BF) {
  if (!BF.isSimple() || !BF.hasCFG()) {
    LLVM_DEBUG(dbgs() << "Treating " << BF.getPrintName()
                      << " conservatively.\n");
    ArgsTouchedMap[&BF].emplace(std::make_pair(-1, 0));
    if (!FunctionsRequireAlignment.count(&BF)) {
      FunctionsRequireAlignment.insert(&BF);
      return true;
    }
    return false;
  }

  LLVM_DEBUG(dbgs() << "Now computing args accessed for: " << BF.getPrintName()
                    << "\n");
  bool UpdatedArgsTouched = false;
  bool NoInfo = false;
  FrameAccessAnalysis FAA(BF, getSPT(BF));

  for (BinaryBasicBlock *BB : BF.layout()) {
    FAA.enterNewBB();

    for (MCInst &Inst : *BB) {
      if (!FAA.doNext(*BB, Inst)) {
        ArgsTouchedMap[&BF].emplace(std::make_pair(-1, 0));
        NoInfo = true;
        break;
      }

      // Check for calls -- attach stack accessing info to them regarding their
      // target.
      if (updateArgsTouchedFor(BF, Inst, FAA.getSPOffset()))
        UpdatedArgsTouched = true;

      // Check for stack accesses that affect callers.
      if (!FAA.isValidAccess())
        continue;

      const FrameIndexEntry &FIE = FAA.getFIE();
      if (FIE.StackOffset < 0)
        continue;
      if (ArgsTouchedMap[&BF].find(std::make_pair(FIE.StackOffset, FIE.Size)) !=
          ArgsTouchedMap[&BF].end())
        continue;

      // Record accesses to the previous stack frame.
      ArgsTouchedMap[&BF].emplace(std::make_pair(FIE.StackOffset, FIE.Size));
      UpdatedArgsTouched = true;
      LLVM_DEBUG({
        dbgs() << "Arg access offset " << FIE.StackOffset << " added to:\n";
        BC.printInstruction(dbgs(), Inst, 0, &BF, true);
      });
    }
    if (NoInfo)
      break;
  }
  if (FunctionsRequireAlignment.count(&BF))
    return UpdatedArgsTouched;

  if (NoInfo) {
    FunctionsRequireAlignment.insert(&BF);
    return true;
  }

  for (BinaryBasicBlock &BB : BF) {
    for (MCInst &Inst : BB) {
      if (BC.MIB->requiresAlignedAddress(Inst)) {
        FunctionsRequireAlignment.insert(&BF);
        return true;
      }
    }
  }
  return UpdatedArgsTouched;
}

bool FrameAnalysis::restoreFrameIndex(BinaryFunction &BF) {
  FrameAccessAnalysis FAA(BF, getSPT(BF));

  LLVM_DEBUG(dbgs() << "Restoring frame indices for \"" << BF.getPrintName()
                    << "\"\n");
  for (BinaryBasicBlock *BB : BF.layout()) {
    LLVM_DEBUG(dbgs() << "\tNow at BB " << BB->getName() << "\n");
    FAA.enterNewBB();

    for (MCInst &Inst : *BB) {
      if (!FAA.doNext(*BB, Inst))
        return false;
      LLVM_DEBUG({
        dbgs() << "\t\tNow at ";
        Inst.dump();
        dbgs() << "\t\t\tSP offset is " << FAA.getSPOffset() << "\n";
      });

      if (!FAA.isValidAccess())
        continue;

      const FrameIndexEntry &FIE = FAA.getFIE();

      addFIEFor(Inst, FIE);
      LLVM_DEBUG({
        dbgs() << "Frame index annotation " << FIE << " added to:\n";
        BC.printInstruction(dbgs(), Inst, 0, &BF, true);
      });
    }
  }
  return true;
}

void FrameAnalysis::cleanAnnotations() {
  NamedRegionTimer T("cleanannotations", "clean annotations", "FA",
                     "FA breakdown", opts::TimeFA);

  ParallelUtilities::WorkFuncTy CleanFunction = [&](BinaryFunction &BF) {
    for (BinaryBasicBlock &BB : BF) {
      for (MCInst &Inst : BB) {
        BC.MIB->removeAnnotation(Inst, "ArgAccessEntry");
        BC.MIB->removeAnnotation(Inst, "FrameAccessEntry");
      }
    }
  };

  ParallelUtilities::runOnEachFunction(
      BC, ParallelUtilities::SchedulingPolicy::SP_INST_LINEAR, CleanFunction,
      ParallelUtilities::PredicateTy(nullptr), "cleanAnnotations");
}

FrameAnalysis::FrameAnalysis(BinaryContext &BC, BinaryFunctionCallGraph &CG)
    : BC(BC) {
  // Position 0 of the vector should always be associated with "assume access
  // everything".
  ArgAccessesVector.emplace_back(ArgAccesses(/*AssumeEverything*/ true));

  if (!opts::NoThreads) {
    NamedRegionTimer T1("precomputespt", "pre-compute spt", "FA",
                        "FA breakdown", opts::TimeFA);
    preComputeSPT();
  }

  {
    NamedRegionTimer T1("traversecg", "traverse call graph", "FA",
                        "FA breakdown", opts::TimeFA);
    traverseCG(CG);
  }

  for (auto &I : BC.getBinaryFunctions()) {
    uint64_t Count = I.second.getExecutionCount();
    if (Count != BinaryFunction::COUNT_NO_PROFILE)
      CountDenominator += Count;

    // "shouldOptimize" for passes that run after finalize.
    if (!(I.second.isSimple() && I.second.hasCFG() && !I.second.isIgnored()) ||
        !opts::shouldFrameOptimize(I.second)) {
      ++NumFunctionsNotOptimized;
      if (Count != BinaryFunction::COUNT_NO_PROFILE)
        CountFunctionsNotOptimized += Count;
      continue;
    }

    {
      NamedRegionTimer T1("restorefi", "restore frame index", "FA",
                          "FA breakdown", opts::TimeFA);
      if (!restoreFrameIndex(I.second)) {
        ++NumFunctionsFailedRestoreFI;
        uint64_t Count = I.second.getExecutionCount();
        if (Count != BinaryFunction::COUNT_NO_PROFILE)
          CountFunctionsFailedRestoreFI += Count;
        continue;
      }
    }
    AnalyzedFunctions.insert(&I.second);
  }

  {
    NamedRegionTimer T1("clearspt", "clear spt", "FA", "FA breakdown",
                        opts::TimeFA);
    clearSPTMap();

    // Clean up memory allocated for annotation values.
    if (!opts::NoThreads) {
      for (MCPlusBuilder::AllocatorIdTy Id : SPTAllocatorsId)
        BC.MIB->freeValuesAllocator(Id);
    }
  }
}

void FrameAnalysis::printStats() {
  outs() << "BOLT-INFO: FRAME ANALYSIS: " << NumFunctionsNotOptimized
         << " function(s) "
         << format("(%.1lf%% dyn cov)",
                   (100.0 * CountFunctionsNotOptimized / CountDenominator))
         << " were not optimized.\n"
         << "BOLT-INFO: FRAME ANALYSIS: " << NumFunctionsFailedRestoreFI
         << " function(s) "
         << format("(%.1lf%% dyn cov)",
                   (100.0 * CountFunctionsFailedRestoreFI / CountDenominator))
         << " could not have their frame indices restored.\n";
}

void FrameAnalysis::clearSPTMap() {
  if (opts::NoThreads) {
    SPTMap.clear();
    return;
  }

  ParallelUtilities::WorkFuncTy ClearFunctionSPT = [&](BinaryFunction &BF) {
    std::unique_ptr<StackPointerTracking> &SPTPtr = SPTMap.find(&BF)->second;
    SPTPtr.reset();
  };

  ParallelUtilities::PredicateTy SkipFunc = [&](const BinaryFunction &BF) {
    return !BF.isSimple() || !BF.hasCFG();
  };

  ParallelUtilities::runOnEachFunction(
      BC, ParallelUtilities::SchedulingPolicy::SP_INST_LINEAR, ClearFunctionSPT,
      SkipFunc, "clearSPTMap");

  SPTMap.clear();
}

void FrameAnalysis::preComputeSPT() {
  // Make sure that the SPTMap is empty.
  assert(SPTMap.empty());

  // Create map entries to allow lock-free parallel execution.
  for (auto &BFI : BC.getBinaryFunctions()) {
    BinaryFunction &BF = BFI.second;
    if (!BF.isSimple() || !BF.hasCFG())
      continue;
    SPTMap.emplace(&BF, std::unique_ptr<StackPointerTracking>());
  }

  // Create an index for the SPT annotation to allow lock-free parallel
  // execution.
  BC.MIB->getOrCreateAnnotationIndex("StackPointerTracking");

  // Run SPT in parallel.
  ParallelUtilities::WorkFuncWithAllocTy ProcessFunction =
      [&](BinaryFunction &BF, MCPlusBuilder::AllocatorIdTy AllocId) {
        std::unique_ptr<StackPointerTracking> &SPTPtr =
            SPTMap.find(&BF)->second;
        SPTPtr = std::make_unique<StackPointerTracking>(BF, AllocId);
        SPTPtr->run();
      };

  ParallelUtilities::PredicateTy SkipPredicate = [&](const BinaryFunction &BF) {
    return !BF.isSimple() || !BF.hasCFG();
  };

  ParallelUtilities::runOnEachFunctionWithUniqueAllocId(
      BC, ParallelUtilities::SchedulingPolicy::SP_BB_QUADRATIC, ProcessFunction,
      SkipPredicate, "preComputeSPT");
}

} // namespace bolt
} // namespace llvm
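
// Usage sketch (illustrative only; the surrounding pass code and variable
// names below are assumptions, not part of this file): a client pass builds
// the call graph, constructs a FrameAnalysis, queries the per-instruction
// annotations it produced, and finally releases them:
//
//   BinaryFunctionCallGraph CG = buildCallGraph(BC);
//   FrameAnalysis FA(BC, CG);
//   for (BinaryBasicBlock &BB : BF)
//     for (MCInst &Inst : BB)
//       if (ErrorOr<const FrameIndexEntry &> FIE = FA.getFIEFor(Inst))
//         ... FIE->StackOffset and FIE->Size describe the frame access ...
//   FA.cleanAnnotations();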