1 //===-- WebAssemblyCFGStackify.cpp - CFG Stackification -------------------===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 /// 10 /// \file 11 /// \brief This file implements a CFG stacking pass. 12 /// 13 /// This pass reorders the blocks in a function to put them into topological 14 /// order, ignoring loop backedges, and without any loop being interrupted 15 /// by a block not dominated by the loop header, with special care to keep the 16 /// order as similar as possible to the original order. 17 /// 18 /// Then, it inserts BLOCK and LOOP markers to mark the start of scopes, since 19 /// scope boundaries serve as the labels for WebAssembly's control transfers. 20 /// 21 /// This is sufficient to convert arbitrary CFGs into a form that works on 22 /// WebAssembly, provided that all loops are single-entry. 
///
//===----------------------------------------------------------------------===//

#include "WebAssembly.h"
#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
#include "WebAssemblyMachineFunctionInfo.h"
#include "WebAssemblySubtarget.h"
#include "llvm/ADT/PriorityQueue.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

#define DEBUG_TYPE "wasm-cfg-stackify"

namespace {
/// Machine-function pass that sorts the blocks of a function and then inserts
/// BLOCK/LOOP scope markers (see the file comment above). It requires loop
/// info and dominator info and preserves both, since it only reorders blocks
/// and inserts marker instructions; it does not change the CFG itself.
class WebAssemblyCFGStackify final : public MachineFunctionPass {
  StringRef getPassName() const override { return "WebAssembly CFG Stackify"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    // Block reordering keeps the CFG intact, so analyses keyed on CFG shape
    // (dominators, loop info) remain valid and are marked preserved.
    AU.setPreservesCFG();
    AU.addRequired<MachineDominatorTree>();
    AU.addPreserved<MachineDominatorTree>();
    AU.addRequired<MachineLoopInfo>();
    AU.addPreserved<MachineLoopInfo>();
    MachineFunctionPass::getAnalysisUsage(AU);
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

public:
  static char ID; // Pass identification, replacement for typeid
  WebAssemblyCFGStackify() : MachineFunctionPass(ID) {}
};
} // end anonymous namespace

char WebAssemblyCFGStackify::ID = 0;

/// Factory function used by the WebAssembly target to create this pass.
FunctionPass *llvm::createWebAssemblyCFGStackify() {
  return new WebAssemblyCFGStackify();
}

/// Return the "bottom" block of a loop. This differs from
/// MachineLoop::getBottomBlock in that it works even if the loop is
/// discontiguous.
73 static MachineBasicBlock *LoopBottom(const MachineLoop *Loop) { 74 MachineBasicBlock *Bottom = Loop->getHeader(); 75 for (MachineBasicBlock *MBB : Loop->blocks()) 76 if (MBB->getNumber() > Bottom->getNumber()) 77 Bottom = MBB; 78 return Bottom; 79 } 80 81 static void MaybeUpdateTerminator(MachineBasicBlock *MBB) { 82 #ifndef NDEBUG 83 bool AnyBarrier = false; 84 #endif 85 bool AllAnalyzable = true; 86 for (const MachineInstr &Term : MBB->terminators()) { 87 #ifndef NDEBUG 88 AnyBarrier |= Term.isBarrier(); 89 #endif 90 AllAnalyzable &= Term.isBranch() && !Term.isIndirectBranch(); 91 } 92 assert((AnyBarrier || AllAnalyzable) && 93 "AnalyzeBranch needs to analyze any block with a fallthrough"); 94 if (AllAnalyzable) 95 MBB->updateTerminator(); 96 } 97 98 namespace { 99 /// Sort blocks by their number. 100 struct CompareBlockNumbers { 101 bool operator()(const MachineBasicBlock *A, 102 const MachineBasicBlock *B) const { 103 return A->getNumber() > B->getNumber(); 104 } 105 }; 106 /// Sort blocks by their number in the opposite order.. 107 struct CompareBlockNumbersBackwards { 108 bool operator()(const MachineBasicBlock *A, 109 const MachineBasicBlock *B) const { 110 return A->getNumber() < B->getNumber(); 111 } 112 }; 113 /// Bookkeeping for a loop to help ensure that we don't mix blocks not dominated 114 /// by the loop header among the loop's blocks. 115 struct Entry { 116 const MachineLoop *Loop; 117 unsigned NumBlocksLeft; 118 119 /// List of blocks not dominated by Loop's header that are deferred until 120 /// after all of Loop's blocks have been seen. 121 std::vector<MachineBasicBlock *> Deferred; 122 123 explicit Entry(const MachineLoop *L) 124 : Loop(L), NumBlocksLeft(L->getNumBlocks()) {} 125 }; 126 } 127 128 /// Sort the blocks, taking special care to make sure that loops are not 129 /// interrupted by blocks not dominated by their header. 130 /// TODO: There are many opportunities for improving the heuristics here. 131 /// Explore them. 
static void SortBlocks(MachineFunction &MF, const MachineLoopInfo &MLI,
                       const MachineDominatorTree &MDT) {
  // Prepare for a topological sort: Record the number of predecessors each
  // block has, ignoring loop backedges.
  MF.RenumberBlocks();
  SmallVector<unsigned, 16> NumPredsLeft(MF.getNumBlockIDs(), 0);
  for (MachineBasicBlock &MBB : MF) {
    unsigned N = MBB.pred_size();
    if (MachineLoop *L = MLI.getLoopFor(&MBB))
      if (L->getHeader() == &MBB)
        // Don't count edges from inside the loop to its own header; those
        // are the backedges the sort ignores.
        for (const MachineBasicBlock *Pred : MBB.predecessors())
          if (L->contains(Pred))
            --N;
    NumPredsLeft[MBB.getNumber()] = N;
  }

  // Topological sort the CFG, with additional constraints:
  //  - Between a loop header and the last block in the loop, there can be
  //    no blocks not dominated by the loop header.
  //  - It's desirable to preserve the original block order when possible.
  // We use two ready lists; Preferred and Ready. Preferred has recently
  // processed successors, to help preserve block sequences from the original
  // order. Ready has the remaining ready blocks.
  PriorityQueue<MachineBasicBlock *, std::vector<MachineBasicBlock *>,
                CompareBlockNumbers>
      Preferred;
  PriorityQueue<MachineBasicBlock *, std::vector<MachineBasicBlock *>,
                CompareBlockNumbersBackwards>
      Ready;
  // Stack of loops we are currently inside, innermost last.
  SmallVector<Entry, 4> Loops;
  for (MachineBasicBlock *MBB = &MF.front();;) {
    const MachineLoop *L = MLI.getLoopFor(MBB);
    if (L) {
      // If MBB is a loop header, add it to the active loop list. We can't put
      // any blocks that it doesn't dominate until we see the end of the loop.
      if (L->getHeader() == MBB)
        Loops.push_back(Entry(L));
      // For each active loop the block is in, decrement the count. If MBB is
      // the last block in an active loop, take it off the list and pick up any
      // blocks deferred because the header didn't dominate them.
      for (Entry &E : Loops)
        if (E.Loop->contains(MBB) && --E.NumBlocksLeft == 0)
          for (auto DeferredBlock : E.Deferred)
            Ready.push(DeferredBlock);
      while (!Loops.empty() && Loops.back().NumBlocksLeft == 0)
        Loops.pop_back();
    }
    // The main topological sort logic.
    for (MachineBasicBlock *Succ : MBB->successors()) {
      // Ignore backedges.
      if (MachineLoop *SuccL = MLI.getLoopFor(Succ))
        if (SuccL->getHeader() == Succ && SuccL->contains(MBB))
          continue;
      // Decrement the predecessor count. If it's now zero, it's ready.
      if (--NumPredsLeft[Succ->getNumber()] == 0)
        Preferred.push(Succ);
    }
    // Determine the block to follow MBB. First try to find a preferred block,
    // to preserve the original block order when possible.
    MachineBasicBlock *Next = nullptr;
    while (!Preferred.empty()) {
      Next = Preferred.top();
      Preferred.pop();
      // If Next isn't dominated by the top active loop header, defer it until
      // that loop is done.
      if (!Loops.empty() &&
          !MDT.dominates(Loops.back().Loop->getHeader(), Next)) {
        Loops.back().Deferred.push_back(Next);
        Next = nullptr;
        continue;
      }
      // If Next was originally ordered before MBB, and it isn't because it was
      // loop-rotated above the header, it's not preferred.
      if (Next->getNumber() < MBB->getNumber() &&
          (!L || !L->contains(Next) ||
           L->getHeader()->getNumber() < Next->getNumber())) {
        Ready.push(Next);
        Next = nullptr;
        continue;
      }
      break;
    }
    // If we didn't find a suitable block in the Preferred list, check the
    // general Ready list.
    if (!Next) {
      // If there are no more blocks to process, we're done.
      if (Ready.empty()) {
        MaybeUpdateTerminator(MBB);
        break;
      }
      for (;;) {
        Next = Ready.top();
        Ready.pop();
        // If Next isn't dominated by the top active loop header, defer it until
        // that loop is done.
        if (!Loops.empty() &&
            !MDT.dominates(Loops.back().Loop->getHeader(), Next)) {
          Loops.back().Deferred.push_back(Next);
          continue;
        }
        break;
      }
    }
    // Move the next block into place and iterate.
    Next->moveAfter(MBB);
    MaybeUpdateTerminator(MBB);
    MBB = Next;
  }
  assert(Loops.empty() && "Active loop list not finished");
  MF.RenumberBlocks();

  // Debug-only validation: re-walk the sorted layout and check that every
  // loop forms a properly nested, uninterrupted interval and that all
  // non-backedge predecessors precede their successors.
#ifndef NDEBUG
  SmallSetVector<MachineLoop *, 8> OnStack;

  // Insert a sentinel representing the degenerate loop that starts at the
  // function entry block and includes the entire function as a "loop" that
  // executes once.
  OnStack.insert(nullptr);

  for (auto &MBB : MF) {
    assert(MBB.getNumber() >= 0 && "Renumbered blocks should be non-negative.");

    MachineLoop *Loop = MLI.getLoopFor(&MBB);
    if (Loop && &MBB == Loop->getHeader()) {
      // Loop header. The loop predecessor should be sorted above, and the other
      // predecessors should be backedges below.
      for (auto Pred : MBB.predecessors())
        assert(
            (Pred->getNumber() < MBB.getNumber() || Loop->contains(Pred)) &&
            "Loop header predecessors must be loop predecessors or backedges");
      assert(OnStack.insert(Loop) && "Loops should be declared at most once.");
    } else {
      // Not a loop header. All predecessors should be sorted above.
      for (auto Pred : MBB.predecessors())
        assert(Pred->getNumber() < MBB.getNumber() &&
               "Non-loop-header predecessors should be topologically sorted");
      assert(OnStack.count(MLI.getLoopFor(&MBB)) &&
             "Blocks must be nested in their loops");
    }
    // Pop every loop whose bottom block this is (but never the sentinel).
    while (OnStack.size() > 1 && &MBB == LoopBottom(OnStack.back()))
      OnStack.pop_back();
  }
  assert(OnStack.pop_back_val() == nullptr &&
         "The function entry block shouldn't actually be a loop header");
  assert(OnStack.empty() &&
         "Control flow stack pushes and pops should be balanced.");
#endif
}

/// Test whether Pred has any terminators explicitly branching to MBB, as
/// opposed to falling through. Note that it's possible (eg. in unoptimized
/// code) for a branch instruction to both branch to a block and fallthrough
/// to it, so we check the actual branch operands to see if there are any
/// explicit mentions.
static bool ExplicitlyBranchesTo(MachineBasicBlock *Pred,
                                 MachineBasicBlock *MBB) {
  for (MachineInstr &MI : Pred->terminators())
    for (MachineOperand &MO : MI.explicit_operands())
      if (MO.isMBB() && MO.getMBB() == MBB)
        return true;
  return false;
}

/// Test whether MI is a child of some other node in an expression tree.
/// An instruction counts as a child when its first operand is a def of a
/// stackified virtual register, i.e. its result is consumed implicitly via
/// the wasm value stack rather than through a register.
static bool IsChild(const MachineInstr &MI,
                    const WebAssemblyFunctionInfo &MFI) {
  if (MI.getNumOperands() == 0)
    return false;
  const MachineOperand &MO = MI.getOperand(0);
  if (!MO.isReg() || MO.isImplicit() || !MO.isDef())
    return false;
  unsigned Reg = MO.getReg();
  return TargetRegisterInfo::isVirtualRegister(Reg) &&
         MFI.isVRegStackified(Reg);
}

/// Insert a BLOCK marker for branches to MBB (if needed).
static void PlaceBlockMarker(
    MachineBasicBlock &MBB, MachineFunction &MF,
    SmallVectorImpl<MachineBasicBlock *> &ScopeTops,
    DenseMap<const MachineInstr *, MachineInstr *> &BlockTops,
    DenseMap<const MachineInstr *, MachineInstr *> &LoopTops,
    const WebAssemblyInstrInfo &TII,
    const MachineLoopInfo &MLI,
    MachineDominatorTree &MDT,
    WebAssemblyFunctionInfo &MFI) {
  // First compute the nearest common dominator of all forward non-fallthrough
  // predecessors so that we minimize the time that the BLOCK is on the stack,
  // which reduces overall stack height.
  MachineBasicBlock *Header = nullptr;
  bool IsBranchedTo = false;
  int MBBNumber = MBB.getNumber();
  for (MachineBasicBlock *Pred : MBB.predecessors())
    if (Pred->getNumber() < MBBNumber) {
      Header = Header ? MDT.findNearestCommonDominator(Header, Pred) : Pred;
      if (ExplicitlyBranchesTo(Pred, &MBB))
        IsBranchedTo = true;
    }
  // No forward predecessors at all: nothing can branch down to MBB.
  if (!Header)
    return;
  // Only fallthrough edges reach MBB from above; no BLOCK label is needed.
  if (!IsBranchedTo)
    return;

  assert(&MBB != &MF.front() && "Header blocks shouldn't have predecessors");
  MachineBasicBlock *LayoutPred = &*std::prev(MachineFunction::iterator(&MBB));

  // If the nearest common dominator is inside a more deeply nested context,
  // walk out to the nearest scope which isn't more deeply nested.
  for (MachineFunction::iterator I(LayoutPred), E(Header); I != E; --I) {
    if (MachineBasicBlock *ScopeTop = ScopeTops[I->getNumber()]) {
      if (ScopeTop->getNumber() > Header->getNumber()) {
        // Skip over an intervening scope.
        I = std::next(MachineFunction::iterator(ScopeTop));
      } else {
        // We found a scope level at an appropriate depth.
        Header = ScopeTop;
        break;
      }
    }
  }

  // Decide where in Header to put the BLOCK.
  MachineBasicBlock::iterator InsertPos;
  MachineLoop *HeaderLoop = MLI.getLoopFor(Header);
  if (HeaderLoop && MBB.getNumber() > LoopBottom(HeaderLoop)->getNumber()) {
    // Header is the header of a loop that does not lexically contain MBB, so
    // the BLOCK needs to be above the LOOP, after any END constructs.
    InsertPos = Header->begin();
    while (InsertPos->getOpcode() == WebAssembly::END_BLOCK ||
           InsertPos->getOpcode() == WebAssembly::END_LOOP)
      ++InsertPos;
  } else {
    // Otherwise, insert the BLOCK as late in Header as we can, but before the
    // beginning of the local expression tree and any nested BLOCKs.
    InsertPos = Header->getFirstTerminator();
    while (InsertPos != Header->begin() &&
           IsChild(*std::prev(InsertPos), MFI) &&
           std::prev(InsertPos)->getOpcode() != WebAssembly::LOOP &&
           std::prev(InsertPos)->getOpcode() != WebAssembly::END_BLOCK &&
           std::prev(InsertPos)->getOpcode() != WebAssembly::END_LOOP)
      --InsertPos;
  }

  // Add the BLOCK. Its signature starts as Void; FixEndsAtEndOfFunction may
  // later rewrite it to match the function's return type.
  MachineInstr *Begin = BuildMI(*Header, InsertPos, DebugLoc(),
                                TII.get(WebAssembly::BLOCK))
                            .addImm(WebAssembly::Void);

  // Mark the end of the block. Place END_BLOCK after any END_LOOPs whose
  // LOOP lies at or above Header, so the scopes nest properly.
  InsertPos = MBB.begin();
  while (InsertPos != MBB.end() &&
         InsertPos->getOpcode() == WebAssembly::END_LOOP &&
         LoopTops[&*InsertPos]->getParent()->getNumber() >= Header->getNumber())
    ++InsertPos;
  MachineInstr *End = BuildMI(MBB, InsertPos, DebugLoc(),
                              TII.get(WebAssembly::END_BLOCK));
  BlockTops[End] = Begin;

  // Track the farthest-spanning scope that ends at this point.
  int Number = MBB.getNumber();
  if (!ScopeTops[Number] ||
      ScopeTops[Number]->getNumber() > Header->getNumber())
    ScopeTops[Number] = Header;
}

/// Insert a LOOP marker for a loop starting at MBB (if it's a loop header).
static void PlaceLoopMarker(
    MachineBasicBlock &MBB, MachineFunction &MF,
    SmallVectorImpl<MachineBasicBlock *> &ScopeTops,
    DenseMap<const MachineInstr *, MachineInstr *> &LoopTops,
    const WebAssemblyInstrInfo &TII, const MachineLoopInfo &MLI) {
  // Only loop headers get a LOOP marker.
  MachineLoop *Loop = MLI.getLoopFor(&MBB);
  if (!Loop || Loop->getHeader() != &MBB)
    return;

  // The operand of a LOOP is the first block after the loop. If the loop is the
  // bottom of the function, insert a dummy block at the end.
  MachineBasicBlock *Bottom = LoopBottom(Loop);
  auto Iter = std::next(MachineFunction::iterator(Bottom));
  if (Iter == MF.end()) {
    MachineBasicBlock *Label = MF.CreateMachineBasicBlock();
    // Give it a fake predecessor so that AsmPrinter prints its label.
    Label->addSuccessor(Label);
    MF.push_back(Label);
    Iter = std::next(MachineFunction::iterator(Bottom));
  }
  MachineBasicBlock *AfterLoop = &*Iter;

  // Mark the beginning of the loop (after the end of any existing loop that
  // ends here).
  auto InsertPos = MBB.begin();
  while (InsertPos != MBB.end() &&
         InsertPos->getOpcode() == WebAssembly::END_LOOP)
    ++InsertPos;
  // The LOOP signature starts as Void; FixEndsAtEndOfFunction may later
  // rewrite it to the function's return type.
  MachineInstr *Begin = BuildMI(MBB, InsertPos, DebugLoc(),
                                TII.get(WebAssembly::LOOP))
                            .addImm(WebAssembly::Void);

  // Mark the end of the loop, at the very start of the follow block.
  MachineInstr *End = BuildMI(*AfterLoop, AfterLoop->begin(), DebugLoc(),
                              TII.get(WebAssembly::END_LOOP));
  LoopTops[End] = Begin;

  assert((!ScopeTops[AfterLoop->getNumber()] ||
          ScopeTops[AfterLoop->getNumber()]->getNumber() < MBB.getNumber()) &&
         "With block sorting the outermost loop for a block should be first.");
  if (!ScopeTops[AfterLoop->getNumber()])
    ScopeTops[AfterLoop->getNumber()] = &MBB;
}

/// Compute the branch-depth immediate for a branch to MBB: the number of
/// enclosing scopes, counted from the top of the control-flow Stack, that
/// must be exited to reach MBB's label.
static unsigned
GetDepth(const SmallVectorImpl<const MachineBasicBlock *> &Stack,
         const MachineBasicBlock *MBB) {
  unsigned Depth = 0;
  for (auto X : reverse(Stack)) {
    if (X == MBB)
      break;
    ++Depth;
  }
  assert(Depth < Stack.size() && "Branch destination should be in scope");
  return Depth;
}

/// In normal assembly languages, when the end of a function is unreachable,
/// because the function ends in an infinite loop or a noreturn call or similar,
/// it isn't necessary to worry about the function return type at the end of
/// the function, because it's never reached. However, in WebAssembly, blocks
/// that end at the function end need to have a return type signature that
/// matches the function signature, even though it's unreachable. This function
/// checks for such cases and fixes up the signatures.
static void FixEndsAtEndOfFunction(
    MachineFunction &MF,
    const WebAssemblyFunctionInfo &MFI,
    DenseMap<const MachineInstr *, MachineInstr *> &BlockTops,
    DenseMap<const MachineInstr *, MachineInstr *> &LoopTops) {
  // At most one result value is supported here.
  assert(MFI.getResults().size() <= 1);

  // A void function needs no signature fixups.
  if (MFI.getResults().empty())
    return;

  // Translate the function's single result MVT into the corresponding wasm
  // expression type for use as a BLOCK/LOOP signature immediate.
  WebAssembly::ExprType retType;
  switch (MFI.getResults().front().SimpleTy) {
  case MVT::i32: retType = WebAssembly::I32; break;
  case MVT::i64: retType = WebAssembly::I64; break;
  case MVT::f32: retType = WebAssembly::F32; break;
  case MVT::f64: retType = WebAssembly::F64; break;
  case MVT::v16i8: retType = WebAssembly::I8x16; break;
  case MVT::v8i16: retType = WebAssembly::I16x8; break;
  case MVT::v4i32: retType = WebAssembly::I32x4; break;
  case MVT::v2i64: retType = WebAssembly::I64x2; break;
  case MVT::v4f32: retType = WebAssembly::F32x4; break;
  case MVT::v2f64: retType = WebAssembly::F64x2; break;
  default: llvm_unreachable("unexpected return type");
  }

  // Walk backwards from the end of the function; every END_BLOCK/END_LOOP
  // encountered before any "real" instruction ends at the function end, so
  // its matching BLOCK/LOOP must carry the function's result signature.
  for (MachineBasicBlock &MBB : reverse(MF)) {
    for (MachineInstr &MI : reverse(MBB)) {
      if (MI.isPosition() || MI.isDebugValue())
        continue;
      if (MI.getOpcode() == WebAssembly::END_BLOCK) {
        BlockTops[&MI]->getOperand(0).setImm(int32_t(retType));
        continue;
      }
      if (MI.getOpcode() == WebAssembly::END_LOOP) {
        LoopTops[&MI]->getOperand(0).setImm(int32_t(retType));
        continue;
      }
      // Something other than an `end`. We're done.
      return;
    }
  }
}

/// Insert LOOP and BLOCK markers at appropriate places, then rewrite branch
/// targets from basic-block references to scope-depth immediates.
static void PlaceMarkers(MachineFunction &MF, const MachineLoopInfo &MLI,
                         const WebAssemblyInstrInfo &TII,
                         MachineDominatorTree &MDT,
                         WebAssemblyFunctionInfo &MFI) {
  // For each block whose label represents the end of a scope, record the block
  // which holds the beginning of the scope. This will allow us to quickly skip
  // over scoped regions when walking blocks. We allocate one more than the
  // number of blocks in the function to accommodate for the possible fake block
  // we may insert at the end.
  SmallVector<MachineBasicBlock *, 8> ScopeTops(MF.getNumBlockIDs() + 1);

  // For each LOOP_END, the corresponding LOOP.
  DenseMap<const MachineInstr *, MachineInstr *> LoopTops;

  // For each END_BLOCK, the corresponding BLOCK.
  DenseMap<const MachineInstr *, MachineInstr *> BlockTops;

  for (auto &MBB : MF) {
    // Place the LOOP for MBB if MBB is the header of a loop.
    PlaceLoopMarker(MBB, MF, ScopeTops, LoopTops, TII, MLI);

    // Place the BLOCK for MBB if MBB is branched to from above.
    PlaceBlockMarker(MBB, MF, ScopeTops, BlockTops, LoopTops, TII, MLI, MDT, MFI);
  }

  // Now rewrite references to basic blocks to be depth immediates. Walking
  // backwards, each END_* pushes the block whose label the scope targets and
  // each BLOCK/LOOP pops it, so Stack models the wasm control-flow stack at
  // every terminator.
  SmallVector<const MachineBasicBlock *, 8> Stack;
  for (auto &MBB : reverse(MF)) {
    for (auto &MI : reverse(MBB)) {
      switch (MI.getOpcode()) {
      case WebAssembly::BLOCK:
        assert(ScopeTops[Stack.back()->getNumber()]->getNumber() <= MBB.getNumber() &&
               "Block should be balanced");
        Stack.pop_back();
        break;
      case WebAssembly::LOOP:
        assert(Stack.back() == &MBB && "Loop top should be balanced");
        Stack.pop_back();
        break;
      case WebAssembly::END_BLOCK:
        Stack.push_back(&MBB);
        break;
      case WebAssembly::END_LOOP:
        Stack.push_back(LoopTops[&MI]->getParent());
        break;
      default:
        if (MI.isTerminator()) {
          // Rewrite MBB operands to be depth immediates. Take a copy of the
          // operands first, since we strip and re-add them in place.
          SmallVector<MachineOperand, 4> Ops(MI.operands());
          while (MI.getNumOperands() > 0)
            MI.RemoveOperand(MI.getNumOperands() - 1);
          for (auto MO : Ops) {
            if (MO.isMBB())
              MO = MachineOperand::CreateImm(GetDepth(Stack, MO.getMBB()));
            MI.addOperand(MF, MO);
          }
        }
        break;
      }
    }
  }
  assert(Stack.empty() && "Control flow should be balanced");

  // Fix up block/loop signatures at the end of the function to conform to
  // WebAssembly's rules.
  FixEndsAtEndOfFunction(MF, MFI, BlockTops, LoopTops);
}

/// Pass entry point: sort the blocks, then insert markers and rewrite
/// branch targets. Always reports a change since markers are inserted.
bool WebAssemblyCFGStackify::runOnMachineFunction(MachineFunction &MF) {
  DEBUG(dbgs() << "********** CFG Stackifying **********\n"
                  "********** Function: "
               << MF.getName() << '\n');

  const auto &MLI = getAnalysis<MachineLoopInfo>();
  auto &MDT = getAnalysis<MachineDominatorTree>();
  // Liveness is not tracked for VALUE_STACK physreg.
  const auto &TII = *MF.getSubtarget<WebAssemblySubtarget>().getInstrInfo();
  WebAssemblyFunctionInfo &MFI = *MF.getInfo<WebAssemblyFunctionInfo>();
  MF.getRegInfo().invalidateLiveness();

  // Sort the blocks, with contiguous loops.
  SortBlocks(MF, MLI, MDT);

  // Place the BLOCK and LOOP markers to indicate the beginnings of scopes.
  PlaceMarkers(MF, MLI, TII, MDT, MFI);

  return true;
}