Lines Matching +full:tri +full:- +full:state

1 //====- X86SpeculativeLoadHardening.cpp - A Spectre v1 mitigation ---------===//
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
17 /// https://googleprojectzero.blogspot.com/2018/01/reading-privileged-memory-with-side.html
20 //===----------------------------------------------------------------------===//
63 #define PASS_KEY "x86-slh"
71 "Number of post-load register values hardened");
78 "x86-speculative-load-hardening",
83 PASS_KEY "-lfence",
90 PASS_KEY "-post-load",
97 PASS_KEY "-fence-call-and-ret",
103 PASS_KEY "-ip",
104 cl::desc("Harden interprocedurally by passing our state in and out of "
109 HardenLoads(PASS_KEY "-loads",
115 PASS_KEY "-indirect",
138 /// our predicate state through the exiting edges.
149 /// Manages the predicate state traced through the program.
164 const TargetRegisterInfo *TRI = nullptr;
235 // don't know what layout-successor relationships the successor has and we
241 assert(Br->getOperand(0).getMBB() == &Succ &&
243 Br->getOperand(0).setMBB(&NewMBB);
264 TII.insertBranch(NewMBB, &Succ, nullptr, Cond, Br->getDebugLoc());
270 "A non-branch successor must have been a layout successor before "
311 // Inherit live-ins from the successor
323 /// FIXME: It's really frustrating that we have to do this, but SSA-form in MIR
325 /// a single predecessor. This makes CFG-updating extremely complex, so here we
345 // that these are stored as a vector, making this element-wise removal
350 // removal algorithm here. There should be a better way, but the use-def
411 TII = Subtarget->getInstrInfo();
412 TRI = Subtarget->getRegisterInfo();
414 // FIXME: Support for 32-bit.
437 // predicate state through.
444 // The poison value is required to be an all-ones value for many aspects of
446 const int PoisonVal = -1;
447 PS->PoisonReg = MRI->createVirtualRegister(PS->RC);
448 BuildMI(Entry, EntryInsertPt, Loc, TII->get(X86::MOV64ri32), PS->PoisonReg)
453 // get a full fence-based mitigation, inject that fence.
456 // incoming misspeculation from the caller. This helps in two ways: the caller
461 BuildMI(Entry, EntryInsertPt, Loc, TII->get(X86::LFENCE));
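// Sketch of the entry sequence emitted above under the fence-based
// mitigation; %pois is an illustrative name for PS->PoisonReg:
//
//   movq $-1, %pois        # all-ones poison value (MOV64ri32 sign-extends)
//   lfence                 # block any misspeculation arriving from the caller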
474 // Set up the predicate state by extracting it from the incoming stack
476 PS->InitialReg = extractPredStateFromSP(Entry, EntryInsertPt, Loc);
478 // Otherwise, just build the predicate state itself by zeroing a register
479 // as we don't need any initial state.
480 PS->InitialReg = MRI->createVirtualRegister(PS->RC);
481 Register PredStateSubReg = MRI->createVirtualRegister(&X86::GR32RegClass);
482 auto ZeroI = BuildMI(Entry, EntryInsertPt, Loc, TII->get(X86::MOV32r0),
486 ZeroI->findRegisterDefOperand(X86::EFLAGS, /*TRI=*/nullptr);
487 assert(ZeroEFLAGSDefOp && ZeroEFLAGSDefOp->isImplicit() &&
489 ZeroEFLAGSDefOp->setIsDead(true);
490 BuildMI(Entry, EntryInsertPt, Loc, TII->get(X86::SUBREG_TO_REG),
491 PS->InitialReg)
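// Sketch of what the zero-initialization above lowers to; %eax stands in
// for the virtual register. A 32-bit xor implicitly clears the upper half
// of the 64-bit register, and SUBREG_TO_REG models that zero-extension
// without emitting any extra code:
//
//   xorl %eax, %eax        # MOV32r0; %rax's upper 32 bits are zeroed too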
497 // We're going to need to trace predicate state throughout the function's
498 // CFG. Prepare for this by setting up our initial state of PHIs with unique
499 // predecessor entries and all the initial predicate state.
504 PS->SSA.Initialize(PS->InitialReg);
505 PS->SSA.AddAvailableValue(&Entry, PS->InitialReg);
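// Condensed sketch of the MachineSSAUpdater pattern this pass relies on;
// every call below appears elsewhere in this file:
//
//   PS->SSA.Initialize(PS->InitialReg);                 // pick the reg class
//   PS->SSA.AddAvailableValue(&Entry, PS->InitialReg);  // seed the entry def
//   ...
//   PS->SSA.AddAvailableValue(&MBB, UpdatedStateReg);   // per-block new defs
//   ...
//   Register StateReg = PS->SSA.GetValueAtEndOfBlock(&MBB); // may insert PHIs
//   PS->SSA.RewriteUse(Op);                             // patch recorded uses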
512 // re-capture the predicate state from the throwing code. In the Itanium ABI,
514 // predicate state in the stack pointer, so extract fresh predicate state from
516 // FIXME: Handle non-Itanium ABI EH models.
524 PS->SSA.AddAvailableValue(
535 // Then we trace predicate state through the indirect branches.
540 // Now that we have the predicate state available at the start of each block
545 // Now rewrite all the uses of the pred state using the SSA updater to insert
546 // PHIs connecting the state between blocks along the CFG edges.
548 for (MachineOperand &Op : CMovI->operands()) {
549 if (!Op.isReg() || Op.getReg() != PS->InitialReg)
552 PS->SSA.RewriteUse(Op);
561 /// potentially mis-predicted control flow construct.
565 /// practical for any real-world users.
579 if (TermIt == MBB.end() || !TermIt->isBranch())
582 // Add all the non-EH-pad successors to the blocks we want to harden. We
586 if (!SuccMBB->isEHPad())
591 auto InsertPt = MBB->SkipPHIsAndLabels(MBB->begin());
592 BuildMI(*MBB, InsertPt, DebugLoc(), TII->get(X86::LFENCE));
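// Sketch of the resulting layout for this fallback: each non-EH-pad
// successor of a branch-terminated block begins with a fence, e.g.
//
//   jne .LBB0_2
// .LBB0_1:
//   lfence                 # inserted after any PHIs and labels
//   ...
// .LBB0_2:
//   lfence
//   ...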
614 // any unconditional non-indirect branch, and track all conditional edges
617 // condition code in order to inject a "no-op" cmov into that successor
638 // If we see a non-branch terminator, we can't handle anything so bail.
644 // If we see an unconditional branch, reset our state, clear any
683 /// Trace the predicate state through the CFG, instrumenting each conditional
685 /// state.
688 /// uses of the predicate state rewritten into proper SSA form once it is
694 // predicate state into SSA form.
708 // Compute the non-conditional successor as either the target of any
711 UncondBr ? (UncondBr->getOpcode() == X86::JMP_1
712 ? UncondBr->getOperand(0).getMBB()
721 ++SuccCounts[CondBr->getOperand(0).getMBB()];
742 assert((InsertPt == CheckingMBB.end() || !InsertPt->isPHI()) &&
747 // incoming pred state.
748 unsigned CurStateReg = PS->InitialReg;
751 int PredStateSizeInBytes = TRI->getRegSizeInBits(*PS->RC) / 8;
754 Register UpdatedStateReg = MRI->createVirtualRegister(PS->RC);
758 TII->get(CMovOp), UpdatedStateReg)
760 .addReg(PS->PoisonReg)
763 // live-in, mark them as killed.
765 CMovI->findRegisterUseOperand(X86::EFLAGS, /*TRI=*/nullptr)
766 ->setIsKill(true);
769 LLVM_DEBUG(dbgs() << " Inserting cmov: "; CMovI->dump();
774 if (CurStateReg == PS->InitialReg)
782 // predicate state.
783 PS->SSA.AddAvailableValue(&CheckingMBB, CurStateReg);
788 MachineBasicBlock &Succ = *CondBr->getOperand(0).getMBB();
802 --SuccCount;
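// Sketch of the per-edge update in assembly, for a branch on ZF and with
// illustrative names; each successor gets a cmov whose condition holds
// exactly when arriving there would be misspeculation:
//
//   testq %rax, %rax
//   jne .Ltaken
// .Lfallthrough:               # architecturally reached only when ZF == 1
//   cmovneq %pois, %pred       # ZF == 0 here means we were mis-steered
//   ...
// .Ltaken:                     # architecturally reached only when ZF == 0
//   cmoveq %pois, %pred        # ZF == 1 here means we were mis-steered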
909 Register Reg = MRI->createVirtualRegister(UnfoldedRC);
914 TII->unfoldMemoryOperand(MF, MI, Reg, /*UnfoldLoad*/ true,
931 NewMI->dump();
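// Sketch of what unfolding buys us: a load folded into another operation,
// such as
//
//   addq (%rdi), %rax
//
// is split into a bare load plus a register-register op,
//
//   movq (%rdi), %tmp
//   addq %tmp, %rax
//
// so that the loaded value in %tmp can itself be hardened before use.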
942 /// Trace the predicate state through indirect branches, instrumenting them to
943 /// poison the state if a target is reached that does not match the expected
954 /// blocks speculation. This mitigation can replace these retpoline-style
965 // this avoids us having to re-implement the PHI construction logic.
967 TargetAddrSSA.Initialize(MRI->createVirtualRegister(&X86::GR64RegClass));
982 while (MII != MBB.instr_rend() && MII->isDebugInstr())
988 // No terminator or non-branch terminator.
1016 "Support for 16-bit indirect branches is not implemented.");
1019 "Support for 32-bit indirect branches is not implemented.");
1074 // reaching here, and the inserted block will handle the EFLAGS-based
1077 "Cannot check within a block that already has live-in EFLAGS!");
1079 // We can't handle having non-indirect edges into this block unless this is
1092 if (!llvm::all_of(Pred->successors(), [&](MachineBasicBlock *Succ) {
1093 return Succ->isEHPad() || Succ == &MBB;
1098 Pred->dump();
1108 auto InsertPt = Pred->getFirstTerminator();
1109 Register TargetReg = MRI->createVirtualRegister(&X86::GR64RegClass);
1111 !Subtarget->isPositionIndependent()) {
1114 TII->get(X86::MOV64ri32), TargetReg)
1118 LLVM_DEBUG(dbgs() << " Inserting mov: "; AddrI->dump();
1121 auto AddrI = BuildMI(*Pred, InsertPt, DebugLoc(), TII->get(X86::LEA64r),
1130 LLVM_DEBUG(dbgs() << " Inserting lea: "; AddrI->dump();
1150 !Subtarget->isPositionIndependent()) {
1152 auto CheckI = BuildMI(MBB, InsertPt, DebugLoc(), TII->get(X86::CMP64ri32))
1157 LLVM_DEBUG(dbgs() << " Inserting cmp: "; CheckI->dump(); dbgs() << "\n");
1160 Register AddrReg = MRI->createVirtualRegister(&X86::GR64RegClass);
1162 BuildMI(MBB, InsertPt, DebugLoc(), TII->get(X86::LEA64r), AddrReg)
1170 LLVM_DEBUG(dbgs() << " Inserting lea: "; AddrI->dump(); dbgs() << "\n");
1171 auto CheckI = BuildMI(MBB, InsertPt, DebugLoc(), TII->get(X86::CMP64rr))
1176 LLVM_DEBUG(dbgs() << " Inserting cmp: "; CheckI->dump(); dbgs() << "\n");
1180 int PredStateSizeInBytes = TRI->getRegSizeInBits(*PS->RC) / 8;
1182 Register UpdatedStateReg = MRI->createVirtualRegister(PS->RC);
1184 BuildMI(MBB, InsertPt, DebugLoc(), TII->get(CMovOp), UpdatedStateReg)
1185 .addReg(PS->InitialReg)
1186 .addReg(PS->PoisonReg)
1188 CMovI->findRegisterUseOperand(X86::EFLAGS, /*TRI=*/nullptr)
1189 ->setIsKill(true);
1191 LLVM_DEBUG(dbgs() << " Inserting cmov: "; CMovI->dump(); dbgs() << "\n");
1195 // predicate state.
1196 PS->SSA.AddAvailableValue(&MBB, UpdatedStateReg);
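// Sketch of the target check built above, shown for a predecessor with a
// direct edge into an indirect-branch target block (PIC case; names are
// illustrative):
//
//   # In each direct-edge predecessor:
//   leaq .LMBB(%rip), %target    # record the address we mean to reach
//   jmp .LMBB
//
// .LMBB:                         # the indirect-branch target block
//   leaq .LMBB(%rip), %addr      # this block's own address
//   cmpq %addr, %target
//   cmovneq %pois, %pred         # poison if speculation steered us here wrongly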
1199 // Return all the newly inserted cmov instructions of the predicate state.
1207 MI.findRegisterDefOperand(X86::EFLAGS, /*TRI=*/nullptr)) {
1208 return !DefOp->isDead();
1214 const TargetRegisterInfo &TRI) {
1216 // live-in, and then seeing if that def is in turn used.
1219 MI.findRegisterDefOperand(X86::EFLAGS, /*TRI=*/nullptr)) {
1221 if (DefOp->isDead())
1229 if (MI.killsRegister(X86::EFLAGS, &TRI))
1238 /// Trace the predicate state through each of the blocks in the function,
1241 /// We call this routine once the initial predicate state has been established
1243 /// it through the instructions within each basic block, and for non-returning
1244 /// blocks informs the SSA updater about the final state that lives out of the
1246 /// currently valid predicate state. We have to do these two things together
1248 /// the current predicate state directly and update it as it changes.
1254 /// strategies may interact -- later hardening may change what strategy we wish
1260 /// Second, we actively trace the predicate state through the block, applying
1274 // Track the set of load-dependent registers through the basic block. Because
1287 // as that often successfully re-uses hardened addresses and minimizes
1336 // If we have at least one (non-frame-index, non-RIP) register operand,
1337 // and neither operand is load-dependent, we need to check the load.
1357 // If post-load hardening is enabled, this load is compatible with
1358 // post-load hardening, and we aren't already going to harden one of the
1359 // address registers, queue it up to be hardened post-load. Notably,
1374 // operands as being address-hardened.
1386 // Now re-walk the instructions in the basic block, and apply whichever
1389 // which we will do post-load hardening and can defer it in certain
1413 assert(!MI.isCall() && "Must not try to post-load harden a call!");
1415 // If this is a data-invariant load and there is no EFLAGS
1440 // Mark the resulting hardened register as such so we don't re-harden.
1470 // state into a call and recovering it after the call returns (unless this
1481 // Currently, we only track data-dependent loads within a basic block.
1497 // FIXME: Hard coding this to a 32-bit register class seems weird, but matches
1499 Register Reg = MRI->createVirtualRegister(&X86::GR32RegClass);
1502 BuildMI(MBB, InsertPt, Loc, TII->get(X86::COPY), Reg).addReg(X86::EFLAGS);
1515 BuildMI(MBB, InsertPt, Loc, TII->get(X86::COPY), X86::EFLAGS).addReg(Reg);
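// Condensed sketch of how the pass pairs these two helpers around
// hardening code that clobbers EFLAGS while the program still needs it:
//
//   Register FlagsReg;
//   if (EFLAGSLive)
//     FlagsReg = saveEFLAGS(MBB, InsertPt, Loc);      // COPY EFLAGS -> GR32
//   // ... emit OR/SHL-based hardening that writes EFLAGS ...
//   if (EFLAGSLive)
//     restoreEFLAGS(MBB, InsertPt, Loc, FlagsReg);    // COPY GR32 -> EFLAGS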
1519 /// Takes the current predicate state (in a register) and merges it into the
1520 /// stack pointer. The state is essentially a single bit, but we merge this in
1521 /// a way that won't form non-canonical pointers and also will be preserved
1526 Register TmpReg = MRI->createVirtualRegister(PS->RC);
1528 // to stay canonical on 64-bit. We should compute this somehow and support
1529 // 32-bit as part of that.
1530 auto ShiftI = BuildMI(MBB, InsertPt, Loc, TII->get(X86::SHL64ri), TmpReg)
1533 ShiftI->addRegisterDead(X86::EFLAGS, TRI);
1535 auto OrI = BuildMI(MBB, InsertPt, Loc, TII->get(X86::OR64rr), X86::RSP)
1538 OrI->addRegisterDead(X86::EFLAGS, TRI);
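// Sketch of the merge; N stands for the shift amount, chosen so the
// all-ones state lands in RSP's high bits without making the pointer
// non-canonical:
//
//   shlq $N, %tmp          # %tmp = state << N (the EFLAGS def is dead)
//   orq %tmp, %rsp         # fold the state into the stack pointer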
1542 /// Extracts the predicate state stored in the high bits of the stack pointer.
1546 Register PredStateReg = MRI->createVirtualRegister(PS->RC);
1547 Register TmpReg = MRI->createVirtualRegister(PS->RC);
1549 // We know that the stack pointer will have any preserved predicate state in
1552 BuildMI(MBB, InsertPt, Loc, TII->get(TargetOpcode::COPY), TmpReg)
1555 BuildMI(MBB, InsertPt, Loc, TII->get(X86::SAR64ri), PredStateReg)
1557 .addImm(TRI->getRegSizeInBits(*PS->RC) - 1);
1558 ShiftI->addRegisterDead(X86::EFLAGS, TRI);
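// Sketch of the extraction; the arithmetic shift smears RSP's sign bit
// across all 64 bits, yielding all-zeros (valid) or all-ones (poisoned):
//
//   movq %rsp, %tmp
//   sarq $63, %tmp         # %pred = %tmp >> 63 (arithmetic shift)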
1571 // live-in, and then seeing if that def is in turn used.
1572 bool EFLAGSLive = isEFLAGSLive(MBB, MI.getIterator(), *TRI);
1592 // For both RIP-relative addressed loads or absolute loads, we cannot
1597 // dynamic address being the base plus -1 because we can't mutate the
1598 // segment register here. This allows the signed 32-bit offset to point at
1599 // valid segment-relative addresses and load them successfully.
1602 << (BaseMO.getReg() == X86::RIP ? "RIP-relative" : "no-base")
1612 HardenOpRegs.front()->getReg() != IndexMO.getReg()))
1618 HardenOpRegs[0]->getReg() != HardenOpRegs[1]->getReg()) &&
1624 auto It = AddrRegToHardenedReg.find(Op->getReg());
1630 Op->setReg(It->second);
1637 // Compute the current predicate state.
1638 Register StateReg = PS->SSA.GetValueAtEndOfBlock(&MBB);
1646 if (EFLAGSLive && !Subtarget->hasBMI2()) {
1652 Register OpReg = Op->getReg();
1653 auto *OpRC = MRI->getRegClass(OpReg);
1654 Register TmpReg = MRI->createVirtualRegister(OpRC);
1658 if (!Subtarget->hasVLX() && (OpRC->hasSuperClassEq(&X86::VR128RegClass) ||
1659 OpRC->hasSuperClassEq(&X86::VR256RegClass))) {
1660 assert(Subtarget->hasAVX2() && "AVX2-specific register classes!");
1661 bool Is128Bit = OpRC->hasSuperClassEq(&X86::VR128RegClass);
1663 // Move our state into a vector register.
1664 // FIXME: We could skip this at the cost of longer encodings with AVX-512
1666 Register VStateReg = MRI->createVirtualRegister(&X86::VR128RegClass);
1668 BuildMI(MBB, InsertPt, Loc, TII->get(X86::VMOV64toPQIrr), VStateReg)
1672 LLVM_DEBUG(dbgs() << " Inserting mov: "; MovI->dump(); dbgs() << "\n");
1675 Register VBStateReg = MRI->createVirtualRegister(OpRC);
1677 TII->get(Is128Bit ? X86::VPBROADCASTQrr
1683 LLVM_DEBUG(dbgs() << " Inserting broadcast: "; BroadcastI->dump();
1686 // Merge our potential poison state into the value with a vector or.
1689 TII->get(Is128Bit ? X86::VPORrr : X86::VPORYrr), TmpReg)
1694 LLVM_DEBUG(dbgs() << " Inserting or: "; OrI->dump(); dbgs() << "\n");
1695 } else if (OpRC->hasSuperClassEq(&X86::VR128XRegClass) ||
1696 OpRC->hasSuperClassEq(&X86::VR256XRegClass) ||
1697 OpRC->hasSuperClassEq(&X86::VR512RegClass)) {
1698 assert(Subtarget->hasAVX512() && "AVX512-specific register classes!");
1699 bool Is128Bit = OpRC->hasSuperClassEq(&X86::VR128XRegClass);
1700 bool Is256Bit = OpRC->hasSuperClassEq(&X86::VR256XRegClass);
1702 assert(Subtarget->hasVLX() && "AVX512VL-specific register classes!");
1704 // Broadcast our state into a vector register.
1705 Register VStateReg = MRI->createVirtualRegister(OpRC);
1710 BuildMI(MBB, InsertPt, Loc, TII->get(BroadcastOp), VStateReg)
1714 LLVM_DEBUG(dbgs() << " Inserting broadcast: "; BroadcastI->dump();
1717 // Merge our potential poison state into the value with a vector or.
1720 auto OrI = BuildMI(MBB, InsertPt, Loc, TII->get(OrOp), TmpReg)
1725 LLVM_DEBUG(dbgs() << " Inserting or: "; OrI->dump(); dbgs() << "\n");
1727 // FIXME: Need to support GR32 here for 32-bit code.
1728 assert(OpRC->hasSuperClassEq(&X86::GR64RegClass) &&
1732 // Merge our potential poison state into the value with an or.
1733 auto OrI = BuildMI(MBB, InsertPt, Loc, TII->get(X86::OR64rr), TmpReg)
1736 OrI->addRegisterDead(X86::EFLAGS, TRI);
1738 LLVM_DEBUG(dbgs() << " Inserting or: "; OrI->dump(); dbgs() << "\n");
1743 BuildMI(MBB, InsertPt, Loc, TII->get(X86::SHRX64rr), TmpReg)
1748 LLVM_DEBUG(dbgs() << " Inserting shrx: "; ShiftI->dump();
1754 assert(!AddrRegToHardenedReg.count(Op->getReg()) &&
1756 AddrRegToHardenedReg[Op->getReg()] = TmpReg;
1757 Op->setReg(TmpReg);
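// Sketch of the scalar address-hardening forms chosen above (names are
// illustrative); the OR form requires EFLAGS to be dead, while the BMI2
// SHRX form leaves the flags untouched when they are live:
//
//   orq %pred, %ptr           # %pred is 0 or all-ones; all-ones wrecks %ptr
//
//   shrxq %pred, %ptr, %tmp   # shift by 0 (benign) or by 63 (destroys %ptr)
//
// Vector operands get the state broadcast (vmovq + vpbroadcastq) and then
// merged in with vpor, as in the AVX2/AVX-512 paths above.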
1769 "Cannot get here with a non-invariant load!");
1776 [&](MachineInstr &MI) -> std::optional<MachineInstr *> {
1783 for (MachineInstr &UseMI : MRI->use_instructions(DefReg)) {
1788 // If we've already decided to harden a non-load, we must have sunk
1789 // some other post-load hardened instruction to it and it must itself
1790 // be data-invariant.
1861 auto *RC = MRI->getRegClass(Reg);
1862 int RegBytes = TRI->getRegSizeInBits(*RC) / 8;
1864 // We don't support post-load hardening of vectors.
1874 // end up both with a NOREX and REX-only register as operands to the hardening
1886 return RC->hasSuperClassEq(GPRRegClasses[RegIdx]);
1891 /// This is the low-level logic to fully harden a value sitting in a register
1898 /// larger than the predicate state register. FIXME: We should support vector
1899 /// registers here by broadcasting the predicate state.
1908 auto *RC = MRI->getRegClass(Reg);
1909 int Bytes = TRI->getRegSizeInBits(*RC) / 8;
1910 Register StateReg = PS->SSA.GetValueAtEndOfBlock(&MBB);
1914 // FIXME: Need to teach this about 32-bit mode.
1918 Register NarrowStateReg = MRI->createVirtualRegister(RC);
1919 BuildMI(MBB, InsertPt, Loc, TII->get(TargetOpcode::COPY), NarrowStateReg)
1925 if (isEFLAGSLive(MBB, InsertPt, *TRI))
1928 Register NewReg = MRI->createVirtualRegister(RC);
1931 auto OrI = BuildMI(MBB, InsertPt, Loc, TII->get(OrOpCode), NewReg)
1934 OrI->addRegisterDead(X86::EFLAGS, TRI);
1936 LLVM_DEBUG(dbgs() << " Inserting or: "; OrI->dump(); dbgs() << "\n");
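// Sketch for a value narrower than the 64-bit state register: the state is
// first copied down to the operand's width via a subregister COPY, then
// OR'd in (names illustrative):
//
//   # %pred32 = low 32 bits of %pred
//   orl %pred32, %eax        # poisons every bit of a loaded 32-bit value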
1946 /// We can harden a non-leaking load into a register without touching the
1959 auto *DefRC = MRI->getRegClass(OldDefReg);
1964 Register UnhardenedReg = MRI->createVirtualRegister(DefRC);
1975 MRI->replaceRegWith(/*FromReg*/ OldDefReg, /*ToReg*/ HardenedReg);
1996 /// benefit: it allows us to pass the predicate state accumulated in this
2001 /// speculatively even during a BCBS-attacked return until the steering takes
2003 /// predicate state from the stack pointer and continue to harden loads.
2014 // Take our predicate state, shift it to the high 17 bits (so that we keep
2017 mergePredStateIntoSP(MBB, InsertPt, Loc, PS->SSA.GetValueAtEndOfBlock(&MBB));
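// Sketch of the hardened return path; the state rides in RSP's high bits
// across the window where the return may be mispredicted (N as in the
// merge above):
//
//   shlq $N, %pred
//   orq %pred, %rsp
//   retq                     # the caller re-extracts the state with sarq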
2020 /// Trace the predicate state through a call.
2025 /// First, we need to send the predicate state into the called function. We do
2031 /// extract the predicate state from the high bits of the stack pointer after
2039 /// https://christian-rossow.de/publications/ret2spec-ccs2018.pdf
2043 /// advantage of the red-zone to load the return address from `-8(%rsp)` where it
2049 /// state.
2067 BuildMI(MBB, std::next(InsertPt), Loc, TII->get(X86::LFENCE));
2073 // First, we transfer the predicate state into the called function by merging
2074 // it into the stack pointer. This will kill the current def of the state.
2075 Register StateReg = PS->SSA.GetValueAtEndOfBlock(&MBB);
2099 if (!Subtarget->getFrameLowering()->has128ByteRedZone(MF) ||
2107 // when a callee-saved register is used and the callee doesn't push it onto
2119 ExpectedRetAddrReg = MRI->createVirtualRegister(AddrRC);
2121 !Subtarget->isPositionIndependent()) {
2122 BuildMI(MBB, InsertPt, Loc, TII->get(X86::MOV64ri32), ExpectedRetAddrReg)
2125 BuildMI(MBB, InsertPt, Loc, TII->get(X86::LEA64r), ExpectedRetAddrReg)
2137 // If we didn't pre-compute the expected return address into a register, then
2142 ExpectedRetAddrReg = MRI->createVirtualRegister(AddrRC);
2143 BuildMI(MBB, InsertPt, Loc, TII->get(X86::MOV64rm), ExpectedRetAddrReg)
2147 .addImm(/*Displacement*/ -8) // The stack pointer has been popped, so
2148 // the return address is 8 bytes past it.
2152 // Now we extract the callee's predicate state from the stack pointer.
2159 !Subtarget->isPositionIndependent()) {
2162 BuildMI(MBB, InsertPt, Loc, TII->get(X86::CMP64ri32))
2166 Register ActualRetAddrReg = MRI->createVirtualRegister(AddrRC);
2167 BuildMI(MBB, InsertPt, Loc, TII->get(X86::LEA64r), ActualRetAddrReg)
2173 BuildMI(MBB, InsertPt, Loc, TII->get(X86::CMP64rr))
2178 // Now conditionally update the predicate state we just extracted if we ended
2180 int PredStateSizeInBytes = TRI->getRegSizeInBits(*PS->RC) / 8;
2183 Register UpdatedStateReg = MRI->createVirtualRegister(PS->RC);
2184 auto CMovI = BuildMI(MBB, InsertPt, Loc, TII->get(CMovOp), UpdatedStateReg)
2186 .addReg(PS->PoisonReg)
2188 CMovI->findRegisterUseOperand(X86::EFLAGS, /*TRI=*/nullptr)->setIsKill(true);
2190 LLVM_DEBUG(dbgs() << " Inserting cmov: "; CMovI->dump(); dbgs() << "\n");
2192 PS->SSA.AddAvailableValue(&MBB, UpdatedStateReg);
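// Sketch of the full around-call sequence (red-zone reload variant; PIC
// addressing; names illustrative):
//
//   shlq $N, %pred
//   orq %pred, %rsp              # pass the state to the callee in RSP
//   callq callee
// .Lret:
//   movq -8(%rsp), %expected     # reload the popped return address; the
//                                # red zone keeps this slot intact
//   movq %rsp, %tmp
//   sarq $63, %pred              # recover the callee's outgoing state
//   leaq .Lret(%rip), %actual    # where we actually resumed
//   cmpq %actual, %expected
//   cmovneq %pois, %pred         # poison if the return was mis-steered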
2204 /// definitively treated as needing post-load hardening. While address hardening
2208 /// have an opportunity to post-load harden here, we just need to scan for cases