Lines Matching defs:SUnit
672 for (const SUnit *SU : Queue)
691 void ScheduleDAGMI::releaseSucc(SUnit *SU, SDep *SuccEdge) {
692 SUnit *SuccSU = SuccEdge->getSUnit();
719 void ScheduleDAGMI::releaseSuccessors(SUnit *SU) {
728 void ScheduleDAGMI::releasePred(SUnit *SU, SDep *PredEdge) {
729 SUnit *PredSU = PredEdge->getSUnit();
756 void ScheduleDAGMI::releasePredecessors(SUnit *SU) {
839 SmallVector<SUnit*, 8> TopRoots, BotRoots;
856 SUnit *SU = SchedImpl->pickNode(IsTopNode);
910 findRootsAndBiasEdges(SmallVectorImpl<SUnit*> &TopRoots,
911 SmallVectorImpl<SUnit*> &BotRoots) {
912 for (SUnit &SU : SUnits) {
918 // A SUnit is ready to top schedule if it has no predecessors.
921 // A SUnit is ready to bottom schedule if it has no successors.
929 void ScheduleDAGMI::initQueues(ArrayRef<SUnit*> TopRoots,
930 ArrayRef<SUnit*> BotRoots) {
938 for (SUnit *SU : TopRoots)
943 for (SmallVectorImpl<SUnit*>::const_reverse_iterator
959 void ScheduleDAGMI::updateQueues(SUnit *SU, bool IsTopNode) {
1007 SUnit *SU = getSUnit(&MI);
1025 SUnit *SU = getSUnit(&MI);
1027 dbgs() << "Missing SUnit\n";
1089 SUnit *SU = getSUnit(&MI);
1107 SUnit *SU = getSUnit(&MI);
1109 dbgs() << "Missing SUnit\n";
1171 if (SUnit *SU = getSUnit(&MI))
1174 dbgs() << "Missing SUnit\n";
1188 void ScheduleDAGMILive::collectVRegUses(SUnit &SU) {
1255 for (SUnit &SU : SUnits)
1327 updateScheduledPressure(const SUnit *SU,
1371 SUnit &SU = *V2SU.SU;
1403 SUnit *SU = V2SU.SU;
1426 for (const SUnit &SU : SUnits) {
1462 SmallVector<SUnit*, 8> TopRoots, BotRoots;
1479 SUnit *SU = SchedImpl->pickNode(IsTopNode);
1591 const SUnit *DefSU = getSUnit(DefMI);
1600 SUnit *SU = V2SU.SU;
1635 void ScheduleDAGMILive::initQueues(ArrayRef<SUnit*> TopRoots,
1636 ArrayRef<SUnit*> BotRoots) {
1645 void ScheduleDAGMILive::scheduleMI(SUnit *SU, bool IsTopNode) {
1731 SUnit *SU;
1737 MemOpInfo(SUnit *SU, ArrayRef<const MachineOperand *> BaseOps,
1794 void collectMemOpRecords(std::vector<SUnit> &SUnits,
1847 // Keep track of the current cluster length and bytes for each SUnit.
1885 SUnit *SUa = MemOpa.SU;
1886 SUnit *SUb = MemOpb.SU;
1936 std::vector<SUnit> &SUnits, SmallVectorImpl<MemOpInfo> &MemOpRecords) {
2044 void constrainLocalCopy(SUnit *CopySU, ScheduleDAGMILive *DAG);
2078 void CopyConstrain::constrainLocalCopy(SUnit *CopySU, ScheduleDAGMILive *DAG) {
2152 SUnit *GlobalSU = DAG->getSUnit(GlobalDef);
2158 SmallVector<SUnit*,8> LocalUses;
2161 SUnit *LastLocalSU = DAG->getSUnit(LastLocalDef);
2173 SmallVector<SUnit*,8> GlobalUses;
2176 SUnit *FirstLocalSU = DAG->getSUnit(FirstLocalDef);
2188 for (SUnit *LU : LocalUses) {
2193 for (SUnit *GU : GlobalUses) {
2213 for (SUnit &SU : DAG->SUnits) {
2284 for (SUnit &SU : DAG->SUnits) {
2328 /// Compute the stall cycles based on this SUnit's ready time. Heuristics treat
2335 unsigned SchedBoundary::getLatencyStallCycles(SUnit *SU) {
2454 bool SchedBoundary::checkHazard(SUnit *SU) {
2503 findMaxLatency(ArrayRef<SUnit*> ReadySUs) {
2504 SUnit *LateSU = nullptr;
2506 for (SUnit *SU : ReadySUs) {
2550 void SchedBoundary::releaseNode(SUnit *SU, unsigned ReadyCycle, bool InPQueue,
2552 assert(SU->getInstr() && "Scheduled SUnit must have instr");
2674 /// Move the boundary of scheduled code by one SUnit.
2675 void SchedBoundary::bumpNode(SUnit *SU) {
2848 SUnit *SU = *(Pending.begin() + I);
2867 void SchedBoundary::removeReady(SUnit *SU) {
2879 SUnit *SchedBoundary::pickOnlyChoice() {
3373 for (const SUnit *SU : Bot.Available) {
3428 unsigned getWeakLeft(const SUnit *SU, bool isTop) {
3439 int biasPhysReg(const SUnit *SU, bool isTop) {
3476 void GenericScheduler::initCandidate(SchedCandidate &Cand, SUnit *SU,
3520 /// \param TryCand refers to the next SUnit candidate, otherwise uninitialized.
3578 const SUnit *CandNextClusterSU =
3580 const SUnit *TryCandNextClusterSU =
3632 /// TODO: getMaxPressureDelta results can be mostly cached for each SUnit during
3643 for (SUnit *SU : Q) {
3660 SUnit *GenericScheduler::pickNodeBidirectional(bool &IsTopNode) {
3663 if (SUnit *SU = Bot.pickOnlyChoice()) {
3668 if (SUnit *SU = Top.pickOnlyChoice()) {
3738 SUnit *GenericScheduler::pickNode(bool &IsTopNode) {
3744 SUnit *SU;
3798 void GenericScheduler::reschedulePhysReg(SUnit *SU, bool isTop) {
3810 SUnit *DepSU = Dep.getSUnit();
3829 void GenericScheduler::schedNode(SUnit *SU, bool IsTopNode) {
3925 for (const SUnit *SU : Bot.Available) {
3938 /// \param TryCand refers to the next SUnit candidate, otherwise uninitialized.
3954 const SUnit *CandNextClusterSU =
3956 const SUnit *TryCandNextClusterSU =
3992 for (SUnit *SU : Q) {
4005 SUnit *PostGenericScheduler::pickNodeBidirectional(bool &IsTopNode) {
4011 if (SUnit *SU = Bot.pickOnlyChoice()) {
4016 if (SUnit *SU = Top.pickOnlyChoice()) {
4086 SUnit *PostGenericScheduler::pickNode(bool &IsTopNode) {
4092 SUnit *SU;
4143 void PostGenericScheduler::schedNode(SUnit *SU, bool IsTopNode) {
4182 bool operator()(const SUnit *A, const SUnit *B) const {
4209 std::vector<SUnit*> ReadyQ;
4232 SUnit *pickNode(bool &IsTopNode) override {
4235 SUnit *SU = ReadyQ.back();
4257 void schedNode(SUnit *SU, bool IsTopNode) override {
4261 void releaseTopNode(SUnit *) override { /*only called for top roots*/ }
4263 void releaseBottomNode(SUnit *SU) override {
4294 bool operator()(SUnit *A, SUnit *B) const {
4310 PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<false>>
4314 PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<true>>
4329 SUnit *pickNode(bool &IsTopNode) override {
4330 SUnit *SU;
4351 void schedNode(SUnit *SU, bool IsTopNode) override {}
4353 void releaseTopNode(SUnit *SU) override {
4356 void releaseBottomNode(SUnit *SU) override {
4398 static bool isNodeHidden(const SUnit *Node, const ScheduleDAG *G) {
4407 static std::string getEdgeAttributes(const SUnit *Node,
4417 static std::string getNodeLabel(const SUnit *SU, const ScheduleDAG *G) {
4429 static std::string getNodeDescription(const SUnit *SU, const ScheduleDAG *G) {
4433 static std::string getNodeAttributes(const SUnit *N, const ScheduleDAG *G) {
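The matches around file lines 4232-4356 above (pickNode, schedNode, releaseTopNode, releaseBottomNode) are the MachineSchedStrategy hooks that the in-tree ILP and instruction-shuffle strategies implement. As a rough illustration only, below is a minimal sketch of a custom strategy written against those same signatures. The class name SimpleStackStrategy is hypothetical and not part of LLVM; the sketch assumes llvm/CodeGen/MachineScheduler.h provides MachineSchedStrategy, ScheduleDAGMI, and SUnit as used in the file being searched.

    // Minimal sketch only: a LIFO, bottom-up strategy using the hooks shown
    // in the listing. "SimpleStackStrategy" is a hypothetical name.
    #include "llvm/CodeGen/MachineScheduler.h"
    #include <vector>

    namespace {

    class SimpleStackStrategy : public llvm::MachineSchedStrategy {
      std::vector<llvm::SUnit *> ReadyQ; // bottom-up ready list (LIFO)

    public:
      void initialize(llvm::ScheduleDAGMI *DAG) override { ReadyQ.clear(); }

      llvm::SUnit *pickNode(bool &IsTopNode) override {
        if (ReadyQ.empty())
          return nullptr;
        llvm::SUnit *SU = ReadyQ.back();
        ReadyQ.pop_back();
        IsTopNode = false; // schedule strictly bottom-up
        return SU;
      }

      // Nothing to update per scheduled node in this toy strategy.
      void schedNode(llvm::SUnit *SU, bool IsTopNode) override {}

      // Only the bottom-up queue is maintained; top releases are ignored.
      void releaseTopNode(llvm::SUnit *) override {}
      void releaseBottomNode(llvm::SUnit *SU) override { ReadyQ.push_back(SU); }
    };

    } // end anonymous namespace

If such a strategy were wired up in-tree, it would presumably be exposed the same way the listed ILP and shuffle strategies are: a MachineSchedRegistry entry whose factory constructs a ScheduleDAGMI or ScheduleDAGMILive around the strategy. That registration boilerplate is omitted from the sketch.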