Lines Matching +full:switching +full:- +full:freq
1 //===- MLRegAllocEvictAdvisor.cpp - ML eviction advisor -------------------===//
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
11 //===----------------------------------------------------------------------===//
48 #define DEBUG_TYPE "ml-regalloc"
59 "regalloc-evict-interactive-channel-base", cl::Hidden,
62 "have the name <regalloc-evict-interactive-channel-base>.in, while the "
64 "<regalloc-evict-interactive-channel-base>.out"));
72 "regalloc-training-log", cl::Hidden,
76 "regalloc-model", cl::Hidden,
80 "regalloc-enable-development-features", cl::Hidden,
90 /// this happens only in development mode. It's a no-op otherwise.
141 // --------------
143 // --------------
178 "bb freq - weighed nr defs and uses") \
180 "bb freq - weighed nr of reads, normalized") \
182 "bb feq - weighed nr of writes, normalized") \
184 "bb freq - weighed nr of uses that are both read and writes, normalized") \
186 "bb freq - weighed nr of uses that are indvars, normalized") \
188 "bb freq - weighed nr of uses that are hints, normalized") \
190 "the freq in the start block, normalized") \
192 "freq of end block, normalized") \
194 "hottest BB freq, normalized") \
246 // various phys regs won't be available. It's easier (maintenance-wise) to
247 // bulk-reset the state of the evaluator each time we are about to use it
268 // Per-live interval components that get aggregated into the feature values
298 // The assumption is that if the Runner could not be constructed, we emitted
333 // Point-in-time: we didn't learn this, so we always delegate to the
355 // This could be static and shared, but its initialization is non-trivial.
367 // Release (AOT) - specifics
384 return R->getAdvisorMode() == AdvisorMode::Release;
417 // Development mode-specifics
475 return R->getAdvisorMode() == AdvisorMode::Development;
480 if (!Log || !Log->hasAnyObservationForContext(MF.getName()))
484 // this invariant ever changes, we can implement at that time switching
486 if (Log->currentContext() != MF.getName()) {
490 if (Log->hasObservationInProgress())
491 Log->logReward<float>(GetReward());
530 append_range(LFS, MUTR->extraOutputsForLoggingSpecs());
546 Log->switchContext(MF.getName());
579 assert(this->Runner);
580 Runner->switchContext(MF.getName());
593 int64_t Ret = Runner->evaluate<int64_t>();
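Once the per-candidate features are written into the runner's tensors, the advisor asks the model for the index of the candidate to use. A toy fill-then-evaluate sketch of that shape; ToyRunner is a hypothetical stand-in, not LLVM's MLModelRunner, and argmax stands in for the learned model:

// Illustrative only: fill a feature buffer, then evaluate() to get an index.
#include <cstddef>
#include <cstdint>
#include <vector>

struct ToyRunner {
  std::vector<float> Features;
  explicit ToyRunner(std::size_t N) : Features(N, 0.0f) {}
  float *getTensor(std::size_t I) { return &Features[I]; }
  int64_t evaluate() const {
    std::size_t Best = 0;
    for (std::size_t I = 1; I < Features.size(); ++I)
      if (Features[I] > Features[Best])
        Best = I;
    return static_cast<int64_t>(Best);
  }
};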
605 if (Matrix->checkInterference(VirtReg, PhysReg) > LiveRegMatrix::IK_VirtReg) {
610 const bool IsLocal = LIS->intervalIsInOneMBB(VirtReg);
618 for (MCRegUnit Unit : TRI->regunits(PhysReg)) {
619 LiveIntervalUnion::Query &Q = Matrix->query(VirtReg, Unit);
629 assert(Intf->reg().isVirtual() &&
637 if (FixedRegisters.count(Intf->reg()))
643 (Intf->isSpillable() ||
644 RegClassInfo.getNumAllocatableRegs(MRI->getRegClass(VirtReg.reg())) <
646 MRI->getRegClass(Intf->reg())));
648 unsigned IntfCascade = RA.getExtraInfo().getCascade(Intf->reg());
655 LocalIntfs += (IsLocal && LIS->intervalIsInOneMBB(*Intf) &&
675 // max<uint8_t>, then any of the costs of the legally-evictable intervals
682 // Number of available candidates - if 0, no need to continue.
688 // Track the index->register mapping because AllocationOrder doesn't do that
700 // Same overall idea as in the default eviction policy - we visit the values
704 // features at - in AllocationOrder order.
741 [this](SlotIndex InputIndex) -> int {
743 LIS->getInstructionFromIndex(InputIndex);
745 return -1;
747 return CurrentMachineInstruction->getOpcode();
749 [this](SlotIndex InputIndex) -> float {
751 LIS->getInstructionFromIndex(InputIndex);
753 CurrentMachineInstruction->getParent());
755 [this](SlotIndex InputIndex) -> MachineBasicBlock * {
757 LIS->getInstructionFromIndex(InputIndex);
758 return CurrentMachineInstruction->getParent();
762 LIS->getSlotIndexes()->getLastIndex());
773 Runner->getTensor<float>(FeatureIndex)[Pos] /= Largest[FeatureIndex];
776 *Runner->getTensor<float>(FeatureIDs::progress) =
799 LIFeatureComponents &Ret = I.first->getSecond();
807 I = MRI->reg_instr_nodbg_begin(LI.reg()),
808 E = MRI->reg_instr_nodbg_end();
816 if (MI->isIdentityCopy() || MI->isImplicitDef())
820 std::tie(Reads, Writes) = MI->readsWritesVirtualRegister(LI.reg());
822 float Freq = MBFI.getBlockFreqRelativeToEntryBlock(MI->getParent());
823 Ret.HottestBlockFreq = std::max(Freq, Ret.HottestBlockFreq);
825 Ret.R += (Reads && !Writes) * Freq;
826 Ret.W += (!Reads && Writes) * Freq;
827 Ret.RW += (Reads && Writes) * Freq;
829 auto *MBB = MI->getParent();
831 bool IsExiting = Loop ? Loop->isLoopExiting(MBB) : false;
833 if (Writes && IsExiting && LIS->isLiveOutOfMBB(LI, MBB))
834 Ret.IndVarUpdates += Freq;
836 if (MI->isCopy() && VirtRegAuxInfo::copyHint(MI, LI.reg(), TRI, *MRI))
837 Ret.HintWeights += Freq;
864 SlotIndex EndSI = LIS->getSlotIndexes()->getZeroIndex();
865 SlotIndex StartSI = LIS->getSlotIndexes()->getLastIndex();
885 NrBrokenHints += VRM->hasPreferredPhys(LI.reg());
908 MBFI.getBlockFreqRelativeToEntryBlock(LIS->getMBBFromIndex(StartSI));
909 if (EndSI >= LIS->getSlotIndexes()->getLastIndex())
910 EndSI = LIS->getSlotIndexes()->getLastIndex().getPrevIndex();
912 MBFI.getBlockFreqRelativeToEntryBlock(LIS->getMBBFromIndex(EndSI));
918 Runner->getTensor<TYPE>(FeatureIDs::ID)[Pos] = static_cast<TYPE>(VAL); \
957 // 1 - A vector of size max instruction count. It contains the opcodes of the
960 // 2 - A binary mapping matrix of size (LR count * max
963 // 3 - A vector of size max supported MBB count storing MBB frequencies,
965 // 4 - A vector of size max instruction count of indices to members of the MBB
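The four buffers sketched in the comments above can be pictured as parallel arrays. An illustrative layout follows; the sizes, struct, and field names are assumptions for the example, not the pass's actual constants or tensors:

// Illustrative only: parallel buffers matching the four items above.
#include <array>
#include <cstdint>

constexpr unsigned MaxInstrCount = 300; // assumed cap, example only
constexpr unsigned MaxLRCount = 32;
constexpr unsigned MaxMBBCount = 100;

struct ExampleDevFeatures {
  std::array<int64_t, MaxInstrCount> Opcodes{};              // (1) opcode per instruction
  std::array<int64_t, MaxLRCount * MaxInstrCount> Mapping{}; // (2) LR-to-instruction membership
  std::array<float, MaxMBBCount> MBBFreqs{};                 // (3) per-MBB frequency
  std::array<int64_t, MaxInstrCount> InstrMBBIndex{};        // (4) instruction -> MBB slot
};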
992 if (CurrentOpcode == -1) {
1011 RegallocRunner->getTensor<int64_t>(InstructionsIndex)[InstructionIndex] =
1015 RegallocRunner->getTensor<int64_t>(
1033 RegallocRunner->getTensor<int64_t>(
1048 if (CurrentSegmentIndex == LRPosInfo.size() - 1 ||
1073 RegallocRunner->getTensor<float>(MBBFreqIndex)[CurrentMBBIndex] =
1075 RegallocRunner->getTensor<int64_t>(
1080 // Development mode-specific implementations
1114 if (Log->hasObservationInProgress())
1115 Log->logReward<float>(0.0);
1117 Log->startObservation();
1123 Log->logTensorValue(CurrentFeature,
1128 for (size_t I = 0; I < MUTR->extraOutputsForLoggingSpecs().size();
1130 Log->logTensorValue(
1132 reinterpret_cast<const char *>(MUTR->getUntypedExtraOutputValue(I)));
1134 Log->logTensorValue(CurrentFeature, reinterpret_cast<const char *>(&Ret));
1135 Log->endObservation();
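The fragments above show the per-decision logging lifecycle: an observation still awaiting its reward gets an intermediate reward of 0 before the next one starts, then the new observation logs each feature tensor plus the decision and is closed. A compilable sketch of that flow against a hypothetical logger; ToyLogger and logOneDecision are illustrative, not LLVM's Logger API:

// Illustrative only: the start/log/end observation pattern from above.
#include <cstddef>
#include <cstdio>

struct ToyLogger {
  bool AwaitingReward = false;
  bool hasObservationInProgress() const { return AwaitingReward; }
  void logReward(float R) { std::printf("reward %f\n", R); AwaitingReward = false; }
  void startObservation() { AwaitingReward = true; }
  void logTensorValue(std::size_t ID, const char *) { std::printf("tensor %zu\n", ID); }
  void endObservation() {}
};

void logOneDecision(ToyLogger &Log, const char *Features, std::size_t NumFeatures,
                    const char *Decision) {
  if (Log.hasObservationInProgress())
    Log.logReward(0.0f); // intermediate decisions get a 0 reward
  Log.startObservation();
  for (std::size_t I = 0; I < NumFeatures; ++I)
    Log.logTensorValue(I, Features);       // raw bytes of each feature tensor
  Log.logTensorValue(NumFeatures, Decision); // the advisor's decision
  Log.endObservation();
}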