//==- CodeGen/TargetRegisterInfo.h - Target Register Information -*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file describes an abstract interface used to get information about a
// target machine's register file. This information is used for a variety of
// purposes, especially register allocation.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_TARGETREGISTERINFO_H
#define LLVM_CODEGEN_TARGETREGISTERINFO_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/RegisterBank.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/MC/LaneBitmask.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Printable.h"
#include <cassert>
#include <cstdint>

namespace llvm {

class BitVector;
class DIExpression;
class LiveRegMatrix;
class MachineFunction;
class MachineInstr;
class RegScavenger;
class VirtRegMap;
class LiveIntervals;
class LiveInterval;

/// A codegen-level view of a register class. Wraps the MC-layer
/// MCRegisterClass and adds extra information emitted by TableGen
/// (sub/super-class masks, lane masks, allocation hints).
class TargetRegisterClass {
public:
  using iterator = const MCPhysReg *;
  using const_iterator = const MCPhysReg *;

  // Instance variables filled by tablegen, do not use!
  const MCRegisterClass *MC;
  const uint32_t *SubClassMask;
  const uint16_t *SuperRegIndices;
  const LaneBitmask LaneMask;
  /// Classes with a higher priority value are assigned first by register
  /// allocators using a greedy heuristic. The value is in the range [0,31].
  const uint8_t AllocationPriority;

  // Change allocation priority heuristic used by greedy.
  const bool GlobalPriority;

  /// Configurable target specific flags.
  const uint8_t TSFlags;
  /// Whether the class supports two (or more) disjunct subregister indices.
  const bool HasDisjunctSubRegs;
  /// Whether a combination of subregisters can cover every register in the
  /// class. See also the CoveredBySubRegs description in Target.td.
  const bool CoveredBySubRegs;
  // 0-length list of super-class IDs; nullptr when this class has none.
  const unsigned *SuperClasses;
  // Number of entries in SuperClasses.
  const uint16_t SuperClassesSize;
  // Optional target hook producing a custom raw allocation order for a
  // specific function; see getRawAllocationOrder().
  ArrayRef<MCPhysReg> (*OrderFunc)(const MachineFunction&);

  /// Return the register class ID number.
  unsigned getID() const { return MC->getID(); }

  /// begin/end - Return all of the registers in this class.
  ///
  iterator begin() const { return MC->begin(); }
  iterator end() const { return MC->end(); }

  /// Return the number of registers in this class.
  unsigned getNumRegs() const { return MC->getNumRegs(); }

  /// Return all registers of this class as one contiguous array.
  ArrayRef<MCPhysReg> getRegisters() const {
    return ArrayRef(begin(), getNumRegs());
  }

  /// Return the specified register in the class.
  MCRegister getRegister(unsigned i) const {
    return MC->getRegister(i);
  }

  /// Return true if the specified register is included in this register class.
  /// This does not include virtual registers.
  bool contains(Register Reg) const {
    /// FIXME: Historically this function has returned false when given vregs
    /// but it should probably only receive physical registers
    if (!Reg.isPhysical())
      return false;
    return MC->contains(Reg.asMCReg());
  }

  /// Return true if both registers are in this class.
  bool contains(Register Reg1, Register Reg2) const {
    /// FIXME: Historically this function has returned false when given vregs
    /// but it should probably only receive physical registers
    if (!Reg1.isPhysical() || !Reg2.isPhysical())
      return false;
    return MC->contains(Reg1.asMCReg(), Reg2.asMCReg());
  }

  /// Return the cost of copying a value between two registers in this class.
  /// A negative number means the register class is very expensive
  /// to copy e.g. status flag register classes.
  int getCopyCost() const { return MC->getCopyCost(); }

  /// Return true if this register class may be used to create virtual
  /// registers.
  bool isAllocatable() const { return MC->isAllocatable(); }

  /// Return true if this register class has a defined BaseClassOrder.
  bool isBaseClass() const { return MC->isBaseClass(); }

  /// Return true if the specified TargetRegisterClass
  /// is a proper sub-class of this TargetRegisterClass.
  bool hasSubClass(const TargetRegisterClass *RC) const {
    return RC != this && hasSubClassEq(RC);
  }

  /// Returns true if RC is a sub-class of or equal to this class.
  bool hasSubClassEq(const TargetRegisterClass *RC) const {
    // SubClassMask is a bit vector indexed by register class ID; see
    // getSubClassMask() below for the layout.
    unsigned ID = RC->getID();
    return (SubClassMask[ID / 32] >> (ID % 32)) & 1;
  }

  /// Return true if the specified TargetRegisterClass is a
  /// proper super-class of this TargetRegisterClass.
  bool hasSuperClass(const TargetRegisterClass *RC) const {
    return RC->hasSubClass(this);
  }

  /// Returns true if RC is a super-class of or equal to this class.
  bool hasSuperClassEq(const TargetRegisterClass *RC) const {
    return RC->hasSubClassEq(this);
  }

  /// Returns a bit vector of subclasses, including this one.
  /// The vector is indexed by class IDs.
  ///
  /// To use it, consider the returned array as a chunk of memory that
  /// contains an array of bits of size NumRegClasses. Each 32-bit chunk
  /// contains a bitset of the ID of the subclasses in big-endian style.
  ///
  /// I.e., the representation of the memory from left to right at the
  /// bit level looks like:
  /// [31 30 ... 1 0] [ 63 62 ... 33 32] ...
  /// [ XXX NumRegClasses NumRegClasses - 1 ... ]
  /// Where the number represents the class ID and XXX bits that
  /// should be ignored.
  ///
  /// See the implementation of hasSubClassEq for an example of how it
  /// can be used.
  const uint32_t *getSubClassMask() const {
    return SubClassMask;
  }

  /// Returns a 0-terminated list of sub-register indices that project some
  /// super-register class into this register class. The list has an entry for
  /// each Idx such that:
  ///
  ///   There exists SuperRC where:
  ///     For all Reg in SuperRC:
  ///       this->contains(Reg:Idx)
  const uint16_t *getSuperRegIndices() const {
    return SuperRegIndices;
  }

  /// Returns a list of super-classes. The
  /// classes are ordered by ID which is also a topological ordering from large
  /// to small classes. The list does NOT include the current class.
  ArrayRef<unsigned> superclasses() const {
    return ArrayRef(SuperClasses, SuperClassesSize);
  }

  /// Return true if this TargetRegisterClass is a subset
  /// class of at least one other TargetRegisterClass.
  bool isASubClass() const { return SuperClasses != nullptr; }

  /// Returns the preferred order for allocating registers from this register
  /// class in MF. The raw order comes directly from the .td file and may
  /// include reserved registers that are not allocatable.
  /// Register allocators should also make sure to allocate
  /// callee-saved registers only after all the volatiles are used. The
  /// RegisterClassInfo class provides filtered allocation orders with
  /// callee-saved registers moved to the end.
  ///
  /// The MachineFunction argument can be used to tune the allocatable
  /// registers based on the characteristics of the function, subtarget, or
  /// other criteria.
  ///
  /// By default, this method returns all registers in the class.
  ArrayRef<MCPhysReg> getRawAllocationOrder(const MachineFunction &MF) const {
    return OrderFunc ? OrderFunc(MF) : getRegisters();
  }

  /// Returns the combination of all lane masks of register in this class.
  /// The lane masks of the registers are the combination of all lane masks
  /// of their subregisters. Returns 1 if there are no subregisters.
  LaneBitmask getLaneMask() const {
    return LaneMask;
  }
};

/// Extra information, not in MCRegisterDesc, about registers.
/// These are used by codegen, not by MC.
struct TargetRegisterInfoDesc {
  const uint8_t *CostPerUse; // Extra cost of instructions using register.
  unsigned NumCosts; // Number of cost values associated with each register.
  const bool
      *InAllocatableClass; // Register belongs to an allocatable regclass.
};

/// Each TargetRegisterClass has a per register weight, and weight
/// limit which must be less than the limits of its pressure sets.
struct RegClassWeight {
  unsigned RegWeight;
  unsigned WeightLimit;
};

/// TargetRegisterInfo base class - We assume that the target defines a static
/// array of TargetRegisterDesc objects that represent all of the machine
/// registers that the target has. As such, we simply have to track a pointer
/// to this array so that we can turn register number into a register
/// descriptor.
234 /// 235 class TargetRegisterInfo : public MCRegisterInfo { 236 public: 237 using regclass_iterator = const TargetRegisterClass * const *; 238 using vt_iterator = const MVT::SimpleValueType *; 239 struct RegClassInfo { 240 unsigned RegSize, SpillSize, SpillAlignment; 241 unsigned VTListOffset; 242 }; 243 244 /// SubRegCoveredBits - Emitted by tablegen: bit range covered by a subreg 245 /// index, -1 in any being invalid. 246 struct SubRegCoveredBits { 247 uint16_t Offset; 248 uint16_t Size; 249 }; 250 251 private: 252 const TargetRegisterInfoDesc *InfoDesc; // Extra desc array for codegen 253 const char *const *SubRegIndexNames; // Names of subreg indexes. 254 const SubRegCoveredBits *SubRegIdxRanges; // Pointer to the subreg covered 255 // bit ranges array. 256 257 // Pointer to array of lane masks, one per sub-reg index. 258 const LaneBitmask *SubRegIndexLaneMasks; 259 260 regclass_iterator RegClassBegin, RegClassEnd; // List of regclasses 261 LaneBitmask CoveringLanes; 262 const RegClassInfo *const RCInfos; 263 const MVT::SimpleValueType *const RCVTLists; 264 unsigned HwMode; 265 266 protected: 267 TargetRegisterInfo(const TargetRegisterInfoDesc *ID, regclass_iterator RCB, 268 regclass_iterator RCE, const char *const *SRINames, 269 const SubRegCoveredBits *SubIdxRanges, 270 const LaneBitmask *SRILaneMasks, LaneBitmask CoveringLanes, 271 const RegClassInfo *const RCIs, 272 const MVT::SimpleValueType *const RCVTLists, 273 unsigned Mode = 0); 274 virtual ~TargetRegisterInfo(); 275 276 public: 277 /// Return the number of registers for the function. (may overestimate) 278 virtual unsigned getNumSupportedRegs(const MachineFunction &) const { 279 return getNumRegs(); 280 } 281 282 // Register numbers can represent physical registers, virtual registers, and 283 // sometimes stack slots. The unsigned values are divided into these ranges: 284 // 285 // 0 Not a register, can be used as a sentinel. 286 // [1;2^30) Physical registers assigned by TableGen. 
287 // [2^30;2^31) Stack slots. (Rarely used.) 288 // [2^31;2^32) Virtual registers assigned by MachineRegisterInfo. 289 // 290 // Further sentinels can be allocated from the small negative integers. 291 // DenseMapInfo<unsigned> uses -1u and -2u. 292 293 /// Return the size in bits of a register from class RC. 294 TypeSize getRegSizeInBits(const TargetRegisterClass &RC) const { 295 return TypeSize::getFixed(getRegClassInfo(RC).RegSize); 296 } 297 298 /// Return the size in bytes of the stack slot allocated to hold a spilled 299 /// copy of a register from class RC. 300 unsigned getSpillSize(const TargetRegisterClass &RC) const { 301 return getRegClassInfo(RC).SpillSize / 8; 302 } 303 304 /// Return the minimum required alignment in bytes for a spill slot for 305 /// a register of this class. 306 Align getSpillAlign(const TargetRegisterClass &RC) const { 307 return Align(getRegClassInfo(RC).SpillAlignment / 8); 308 } 309 310 /// Return true if the given TargetRegisterClass has the ValueType T. 311 bool isTypeLegalForClass(const TargetRegisterClass &RC, MVT T) const { 312 for (auto I = legalclasstypes_begin(RC); *I != MVT::Other; ++I) 313 if (MVT(*I) == T) 314 return true; 315 return false; 316 } 317 318 /// Return true if the given TargetRegisterClass is compatible with LLT T. 319 bool isTypeLegalForClass(const TargetRegisterClass &RC, LLT T) const { 320 for (auto I = legalclasstypes_begin(RC); *I != MVT::Other; ++I) { 321 MVT VT(*I); 322 if (VT == MVT::Untyped) 323 return true; 324 325 if (LLT(VT) == T) 326 return true; 327 } 328 return false; 329 } 330 331 /// Loop over all of the value types that can be represented by values 332 /// in the given register class. 
333 vt_iterator legalclasstypes_begin(const TargetRegisterClass &RC) const { 334 return &RCVTLists[getRegClassInfo(RC).VTListOffset]; 335 } 336 337 vt_iterator legalclasstypes_end(const TargetRegisterClass &RC) const { 338 vt_iterator I = legalclasstypes_begin(RC); 339 while (*I != MVT::Other) 340 ++I; 341 return I; 342 } 343 344 /// Returns the Register Class of a physical register of the given type, 345 /// picking the most sub register class of the right type that contains this 346 /// physreg. 347 const TargetRegisterClass *getMinimalPhysRegClass(MCRegister Reg, 348 MVT VT = MVT::Other) const; 349 350 /// Returns the common Register Class of two physical registers of the given 351 /// type, picking the most sub register class of the right type that contains 352 /// these two physregs. 353 const TargetRegisterClass * 354 getCommonMinimalPhysRegClass(MCRegister Reg1, MCRegister Reg2, 355 MVT VT = MVT::Other) const; 356 357 /// Returns the Register Class of a physical register of the given type, 358 /// picking the most sub register class of the right type that contains this 359 /// physreg. If there is no register class compatible with the given type, 360 /// returns nullptr. 361 const TargetRegisterClass *getMinimalPhysRegClassLLT(MCRegister Reg, 362 LLT Ty = LLT()) const; 363 364 /// Returns the common Register Class of two physical registers of the given 365 /// type, picking the most sub register class of the right type that contains 366 /// these two physregs. If there is no register class compatible with the 367 /// given type, returns nullptr. 368 const TargetRegisterClass * 369 getCommonMinimalPhysRegClassLLT(MCRegister Reg1, MCRegister Reg2, 370 LLT Ty = LLT()) const; 371 372 /// Return the maximal subclass of the given register class that is 373 /// allocatable or NULL. 
374 const TargetRegisterClass * 375 getAllocatableClass(const TargetRegisterClass *RC) const; 376 377 /// Returns a bitset indexed by register number indicating if a register is 378 /// allocatable or not. If a register class is specified, returns the subset 379 /// for the class. 380 BitVector getAllocatableSet(const MachineFunction &MF, 381 const TargetRegisterClass *RC = nullptr) const; 382 383 /// Get a list of cost values for all registers that correspond to the index 384 /// returned by RegisterCostTableIndex. 385 ArrayRef<uint8_t> getRegisterCosts(const MachineFunction &MF) const { 386 unsigned Idx = getRegisterCostTableIndex(MF); 387 unsigned NumRegs = getNumRegs(); 388 assert(Idx < InfoDesc->NumCosts && "CostPerUse index out of bounds"); 389 390 return ArrayRef(&InfoDesc->CostPerUse[Idx * NumRegs], NumRegs); 391 } 392 393 /// Return true if the register is in the allocation of any register class. 394 bool isInAllocatableClass(MCRegister RegNo) const { 395 return InfoDesc->InAllocatableClass[RegNo]; 396 } 397 398 /// Return the human-readable symbolic target-specific 399 /// name for the specified SubRegIndex. 400 const char *getSubRegIndexName(unsigned SubIdx) const { 401 assert(SubIdx && SubIdx < getNumSubRegIndices() && 402 "This is not a subregister index"); 403 return SubRegIndexNames[SubIdx-1]; 404 } 405 406 /// Get the size of the bit range covered by a sub-register index. 407 /// If the index isn't continuous, return the sum of the sizes of its parts. 408 /// If the index is used to access subregisters of different sizes, return -1. 409 unsigned getSubRegIdxSize(unsigned Idx) const; 410 411 /// Get the offset of the bit range covered by a sub-register index. 412 /// If an Offset doesn't make sense (the index isn't continuous, or is used to 413 /// access sub-registers at different offsets), return -1. 
414 unsigned getSubRegIdxOffset(unsigned Idx) const; 415 416 /// Return a bitmask representing the parts of a register that are covered by 417 /// SubIdx \see LaneBitmask. 418 /// 419 /// SubIdx == 0 is allowed, it has the lane mask ~0u. 420 LaneBitmask getSubRegIndexLaneMask(unsigned SubIdx) const { 421 assert(SubIdx < getNumSubRegIndices() && "This is not a subregister index"); 422 return SubRegIndexLaneMasks[SubIdx]; 423 } 424 425 /// Try to find one or more subregister indexes to cover \p LaneMask. 426 /// 427 /// If this is possible, returns true and appends the best matching set of 428 /// indexes to \p Indexes. If this is not possible, returns false. 429 bool getCoveringSubRegIndexes(const TargetRegisterClass *RC, 430 LaneBitmask LaneMask, 431 SmallVectorImpl<unsigned> &Indexes) const; 432 433 /// The lane masks returned by getSubRegIndexLaneMask() above can only be 434 /// used to determine if sub-registers overlap - they can't be used to 435 /// determine if a set of sub-registers completely cover another 436 /// sub-register. 437 /// 438 /// The X86 general purpose registers have two lanes corresponding to the 439 /// sub_8bit and sub_8bit_hi sub-registers. Both sub_32bit and sub_16bit have 440 /// lane masks '3', but the sub_16bit sub-register doesn't fully cover the 441 /// sub_32bit sub-register. 442 /// 443 /// On the other hand, the ARM NEON lanes fully cover their registers: The 444 /// dsub_0 sub-register is completely covered by the ssub_0 and ssub_1 lanes. 445 /// This is related to the CoveredBySubRegs property on register definitions. 446 /// 447 /// This function returns a bit mask of lanes that completely cover their 448 /// sub-registers. More precisely, given: 449 /// 450 /// Covering = getCoveringLanes(); 451 /// MaskA = getSubRegIndexLaneMask(SubA); 452 /// MaskB = getSubRegIndexLaneMask(SubB); 453 /// 454 /// If (MaskA & ~(MaskB & Covering)) == 0, then SubA is completely covered by 455 /// SubB. 
456 LaneBitmask getCoveringLanes() const { return CoveringLanes; } 457 458 /// Returns true if the two registers are equal or alias each other. 459 /// The registers may be virtual registers. 460 bool regsOverlap(Register RegA, Register RegB) const { 461 if (RegA == RegB) 462 return true; 463 if (RegA.isPhysical() && RegB.isPhysical()) 464 return MCRegisterInfo::regsOverlap(RegA.asMCReg(), RegB.asMCReg()); 465 return false; 466 } 467 468 /// Returns true if Reg contains RegUnit. 469 bool hasRegUnit(MCRegister Reg, MCRegUnit RegUnit) const { 470 for (MCRegUnit Unit : regunits(Reg)) 471 if (Unit == RegUnit) 472 return true; 473 return false; 474 } 475 476 /// Returns the original SrcReg unless it is the target of a copy-like 477 /// operation, in which case we chain backwards through all such operations 478 /// to the ultimate source register. If a physical register is encountered, 479 /// we stop the search. 480 virtual Register lookThruCopyLike(Register SrcReg, 481 const MachineRegisterInfo *MRI) const; 482 483 /// Find the original SrcReg unless it is the target of a copy-like operation, 484 /// in which case we chain backwards through all such operations to the 485 /// ultimate source register. If a physical register is encountered, we stop 486 /// the search. 487 /// Return the original SrcReg if all the definitions in the chain only have 488 /// one user and not a physical register. 489 virtual Register 490 lookThruSingleUseCopyChain(Register SrcReg, 491 const MachineRegisterInfo *MRI) const; 492 493 /// Return a null-terminated list of all of the callee-saved registers on 494 /// this target. The register should be in the order of desired callee-save 495 /// stack frame offset. The first register is closest to the incoming stack 496 /// pointer if stack grows down, and vice versa. 497 /// Notice: This function does not take into account disabled CSRs. 
498 /// In most cases you will want to use instead the function 499 /// getCalleeSavedRegs that is implemented in MachineRegisterInfo. 500 virtual const MCPhysReg* 501 getCalleeSavedRegs(const MachineFunction *MF) const = 0; 502 503 /// Return a null-terminated list of all of the callee-saved registers on 504 /// this target when IPRA is on. The list should include any non-allocatable 505 /// registers that the backend uses and assumes will be saved by all calling 506 /// conventions. This is typically the ISA-standard frame pointer, but could 507 /// include the thread pointer, TOC pointer, or base pointer for different 508 /// targets. 509 virtual const MCPhysReg *getIPRACSRegs(const MachineFunction *MF) const { 510 return nullptr; 511 } 512 513 /// Return a mask of call-preserved registers for the given calling convention 514 /// on the current function. The mask should include all call-preserved 515 /// aliases. This is used by the register allocator to determine which 516 /// registers can be live across a call. 517 /// 518 /// The mask is an array containing (TRI::getNumRegs()+31)/32 entries. 519 /// A set bit indicates that all bits of the corresponding register are 520 /// preserved across the function call. The bit mask is expected to be 521 /// sub-register complete, i.e. if A is preserved, so are all its 522 /// sub-registers. 523 /// 524 /// Bits are numbered from the LSB, so the bit for physical register Reg can 525 /// be found as (Mask[Reg / 32] >> Reg % 32) & 1. 526 /// 527 /// A NULL pointer means that no register mask will be used, and call 528 /// instructions should use implicit-def operands to indicate call clobbered 529 /// registers. 530 /// 531 virtual const uint32_t *getCallPreservedMask(const MachineFunction &MF, 532 CallingConv::ID) const { 533 // The default mask clobbers everything. All targets should override. 
534 return nullptr; 535 } 536 537 /// Return a register mask for the registers preserved by the unwinder, 538 /// or nullptr if no custom mask is needed. 539 virtual const uint32_t * 540 getCustomEHPadPreservedMask(const MachineFunction &MF) const { 541 return nullptr; 542 } 543 544 /// Return a register mask that clobbers everything. 545 virtual const uint32_t *getNoPreservedMask() const { 546 llvm_unreachable("target does not provide no preserved mask"); 547 } 548 549 /// Return a list of all of the registers which are clobbered "inside" a call 550 /// to the given function. For example, these might be needed for PLT 551 /// sequences of long-branch veneers. 552 virtual ArrayRef<MCPhysReg> 553 getIntraCallClobberedRegs(const MachineFunction *MF) const { 554 return {}; 555 } 556 557 /// Return true if all bits that are set in mask \p mask0 are also set in 558 /// \p mask1. 559 bool regmaskSubsetEqual(const uint32_t *mask0, const uint32_t *mask1) const; 560 561 /// Return all the call-preserved register masks defined for this target. 562 virtual ArrayRef<const uint32_t *> getRegMasks() const = 0; 563 virtual ArrayRef<const char *> getRegMaskNames() const = 0; 564 565 /// Returns a bitset indexed by physical register number indicating if a 566 /// register is a special register that has particular uses and should be 567 /// considered unavailable at all times, e.g. stack pointer, return address. 568 /// A reserved register: 569 /// - is not allocatable 570 /// - is considered always live 571 /// - is ignored by liveness tracking 572 /// It is often necessary to reserve the super registers of a reserved 573 /// register as well, to avoid them getting allocated indirectly. You may use 574 /// markSuperRegs() and checkAllSuperRegsMarked() in this case. 
575 virtual BitVector getReservedRegs(const MachineFunction &MF) const = 0; 576 577 /// Returns either a string explaining why the given register is reserved for 578 /// this function, or an empty optional if no explanation has been written. 579 /// The absence of an explanation does not mean that the register is not 580 /// reserved (meaning, you should check that PhysReg is in fact reserved 581 /// before calling this). 582 virtual std::optional<std::string> 583 explainReservedReg(const MachineFunction &MF, MCRegister PhysReg) const { 584 return {}; 585 } 586 587 /// Returns false if we can't guarantee that Physreg, specified as an IR asm 588 /// clobber constraint, will be preserved across the statement. 589 virtual bool isAsmClobberable(const MachineFunction &MF, 590 MCRegister PhysReg) const { 591 return true; 592 } 593 594 /// Returns true if PhysReg cannot be written to in inline asm statements. 595 virtual bool isInlineAsmReadOnlyReg(const MachineFunction &MF, 596 unsigned PhysReg) const { 597 return false; 598 } 599 600 /// Returns true if PhysReg is unallocatable and constant throughout the 601 /// function. Used by MachineRegisterInfo::isConstantPhysReg(). 602 virtual bool isConstantPhysReg(MCRegister PhysReg) const { return false; } 603 604 /// Returns true if the register class is considered divergent. 605 virtual bool isDivergentRegClass(const TargetRegisterClass *RC) const { 606 return false; 607 } 608 609 /// Returns true if the register is considered uniform. 610 virtual bool isUniformReg(const MachineRegisterInfo &MRI, 611 const RegisterBankInfo &RBI, Register Reg) const { 612 return false; 613 } 614 615 /// Returns true if MachineLoopInfo should analyze the given physreg 616 /// for loop invariance. 617 virtual bool shouldAnalyzePhysregInMachineLoopInfo(MCRegister R) const { 618 return false; 619 } 620 621 /// Physical registers that may be modified within a function but are 622 /// guaranteed to be restored before any uses. 
This is useful for targets that 623 /// have call sequences where a GOT register may be updated by the caller 624 /// prior to a call and is guaranteed to be restored (also by the caller) 625 /// after the call. 626 virtual bool isCallerPreservedPhysReg(MCRegister PhysReg, 627 const MachineFunction &MF) const { 628 return false; 629 } 630 631 /// This is a wrapper around getCallPreservedMask(). 632 /// Return true if the register is preserved after the call. 633 virtual bool isCalleeSavedPhysReg(MCRegister PhysReg, 634 const MachineFunction &MF) const; 635 636 /// Returns true if PhysReg can be used as an argument to a function. 637 virtual bool isArgumentRegister(const MachineFunction &MF, 638 MCRegister PhysReg) const { 639 return false; 640 } 641 642 /// Returns true if PhysReg is a fixed register. 643 virtual bool isFixedRegister(const MachineFunction &MF, 644 MCRegister PhysReg) const { 645 return false; 646 } 647 648 /// Returns true if PhysReg is a general purpose register. 649 virtual bool isGeneralPurposeRegister(const MachineFunction &MF, 650 MCRegister PhysReg) const { 651 return false; 652 } 653 654 /// Returns true if RC is a class/subclass of general purpose register. 655 virtual bool 656 isGeneralPurposeRegisterClass(const TargetRegisterClass *RC) const { 657 return false; 658 } 659 660 /// Prior to adding the live-out mask to a stackmap or patchpoint 661 /// instruction, provide the target the opportunity to adjust it (mainly to 662 /// remove pseudo-registers that should be ignored). 663 virtual void adjustStackMapLiveOutMask(uint32_t *Mask) const {} 664 665 /// Return a super-register of the specified register 666 /// Reg so its sub-register of index SubIdx is Reg. 
667 MCRegister getMatchingSuperReg(MCRegister Reg, unsigned SubIdx, 668 const TargetRegisterClass *RC) const { 669 return MCRegisterInfo::getMatchingSuperReg(Reg, SubIdx, RC->MC); 670 } 671 672 /// Return a subclass of the specified register 673 /// class A so that each register in it has a sub-register of the 674 /// specified sub-register index which is in the specified register class B. 675 /// 676 /// TableGen will synthesize missing A sub-classes. 677 virtual const TargetRegisterClass * 678 getMatchingSuperRegClass(const TargetRegisterClass *A, 679 const TargetRegisterClass *B, unsigned Idx) const; 680 681 // For a copy-like instruction that defines a register of class DefRC with 682 // subreg index DefSubReg, reading from another source with class SrcRC and 683 // subregister SrcSubReg return true if this is a preferable copy 684 // instruction or an earlier use should be used. 685 virtual bool shouldRewriteCopySrc(const TargetRegisterClass *DefRC, 686 unsigned DefSubReg, 687 const TargetRegisterClass *SrcRC, 688 unsigned SrcSubReg) const; 689 690 /// Returns the largest legal sub-class of RC that 691 /// supports the sub-register index Idx. 692 /// If no such sub-class exists, return NULL. 693 /// If all registers in RC already have an Idx sub-register, return RC. 694 /// 695 /// TableGen generates a version of this function that is good enough in most 696 /// cases. Targets can override if they have constraints that TableGen 697 /// doesn't understand. For example, the x86 sub_8bit sub-register index is 698 /// supported by the full GR32 register class in 64-bit mode, but only by the 699 /// GR32_ABCD regiister class in 32-bit mode. 700 /// 701 /// TableGen will synthesize missing RC sub-classes. 
  virtual const TargetRegisterClass *
  getSubClassWithSubReg(const TargetRegisterClass *RC, unsigned Idx) const {
    assert(Idx == 0 && "Target has no sub-registers");
    return RC;
  }

  /// Return a register class that can be used for a subregister copy from/into
  /// \p SuperRC at \p SubRegIdx.
  virtual const TargetRegisterClass *
  getSubRegisterClass(const TargetRegisterClass *SuperRC,
                      unsigned SubRegIdx) const {
    return nullptr;
  }

  /// Return the subregister index you get from composing
  /// two subregister indices.
  ///
  /// The special null sub-register index composes as the identity.
  ///
  /// If R:a:b is the same register as R:c, then composeSubRegIndices(a, b)
  /// returns c. Note that composeSubRegIndices does not tell you about illegal
  /// compositions. If R does not have a subreg a, or R:a does not have a subreg
  /// b, composeSubRegIndices doesn't tell you.
  ///
  /// The ARM register Q0 has two D subregs dsub_0:D0 and dsub_1:D1. It also has
  /// ssub_0:S0 - ssub_3:S3 subregs.
  /// If you compose subreg indices dsub_1, ssub_0 you get ssub_2.
  unsigned composeSubRegIndices(unsigned a, unsigned b) const {
    // The null index is the identity for composition in either position.
    if (!a) return b;
    if (!b) return a;
    return composeSubRegIndicesImpl(a, b);
  }

  /// Transforms a LaneMask computed for one subregister to the lanemask that
  /// would have been computed when composing the subsubregisters with IdxA
  /// first. @sa composeSubRegIndices()
  LaneBitmask composeSubRegIndexLaneMask(unsigned IdxA,
                                         LaneBitmask Mask) const {
    // The null index composes as the identity.
    if (!IdxA)
      return Mask;
    return composeSubRegIndexLaneMaskImpl(IdxA, Mask);
  }

  /// Transform a lanemask given for a virtual register to the corresponding
  /// lanemask before using subregister with index \p IdxA.
  /// This is the reverse of composeSubRegIndexLaneMask(), assuming Mask is a
  /// valid lane mask (no invalid bits set) the following holds:
  /// X0 = composeSubRegIndexLaneMask(Idx, Mask)
  /// X1 = reverseComposeSubRegIndexLaneMask(Idx, X0)
  /// => X1 == Mask
  LaneBitmask reverseComposeSubRegIndexLaneMask(unsigned IdxA,
                                                LaneBitmask LaneMask) const {
    // The null index composes as the identity.
    if (!IdxA)
      return LaneMask;
    return reverseComposeSubRegIndexLaneMaskImpl(IdxA, LaneMask);
  }

  /// Debugging helper: dump register in human readable form to dbgs() stream.
  static void dumpReg(Register Reg, unsigned SubRegIndex = 0,
                      const TargetRegisterInfo *TRI = nullptr);

  /// Return target defined base register class for a physical register.
  /// This is the register class with the lowest BaseClassOrder containing the
  /// register.
  /// Will be nullptr if the register is not in any base register class.
  virtual const TargetRegisterClass *getPhysRegBaseClass(MCRegister Reg) const {
    return nullptr;
  }

protected:
  /// Overridden by TableGen in targets that have sub-registers.
  virtual unsigned composeSubRegIndicesImpl(unsigned, unsigned) const {
    llvm_unreachable("Target has no sub-registers");
  }

  /// Overridden by TableGen in targets that have sub-registers.
  virtual LaneBitmask
  composeSubRegIndexLaneMaskImpl(unsigned, LaneBitmask) const {
    llvm_unreachable("Target has no sub-registers");
  }

  /// Overridden by TableGen in targets that have sub-registers.
  virtual LaneBitmask reverseComposeSubRegIndexLaneMaskImpl(unsigned,
                                                            LaneBitmask) const {
    llvm_unreachable("Target has no sub-registers");
  }

  /// Return the register cost table index. This implementation is sufficient
  /// for most architectures and can be overridden by targets in case there are
  /// multiple cost values associated with each register.
  virtual unsigned getRegisterCostTableIndex(const MachineFunction &MF) const {
    return 0;
  }

public:
  /// Find a common super-register class if it exists.
  ///
  /// Find a register class, SuperRC and two sub-register indices, PreA and
  /// PreB, such that:
  ///
  /// 1. PreA + SubA == PreB + SubB (using composeSubRegIndices()), and
  ///
  /// 2. For all Reg in SuperRC: Reg:PreA in RCA and Reg:PreB in RCB, and
  ///
  /// 3. SuperRC->getSize() >= max(RCA->getSize(), RCB->getSize()).
  ///
  /// SuperRC will be chosen such that no super-class of SuperRC satisfies the
  /// requirements, and there is no register class with a smaller spill size
  /// that satisfies the requirements.
  ///
  /// SubA and SubB must not be 0. Use getMatchingSuperRegClass() instead.
  ///
  /// Either of the PreA and PreB sub-register indices may be returned as 0. In
  /// that case, the returned register class will be a sub-class of the
  /// corresponding argument register class.
  ///
  /// The function returns NULL if no register class can be found.
  const TargetRegisterClass*
  getCommonSuperRegClass(const TargetRegisterClass *RCA, unsigned SubA,
                         const TargetRegisterClass *RCB, unsigned SubB,
                         unsigned &PreA, unsigned &PreB) const;

  //===--------------------------------------------------------------------===//
  // Register Class Information
  //
protected:
  // Look up the per-HwMode RegClassInfo record for RC. The table is laid out
  // as HwMode-major: one full run of register classes per hardware mode.
  const RegClassInfo &getRegClassInfo(const TargetRegisterClass &RC) const {
    return RCInfos[getNumRegClasses() * HwMode + RC.getID()];
  }

public:
  /// Register class iterators
  regclass_iterator regclass_begin() const { return RegClassBegin; }
  regclass_iterator regclass_end() const { return RegClassEnd; }
  iterator_range<regclass_iterator> regclasses() const {
    return make_range(regclass_begin(), regclass_end());
  }

  /// Return the number of register classes for this target.
  unsigned getNumRegClasses() const {
    return (unsigned)(regclass_end()-regclass_begin());
  }

  /// Returns the register class associated with the enumeration value.
  /// See class MCOperandInfo.
  const TargetRegisterClass *getRegClass(unsigned i) const {
    assert(i < getNumRegClasses() && "Register Class ID out of range");
    return RegClassBegin[i];
  }

  /// Returns the name of the register class.
  const char *getRegClassName(const TargetRegisterClass *Class) const {
    return MCRegisterInfo::getRegClassName(Class->MC);
  }

  /// Find the largest common subclass of A and B.
  /// Return NULL if there is no common subclass.
  const TargetRegisterClass *
  getCommonSubClass(const TargetRegisterClass *A,
                    const TargetRegisterClass *B) const;

  /// Returns a TargetRegisterClass used for pointer values.
  /// If a target supports multiple different pointer register classes,
  /// kind specifies which one is indicated.
  virtual const TargetRegisterClass *
  getPointerRegClass(const MachineFunction &MF, unsigned Kind=0) const {
    llvm_unreachable("Target didn't implement getPointerRegClass!");
  }

  /// Returns a legal register class to copy a register in the specified class
  /// to or from. If it is possible to copy the register directly without using
  /// a cross register class copy, return the specified RC. Returns NULL if it
  /// is not possible to copy between two registers of the specified class.
  virtual const TargetRegisterClass *
  getCrossCopyRegClass(const TargetRegisterClass *RC) const {
    return RC;
  }

  /// Returns the largest super class of RC that is legal to use in the current
  /// sub-target and has the same spill size.
  /// The returned register class can be used to create virtual registers which
  /// means that all its registers can be copied and spilled.
  virtual const TargetRegisterClass *
  getLargestLegalSuperClass(const TargetRegisterClass *RC,
                            const MachineFunction &) const {
    /// The default implementation is very conservative and doesn't allow the
    /// register allocator to inflate register classes.
    return RC;
  }

  /// Return the register pressure "high water mark" for the specific register
  /// class. The scheduler is in high register pressure mode (for the specific
  /// register class) if it goes over the limit.
  ///
  /// Note: this is the old register pressure model that relies on a manually
  /// specified representative register class per value type.
  virtual unsigned getRegPressureLimit(const TargetRegisterClass *RC,
                                       MachineFunction &MF) const {
    return 0;
  }

  /// Return a heuristic for the machine scheduler to compare the profitability
  /// of increasing one register pressure set versus another. The scheduler
  /// will prefer increasing the register pressure of the set which returns
  /// the largest value for this function.
  virtual unsigned getRegPressureSetScore(const MachineFunction &MF,
                                          unsigned PSetID) const {
    return PSetID;
  }

  /// Get the weight in units of pressure for this register class.
  virtual const RegClassWeight &getRegClassWeight(
    const TargetRegisterClass *RC) const = 0;

  /// Returns size in bits of a phys/virtual/generic register.
  TypeSize getRegSizeInBits(Register Reg, const MachineRegisterInfo &MRI) const;

  /// Get the weight in units of pressure for this register unit.
  virtual unsigned getRegUnitWeight(unsigned RegUnit) const = 0;

  /// Get the number of dimensions of register pressure.
  virtual unsigned getNumRegPressureSets() const = 0;

  /// Get the name of this register unit pressure set.
  virtual const char *getRegPressureSetName(unsigned Idx) const = 0;

  /// Get the register unit pressure limit for this dimension.
  /// This limit must be adjusted dynamically for reserved registers.
  virtual unsigned getRegPressureSetLimit(const MachineFunction &MF,
                                          unsigned Idx) const = 0;

  /// Get the dimensions of register pressure impacted by this register class.
  /// Returns a -1 terminated array of pressure set IDs.
  virtual const int *getRegClassPressureSets(
    const TargetRegisterClass *RC) const = 0;

  /// Get the dimensions of register pressure impacted by this register unit.
  /// Returns a -1 terminated array of pressure set IDs.
  virtual const int *getRegUnitPressureSets(unsigned RegUnit) const = 0;

  /// Get a list of 'hint' registers that the register allocator should try
  /// first when allocating a physical register for the virtual register
  /// VirtReg. These registers are effectively moved to the front of the
  /// allocation order. If true is returned, regalloc will try to only use
  /// hints to the greatest extent possible even if it means spilling.
  ///
  /// The Order argument is the allocation order for VirtReg's register class
  /// as returned from RegisterClassInfo::getOrder(). The hint registers must
  /// come from Order, and they must not be reserved.
  ///
  /// The default implementation of this function will only add target
  /// independent register allocation hints. Targets that override this
  /// function should typically call this default implementation as well and
  /// expect to see generic copy hints added.
  virtual bool
  getRegAllocationHints(Register VirtReg, ArrayRef<MCPhysReg> Order,
                        SmallVectorImpl<MCPhysReg> &Hints,
                        const MachineFunction &MF,
                        const VirtRegMap *VRM = nullptr,
                        const LiveRegMatrix *Matrix = nullptr) const;

  /// A callback to allow target a chance to update register allocation hints
  /// when a register is "changed" (e.g. coalesced) to another register.
  /// e.g. On ARM, some virtual registers should target register pairs,
  /// if one of pair is coalesced to another register, the allocation hint of
  /// the other half of the pair should be changed to point to the new register.
  virtual void updateRegAllocHint(Register Reg, Register NewReg,
                                  MachineFunction &MF) const {
    // Do nothing.
  }

  /// Allow the target to reverse allocation order of local live ranges. This
  /// will generally allocate shorter local live ranges first. For targets with
  /// many registers, this could reduce regalloc compile time by a large
  /// factor. It is disabled by default for three reasons:
  /// (1) Top-down allocation is simpler and easier to debug for targets that
  ///     don't benefit from reversing the order.
  /// (2) Bottom-up allocation could result in poor eviction decisions on some
  ///     targets affecting the performance of compiled code.
  /// (3) Bottom-up allocation is no longer guaranteed to optimally color.
  virtual bool reverseLocalAssignment() const { return false; }

  /// Allow the target to override the cost of using a callee-saved register for
  /// the first time. Default value of 0 means we will use a callee-saved
  /// register if it is available.
  virtual unsigned getCSRFirstUseCost() const { return 0; }

  /// Returns true if the target requires (and can make use of) the register
  /// scavenger.
  virtual bool requiresRegisterScavenging(const MachineFunction &MF) const {
    return false;
  }

  /// Returns true if the target wants to use frame pointer based accesses to
  /// spill to the scavenger emergency spill slot.
  virtual bool useFPForScavengingIndex(const MachineFunction &MF) const {
    return true;
  }

  /// Returns true if the target requires post PEI scavenging of registers for
  /// materializing frame index constants.
  virtual bool requiresFrameIndexScavenging(const MachineFunction &MF) const {
    return false;
  }

  /// Returns true if the target requires using the RegScavenger directly for
  /// frame elimination despite using requiresFrameIndexScavenging.
  virtual bool requiresFrameIndexReplacementScavenging(
      const MachineFunction &MF) const {
    return false;
  }

  /// Returns true if the target wants the LocalStackAllocation pass to be run
  /// and virtual base registers used for more efficient stack access.
  virtual bool requiresVirtualBaseRegisters(const MachineFunction &MF) const {
    return false;
  }

  /// Return true if target has reserved a spill slot in the stack frame of
  /// the given function for the specified register. e.g. On x86, if the frame
  /// register is required, the first fixed stack object is reserved as its
  /// spill slot. This tells PEI not to create a new stack frame
  /// object for the given register. It should be called only after
  /// determineCalleeSaves().
  virtual bool hasReservedSpillSlot(const MachineFunction &MF, Register Reg,
                                    int &FrameIdx) const {
    return false;
  }

  /// Returns true if the live-ins should be tracked after register allocation.
  virtual bool trackLivenessAfterRegAlloc(const MachineFunction &MF) const {
    return true;
  }

  /// True if the stack can be realigned for the target.
  virtual bool canRealignStack(const MachineFunction &MF) const;

  /// True if storage within the function requires the stack pointer to be
  /// aligned more than the normal calling convention calls for.
  virtual bool shouldRealignStack(const MachineFunction &MF) const;

  /// True if stack realignment is required and still possible.
  bool hasStackRealignment(const MachineFunction &MF) const {
    return shouldRealignStack(MF) && canRealignStack(MF);
  }

  /// Get the offset from the referenced frame index in the instruction,
  /// if there is one.
  virtual int64_t getFrameIndexInstrOffset(const MachineInstr *MI,
                                           int Idx) const {
    return 0;
  }

  /// Returns true if the instruction's frame index reference would be better
  /// served by a base register other than FP or SP.
  /// Used by LocalStackFrameAllocation to determine which frame index
  /// references it should create new base registers for.
  virtual bool needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const {
    return false;
  }

  /// Insert defining instruction(s) for a pointer to FrameIdx before
  /// insertion point I. Return materialized frame pointer.
  virtual Register materializeFrameBaseRegister(MachineBasicBlock *MBB,
                                                int FrameIdx,
                                                int64_t Offset) const {
    llvm_unreachable("materializeFrameBaseRegister does not exist on this "
                     "target");
  }

  /// Resolve a frame index operand of an instruction
  /// to reference the indicated base register plus offset instead.
  virtual void resolveFrameIndex(MachineInstr &MI, Register BaseReg,
                                 int64_t Offset) const {
    llvm_unreachable("resolveFrameIndex does not exist on this target");
  }

  /// Determine whether a given base register plus offset immediate is
  /// encodable to resolve a frame index.
  virtual bool isFrameOffsetLegal(const MachineInstr *MI, Register BaseReg,
                                  int64_t Offset) const {
    llvm_unreachable("isFrameOffsetLegal does not exist on this target");
  }

  /// Gets the DWARF expression opcodes for \p Offset.
  virtual void getOffsetOpcodes(const StackOffset &Offset,
                                SmallVectorImpl<uint64_t> &Ops) const;

  /// Prepends a DWARF expression for \p Offset to DIExpression \p Expr.
  DIExpression *
  prependOffsetExpression(const DIExpression *Expr, unsigned PrependFlags,
                          const StackOffset &Offset) const;

  /// Spill the register so it can be used by the register scavenger.
  /// Return true if the register was spilled, false otherwise.
  /// If this function does not spill the register, the scavenger
  /// will instead spill it to the emergency spill slot.
  virtual bool saveScavengerRegister(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator I,
                                     MachineBasicBlock::iterator &UseMI,
                                     const TargetRegisterClass *RC,
                                     Register Reg) const {
    return false;
  }

  /// Process frame indices in reverse block order. This changes the behavior of
  /// the RegScavenger passed to eliminateFrameIndex. If this is true targets
  /// should scavengeRegisterBackwards in eliminateFrameIndex. New targets
  /// should prefer reverse scavenging behavior.
  /// TODO: Remove this when all targets return true.
  virtual bool eliminateFrameIndicesBackwards() const { return true; }

  /// This method must be overridden to eliminate abstract frame indices from
  /// instructions which may use them. The instruction referenced by the
  /// iterator contains an MO_FrameIndex operand which must be eliminated by
  /// this method. This method may modify or replace the specified instruction,
  /// as long as it keeps the iterator pointing at the finished product.
  /// SPAdj is the SP adjustment due to call frame setup instruction.
  /// FIOperandNum is the FI operand number.
  /// Returns true if the current instruction was removed and the iterator
  /// is no longer valid
  virtual bool eliminateFrameIndex(MachineBasicBlock::iterator MI,
                                   int SPAdj, unsigned FIOperandNum,
                                   RegScavenger *RS = nullptr) const = 0;

  /// Return the assembly name for \p Reg.
  virtual StringRef getRegAsmName(MCRegister Reg) const {
    // FIXME: We are assuming that the assembly name is equal to the TableGen
    // name converted to lower case
    //
    // The TableGen name is the name of the definition for this register in the
    // target's tablegen files. For example, the TableGen name of
    // def EAX : Register <...>; is "EAX"
    return StringRef(getName(Reg));
  }

  //===--------------------------------------------------------------------===//
  /// Subtarget Hooks

  /// SrcRC and DstRC will be morphed into NewRC if this returns true.
  virtual bool shouldCoalesce(MachineInstr *MI,
                              const TargetRegisterClass *SrcRC,
                              unsigned SubReg,
                              const TargetRegisterClass *DstRC,
                              unsigned DstSubReg,
                              const TargetRegisterClass *NewRC,
                              LiveIntervals &LIS) const
  { return true; }

  /// Region split has a high compile time cost especially for large live range.
  /// This method is used to decide whether or not \p VirtReg should
  /// go through this expensive splitting heuristic.
  virtual bool shouldRegionSplitForVirtReg(const MachineFunction &MF,
                                           const LiveInterval &VirtReg) const;

  /// Last chance recoloring has a high compile time cost especially for
  /// targets with a lot of registers.
  /// This method is used to decide whether or not \p VirtReg should
  /// go through this expensive heuristic.
  /// When this target hook is hit, by returning false, there is a high
  /// chance that the register allocation will fail altogether (usually with
  /// "ran out of registers").
  /// That said, this error usually points to another problem in the
  /// optimization pipeline.
  virtual bool
  shouldUseLastChanceRecoloringForVirtReg(const MachineFunction &MF,
                                          const LiveInterval &VirtReg) const {
    return true;
  }

  /// Deferred spilling delays the spill insertion of a virtual register
  /// after every other allocation. By deferring the spilling, it is
  /// sometimes possible to eliminate that spilling altogether because
  /// something else could have been eliminated, thus leaving some space
  /// for the virtual register.
  /// However, this comes with a compile time impact because it adds one
  /// more stage to the greedy register allocator.
  /// This method is used to decide whether \p VirtReg should use the deferred
  /// spilling stage instead of being spilled right away.
  virtual bool
  shouldUseDeferredSpillingForVirtReg(const MachineFunction &MF,
                                      const LiveInterval &VirtReg) const {
    return false;
  }

  /// When prioritizing live ranges in register allocation, if this hook returns
  /// true then the AllocationPriority of the register class will be treated as
  /// more important than whether the range is local to a basic block or global.
  virtual bool isNonallocatableRegisterCalleeSave(MCRegister Reg) const {
    return false;
  }

  /// Map a target-specific virtual-register flag name to its encoded value,
  /// or std::nullopt if the target does not recognize the name.
  virtual std::optional<uint8_t> getVRegFlagValue(StringRef Name) const {
    return {};
  }

  /// Return the names of the target-specific flags set on \p Reg, if any.
  /// The default implementation reports no flags.
  virtual SmallVector<StringLiteral>
  getVRegFlagsOfReg(Register Reg, const MachineFunction &MF) const {
    return {};
  }
};

//===----------------------------------------------------------------------===//
//                           SuperRegClassIterator
//===----------------------------------------------------------------------===//
//
// Iterate over the possible super-registers for a given register class. The
// iterator will visit a list of pairs (Idx, Mask) corresponding to the
// possible classes of super-registers.
//
// Each bit mask will have at least one set bit, and each set bit in Mask
// corresponds to a SuperRC such that:
//
//   For all Reg in SuperRC: Reg:Idx is in RC.
//
// The iterator can include (0, RC->getSubClassMask()) as the first entry which
// also satisfies the above requirement, assuming Reg:0 == Reg.
//
class SuperRegClassIterator {
  // Number of 32-bit words in each register-class bit mask.
  const unsigned RCMaskWords;
  unsigned SubReg = 0;
  // Idx walks the null-terminated SuperRegIndices list; nullptr marks the end.
  const uint16_t *Idx;
  const uint32_t *Mask;

public:
  /// Create a SuperRegClassIterator that visits all the super-register classes
  /// of RC. When IncludeSelf is set, also include the (0, sub-classes) entry.
  SuperRegClassIterator(const TargetRegisterClass *RC,
                        const TargetRegisterInfo *TRI,
                        bool IncludeSelf = false)
    : RCMaskWords((TRI->getNumRegClasses() + 31) / 32),
      Idx(RC->getSuperRegIndices()), Mask(RC->getSubClassMask()) {
    if (!IncludeSelf)
      ++*this;
  }

  /// Returns true if this iterator is still pointing at a valid entry.
  bool isValid() const { return Idx; }

  /// Returns the current sub-register index.
  unsigned getSubReg() const { return SubReg; }

  /// Returns the bit mask of register classes that getSubReg() projects into
  /// RC.
  /// See TargetRegisterClass::getSubClassMask() for how to use it.
  const uint32_t *getMask() const { return Mask; }

  /// Advance iterator to the next entry.
  void operator++() {
    assert(isValid() && "Cannot move iterator past end.");
    Mask += RCMaskWords;
    SubReg = *Idx++;
    // A zero sub-register index terminates the list.
    if (!SubReg)
      Idx = nullptr;
  }
};

//===----------------------------------------------------------------------===//
//                            BitMaskClassIterator
//===----------------------------------------------------------------------===//
/// This class encapsulates the logic to iterate over bitmask returned by
/// the various RegClass related APIs.
/// E.g., this class can be used to iterate over the subclasses provided by
/// TargetRegisterClass::getSubClassMask or SuperRegClassIterator::getMask.
class BitMaskClassIterator {
  /// Total number of register classes.
  const unsigned NumRegClasses;
  /// Base index of CurrentChunk.
  /// In other words, the number of bits we read to get at the
  /// beginning of that chunk.
  unsigned Base = 0;
  /// Adjusted base index of CurrentChunk.
  /// Base index + how many bits we read within CurrentChunk.
  unsigned Idx = 0;
  /// Current register class ID.
  unsigned ID = 0;
  /// Mask we are iterating over.
  const uint32_t *Mask;
  /// Current chunk of the Mask we are traversing.
  uint32_t CurrentChunk;

  /// Move ID to the next set bit.
  void moveToNextID() {
    // If the current chunk of memory is empty, move to the next one,
    // while making sure we do not go past the number of register
    // classes.
    while (!CurrentChunk) {
      // Move to the next chunk.
      Base += 32;
      if (Base >= NumRegClasses) {
        // Exhausted: park ID at the one-past-the-end sentinel.
        ID = NumRegClasses;
        return;
      }
      CurrentChunk = *++Mask;
      Idx = Base;
    }
    // Otherwise look for the first bit set from the right
    // (representation of the class ID is big endian).
    // See getSubClassMask for more details on the representation.
    unsigned Offset = llvm::countr_zero(CurrentChunk);
    // Add the Offset to the adjusted base number of this chunk: Idx.
    // This is the ID of the register class.
    ID = Idx + Offset;

    // Consume the zeros, if any, and the bit we just read
    // so that we are at the right spot for the next call.
    // Do not do Offset + 1 because Offset may be 31 and 32
    // will be UB for the shift, though in that case we could
    // have made the chunk being equal to 0, but that would
    // have introduced an if statement.
    moveNBits(Offset);
    moveNBits(1);
  }

  /// Move \p NumBits Bits forward in CurrentChunk.
  void moveNBits(unsigned NumBits) {
    // Shifting a 32-bit value by >= 32 is undefined behavior, hence the
    // two-step consumption in moveToNextID.
    assert(NumBits < 32 && "Undefined behavior spotted!");
    // Consume the bit we read for the next call.
    CurrentChunk >>= NumBits;
    // Adjust the base for the chunk.
    Idx += NumBits;
  }

public:
  /// Create a BitMaskClassIterator that visits all the register classes
  /// represented by \p Mask.
  ///
  /// \pre \p Mask != nullptr
  BitMaskClassIterator(const uint32_t *Mask, const TargetRegisterInfo &TRI)
      : NumRegClasses(TRI.getNumRegClasses()), Mask(Mask), CurrentChunk(*Mask) {
    // Move to the first ID.
    moveToNextID();
  }

  /// Returns true if this iterator is still pointing at a valid entry.
  bool isValid() const { return getID() != NumRegClasses; }

  /// Returns the current register class ID.
  unsigned getID() const { return ID; }

  /// Advance iterator to the next entry.
  void operator++() {
    assert(isValid() && "Cannot move iterator past end.");
    moveToNextID();
  }
};

// This is useful when building IndexedMaps keyed on virtual registers
struct VirtReg2IndexFunctor {
  using argument_type = Register;
  unsigned operator()(Register Reg) const {
    return Register::virtReg2Index(Reg);
  }
};

/// Prints virtual and physical registers with or without a TRI instance.
///
/// The format is:
///   %noreg          - NoRegister
///   %5              - a virtual register.
///   %5:sub_8bit     - a virtual register with sub-register index (with TRI).
///   %eax            - a physical register
///   %physreg17      - a physical register when no TRI instance given.
///
/// Usage: OS << printReg(Reg, TRI, SubRegIdx) << '\n';
Printable printReg(Register Reg, const TargetRegisterInfo *TRI = nullptr,
                   unsigned SubIdx = 0,
                   const MachineRegisterInfo *MRI = nullptr);

/// Create Printable object to print register units on a \ref raw_ostream.
///
/// Register units are named after their root registers:
///
///   al      - Single root.
///   fp0~st7 - Dual roots.
///
/// Usage: OS << printRegUnit(Unit, TRI) << '\n';
Printable printRegUnit(unsigned Unit, const TargetRegisterInfo *TRI);

/// Create Printable object to print virtual registers and physical
/// registers on a \ref raw_ostream.
Printable printVRegOrUnit(unsigned VRegOrUnit, const TargetRegisterInfo *TRI);

/// Create Printable object to print register classes or register banks
/// on a \ref raw_ostream.
Printable printRegClassOrBank(Register Reg, const MachineRegisterInfo &RegInfo,
                              const TargetRegisterInfo *TRI);

} // end namespace llvm

#endif // LLVM_CODEGEN_TARGETREGISTERINFO_H