//==- TargetRegisterInfo.cpp - Target Register Information Implementation --==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the TargetRegisterInfo interface.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/CodeGenTypes/MachineValueType.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/Function.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Printable.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <limits>
#include <utility>

#define DEBUG_TYPE "target-reg-info"

using namespace llvm;

static cl::opt<unsigned>
    HugeSizeForSplit("huge-size-for-split", cl::Hidden,
                     cl::desc("A threshold of live range size which may cause "
                              "high compile time cost in global splitting."),
                     cl::init(5000));
TargetRegisterInfo::TargetRegisterInfo(
    const TargetRegisterInfoDesc *ID, regclass_iterator RCB,
    regclass_iterator RCE, const char *const *SRINames,
    const SubRegCoveredBits *SubIdxRanges, const LaneBitmask *SRILaneMasks,
    LaneBitmask SRICoveringLanes, const RegClassInfo *const RCIs,
    const MVT::SimpleValueType *const RCVTLists, unsigned Mode)
    : InfoDesc(ID), SubRegIndexNames(SRINames), SubRegIdxRanges(SubIdxRanges),
      SubRegIndexLaneMasks(SRILaneMasks), RegClassBegin(RCB), RegClassEnd(RCE),
      CoveringLanes(SRICoveringLanes), RCInfos(RCIs), RCVTLists(RCVTLists),
      HwMode(Mode) {}

TargetRegisterInfo::~TargetRegisterInfo() = default;

/// Decide whether region (global) splitting should be attempted for
/// \p VirtReg. Returns false only for live ranges that have a unique def
/// which is trivially rematerializable and whose size exceeds the
/// -huge-size-for-split threshold; splitting those is a known compile-time
/// sink with little benefit, since the value can be rematerialized instead.
bool TargetRegisterInfo::shouldRegionSplitForVirtReg(
    const MachineFunction &MF, const LiveInterval &VirtReg) const {
  const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  // getUniqueVRegDef returns null if the vreg has multiple definitions.
  MachineInstr *MI = MRI.getUniqueVRegDef(VirtReg.reg());
  if (MI && TII->isTriviallyReMaterializable(*MI) &&
      VirtReg.size() > HugeSizeForSplit)
    return false;
  return true;
}

/// Set \p Reg and all of its super-registers in \p RegisterSet.
void TargetRegisterInfo::markSuperRegs(BitVector &RegisterSet,
                                       MCRegister Reg) const {
  for (MCPhysReg SR : superregs_inclusive(Reg))
    RegisterSet.set(SR);
}

/// Diagnostic helper: verify that for every register in \p RegisterSet, all
/// of its super-registers are also in the set (unless listed in
/// \p Exceptions). Prints an error to dbgs() and returns false on the first
/// violation found.
bool TargetRegisterInfo::checkAllSuperRegsMarked(const BitVector &RegisterSet,
    ArrayRef<MCPhysReg> Exceptions) const {
  // Check that all super registers of reserved regs are reserved as well.
  BitVector Checked(getNumRegs());
  for (unsigned Reg : RegisterSet.set_bits()) {
    // Already validated transitively via a sub-register's walk; skip.
    if (Checked[Reg])
      continue;
    for (MCPhysReg SR : superregs(Reg)) {
      if (!RegisterSet[SR] && !is_contained(Exceptions, Reg)) {
        dbgs() << "Error: Super register " << printReg(SR, this)
               << " of reserved register " << printReg(Reg, this)
               << " is not reserved.\n";
        return false;
      }

      // We transitively check superregs. So we can remember this for later
      // to avoid compile-time explosion in deep register hierarchies.
      Checked.set(SR);
    }
  }
  return true;
}

namespace llvm {

/// Create a Printable object that renders \p Reg in MIR-style syntax:
///   $noreg for the null register, SS#<n> for stack slots, %<name-or-index>
///   for virtual registers, $<lowercase-name> for known physical registers,
///   and an optional :<subreg-index-name> suffix when \p SubIdx is set.
Printable printReg(Register Reg, const TargetRegisterInfo *TRI,
                   unsigned SubIdx, const MachineRegisterInfo *MRI) {
  return Printable([Reg, TRI, SubIdx, MRI](raw_ostream &OS) {
    if (!Reg)
      OS << "$noreg";
    else if (Register::isStackSlot(Reg))
      OS << "SS#" << Register::stackSlot2Index(Reg);
    else if (Reg.isVirtual()) {
      // Prefer the user-visible vreg name when MRI is available and one is
      // assigned; otherwise fall back to the numeric vreg index.
      StringRef Name = MRI ? MRI->getVRegName(Reg) : "";
      if (Name != "") {
        OS << '%' << Name;
      } else {
        OS << '%' << Register::virtReg2Index(Reg);
      }
    } else if (!TRI)
      // No register info: print a generic physreg placeholder.
      OS << '$' << "physreg" << Reg;
    else if (Reg < TRI->getNumRegs()) {
      OS << '$';
      printLowerCase(TRI->getName(Reg), OS);
    } else
      llvm_unreachable("Register kind is unsupported.");

    if (SubIdx) {
      if (TRI)
        OS << ':' << TRI->getSubRegIndexName(SubIdx);
      else
        OS << ":sub(" << SubIdx << ')';
    }
  });
}

/// Create a Printable object for a register unit. With TRI available the
/// unit is shown as its root register names joined by '~'; without TRI it
/// prints as "Unit~<n>"; out-of-range units print as "BadUnit~<n>".
Printable printRegUnit(unsigned Unit, const TargetRegisterInfo *TRI) {
  return Printable([Unit, TRI](raw_ostream &OS) {
    // Generic printout when TRI is missing.
    if (!TRI) {
      OS << "Unit~" << Unit;
      return;
    }

    // Check for invalid register units.
    if (Unit >= TRI->getNumRegUnits()) {
      OS << "BadUnit~" << Unit;
      return;
    }

    // Normal units have at least one root.
    MCRegUnitRootIterator Roots(Unit, TRI);
    assert(Roots.isValid() && "Unit has no roots.");
    OS << TRI->getName(*Roots);
    for (++Roots; Roots.isValid(); ++Roots)
      OS << '~' << TRI->getName(*Roots);
  });
}

/// Create a Printable object for a value that is either a virtual register
/// (printed as %<index>) or a register unit (delegated to printRegUnit).
Printable printVRegOrUnit(unsigned Unit, const TargetRegisterInfo *TRI) {
  return Printable([Unit, TRI](raw_ostream &OS) {
    if (Register::isVirtualRegister(Unit)) {
      OS << '%' << Register::virtReg2Index(Unit);
    } else {
      OS << printRegUnit(Unit, TRI);
    }
  });
}

/// Create a Printable object for \p Reg's register class (lower-cased class
/// name), register bank (lower-cased bank name), or "_" when it has neither
/// (a generic virtual register, which must then carry a valid LLT type if it
/// has any definition).
Printable printRegClassOrBank(Register Reg, const MachineRegisterInfo &RegInfo,
                              const TargetRegisterInfo *TRI) {
  return Printable([Reg, &RegInfo, TRI](raw_ostream &OS) {
    if (RegInfo.getRegClassOrNull(Reg))
      OS << StringRef(TRI->getRegClassName(RegInfo.getRegClass(Reg))).lower();
    else if (RegInfo.getRegBankOrNull(Reg))
      OS << StringRef(RegInfo.getRegBankOrNull(Reg)->getName()).lower();
    else {
      OS << "_";
      assert((RegInfo.def_empty(Reg) || RegInfo.getType(Reg).isValid()) &&
             "Generic registers must have a valid type");
    }
  });
}

} // end namespace llvm

/// getAllocatableClass - Return the maximal subclass of the given register
/// class that is allocatable, or NULL.
const TargetRegisterClass *
TargetRegisterInfo::getAllocatableClass(const TargetRegisterClass *RC) const {
  if (!RC || RC->isAllocatable())
    return RC;

  // Sub-classes are topologically ordered, so the first allocatable one found
  // by the mask iterator is the maximal allocatable subclass.
  for (BitMaskClassIterator It(RC->getSubClassMask(), *this); It.isValid();
       ++It) {
    const TargetRegisterClass *SubRC = getRegClass(It.getID());
    if (SubRC->isAllocatable())
      return SubRC;
  }
  return nullptr;
}

/// getMinimalPhysRegClass - Returns the Register Class of a physical
/// register of the given type, picking the most sub register class of
/// the right type that contains this physreg.
208 const TargetRegisterClass * 209 TargetRegisterInfo::getMinimalPhysRegClass(MCRegister reg, MVT VT) const { 210 assert(Register::isPhysicalRegister(reg) && 211 "reg must be a physical register"); 212 213 // Pick the most sub register class of the right type that contains 214 // this physreg. 215 const TargetRegisterClass* BestRC = nullptr; 216 for (const TargetRegisterClass* RC : regclasses()) { 217 if ((VT == MVT::Other || isTypeLegalForClass(*RC, VT)) && 218 RC->contains(reg) && (!BestRC || BestRC->hasSubClass(RC))) 219 BestRC = RC; 220 } 221 222 assert(BestRC && "Couldn't find the register class"); 223 return BestRC; 224 } 225 226 const TargetRegisterClass * 227 TargetRegisterInfo::getMinimalPhysRegClassLLT(MCRegister reg, LLT Ty) const { 228 assert(Register::isPhysicalRegister(reg) && 229 "reg must be a physical register"); 230 231 // Pick the most sub register class of the right type that contains 232 // this physreg. 233 const TargetRegisterClass *BestRC = nullptr; 234 for (const TargetRegisterClass *RC : regclasses()) { 235 if ((!Ty.isValid() || isTypeLegalForClass(*RC, Ty)) && RC->contains(reg) && 236 (!BestRC || BestRC->hasSubClass(RC))) 237 BestRC = RC; 238 } 239 240 return BestRC; 241 } 242 243 /// getAllocatableSetForRC - Toggle the bits that represent allocatable 244 /// registers for the specific register class. 245 static void getAllocatableSetForRC(const MachineFunction &MF, 246 const TargetRegisterClass *RC, BitVector &R){ 247 assert(RC->isAllocatable() && "invalid for nonallocatable sets"); 248 ArrayRef<MCPhysReg> Order = RC->getRawAllocationOrder(MF); 249 for (MCPhysReg PR : Order) 250 R.set(PR); 251 } 252 253 BitVector TargetRegisterInfo::getAllocatableSet(const MachineFunction &MF, 254 const TargetRegisterClass *RC) const { 255 BitVector Allocatable(getNumRegs()); 256 if (RC) { 257 // A register class with no allocatable subclass returns an empty set. 
258 const TargetRegisterClass *SubClass = getAllocatableClass(RC); 259 if (SubClass) 260 getAllocatableSetForRC(MF, SubClass, Allocatable); 261 } else { 262 for (const TargetRegisterClass *C : regclasses()) 263 if (C->isAllocatable()) 264 getAllocatableSetForRC(MF, C, Allocatable); 265 } 266 267 // Mask out the reserved registers 268 const MachineRegisterInfo &MRI = MF.getRegInfo(); 269 const BitVector &Reserved = MRI.getReservedRegs(); 270 Allocatable.reset(Reserved); 271 272 return Allocatable; 273 } 274 275 static inline 276 const TargetRegisterClass *firstCommonClass(const uint32_t *A, 277 const uint32_t *B, 278 const TargetRegisterInfo *TRI) { 279 for (unsigned I = 0, E = TRI->getNumRegClasses(); I < E; I += 32) 280 if (unsigned Common = *A++ & *B++) 281 return TRI->getRegClass(I + llvm::countr_zero(Common)); 282 return nullptr; 283 } 284 285 const TargetRegisterClass * 286 TargetRegisterInfo::getCommonSubClass(const TargetRegisterClass *A, 287 const TargetRegisterClass *B) const { 288 // First take care of the trivial cases. 289 if (A == B) 290 return A; 291 if (!A || !B) 292 return nullptr; 293 294 // Register classes are ordered topologically, so the largest common 295 // sub-class it the common sub-class with the smallest ID. 296 return firstCommonClass(A->getSubClassMask(), B->getSubClassMask(), this); 297 } 298 299 const TargetRegisterClass * 300 TargetRegisterInfo::getMatchingSuperRegClass(const TargetRegisterClass *A, 301 const TargetRegisterClass *B, 302 unsigned Idx) const { 303 assert(A && B && "Missing register class"); 304 assert(Idx && "Bad sub-register index"); 305 306 // Find Idx in the list of super-register indices. 307 for (SuperRegClassIterator RCI(B, this); RCI.isValid(); ++RCI) 308 if (RCI.getSubReg() == Idx) 309 // The bit mask contains all register classes that are projected into B 310 // by Idx. Find a class that is also a sub-class of A. 
311 return firstCommonClass(RCI.getMask(), A->getSubClassMask(), this); 312 return nullptr; 313 } 314 315 const TargetRegisterClass *TargetRegisterInfo:: 316 getCommonSuperRegClass(const TargetRegisterClass *RCA, unsigned SubA, 317 const TargetRegisterClass *RCB, unsigned SubB, 318 unsigned &PreA, unsigned &PreB) const { 319 assert(RCA && SubA && RCB && SubB && "Invalid arguments"); 320 321 // Search all pairs of sub-register indices that project into RCA and RCB 322 // respectively. This is quadratic, but usually the sets are very small. On 323 // most targets like X86, there will only be a single sub-register index 324 // (e.g., sub_16bit projecting into GR16). 325 // 326 // The worst case is a register class like DPR on ARM. 327 // We have indices dsub_0..dsub_7 projecting into that class. 328 // 329 // It is very common that one register class is a sub-register of the other. 330 // Arrange for RCA to be the larger register so the answer will be found in 331 // the first iteration. This makes the search linear for the most common 332 // case. 333 const TargetRegisterClass *BestRC = nullptr; 334 unsigned *BestPreA = &PreA; 335 unsigned *BestPreB = &PreB; 336 if (getRegSizeInBits(*RCA) < getRegSizeInBits(*RCB)) { 337 std::swap(RCA, RCB); 338 std::swap(SubA, SubB); 339 std::swap(BestPreA, BestPreB); 340 } 341 342 // Also terminate the search one we have found a register class as small as 343 // RCA. 344 unsigned MinSize = getRegSizeInBits(*RCA); 345 346 for (SuperRegClassIterator IA(RCA, this, true); IA.isValid(); ++IA) { 347 unsigned FinalA = composeSubRegIndices(IA.getSubReg(), SubA); 348 for (SuperRegClassIterator IB(RCB, this, true); IB.isValid(); ++IB) { 349 // Check if a common super-register class exists for this index pair. 350 const TargetRegisterClass *RC = 351 firstCommonClass(IA.getMask(), IB.getMask(), this); 352 if (!RC || getRegSizeInBits(*RC) < MinSize) 353 continue; 354 355 // The indexes must compose identically: PreA+SubA == PreB+SubB. 
356 unsigned FinalB = composeSubRegIndices(IB.getSubReg(), SubB); 357 if (FinalA != FinalB) 358 continue; 359 360 // Is RC a better candidate than BestRC? 361 if (BestRC && getRegSizeInBits(*RC) >= getRegSizeInBits(*BestRC)) 362 continue; 363 364 // Yes, RC is the smallest super-register seen so far. 365 BestRC = RC; 366 *BestPreA = IA.getSubReg(); 367 *BestPreB = IB.getSubReg(); 368 369 // Bail early if we reached MinSize. We won't find a better candidate. 370 if (getRegSizeInBits(*BestRC) == MinSize) 371 return BestRC; 372 } 373 } 374 return BestRC; 375 } 376 377 /// Check if the registers defined by the pair (RegisterClass, SubReg) 378 /// share the same register file. 379 static bool shareSameRegisterFile(const TargetRegisterInfo &TRI, 380 const TargetRegisterClass *DefRC, 381 unsigned DefSubReg, 382 const TargetRegisterClass *SrcRC, 383 unsigned SrcSubReg) { 384 // Same register class. 385 if (DefRC == SrcRC) 386 return true; 387 388 // Both operands are sub registers. Check if they share a register class. 389 unsigned SrcIdx, DefIdx; 390 if (SrcSubReg && DefSubReg) { 391 return TRI.getCommonSuperRegClass(SrcRC, SrcSubReg, DefRC, DefSubReg, 392 SrcIdx, DefIdx) != nullptr; 393 } 394 395 // At most one of the register is a sub register, make it Src to avoid 396 // duplicating the test. 397 if (!SrcSubReg) { 398 std::swap(DefSubReg, SrcSubReg); 399 std::swap(DefRC, SrcRC); 400 } 401 402 // One of the register is a sub register, check if we can get a superclass. 403 if (SrcSubReg) 404 return TRI.getMatchingSuperRegClass(SrcRC, DefRC, SrcSubReg) != nullptr; 405 406 // Plain copy. 407 return TRI.getCommonSubClass(DefRC, SrcRC) != nullptr; 408 } 409 410 bool TargetRegisterInfo::shouldRewriteCopySrc(const TargetRegisterClass *DefRC, 411 unsigned DefSubReg, 412 const TargetRegisterClass *SrcRC, 413 unsigned SrcSubReg) const { 414 // If this source does not incur a cross register bank copy, use it. 
415 return shareSameRegisterFile(*this, DefRC, DefSubReg, SrcRC, SrcSubReg); 416 } 417 418 // Compute target-independent register allocator hints to help eliminate copies. 419 bool TargetRegisterInfo::getRegAllocationHints( 420 Register VirtReg, ArrayRef<MCPhysReg> Order, 421 SmallVectorImpl<MCPhysReg> &Hints, const MachineFunction &MF, 422 const VirtRegMap *VRM, const LiveRegMatrix *Matrix) const { 423 const MachineRegisterInfo &MRI = MF.getRegInfo(); 424 const std::pair<unsigned, SmallVector<Register, 4>> &Hints_MRI = 425 MRI.getRegAllocationHints(VirtReg); 426 427 SmallSet<Register, 32> HintedRegs; 428 // First hint may be a target hint. 429 bool Skip = (Hints_MRI.first != 0); 430 for (auto Reg : Hints_MRI.second) { 431 if (Skip) { 432 Skip = false; 433 continue; 434 } 435 436 // Target-independent hints are either a physical or a virtual register. 437 Register Phys = Reg; 438 if (VRM && Phys.isVirtual()) 439 Phys = VRM->getPhys(Phys); 440 441 // Don't add the same reg twice (Hints_MRI may contain multiple virtual 442 // registers allocated to the same physreg). 443 if (!HintedRegs.insert(Phys).second) 444 continue; 445 // Check that Phys is a valid hint in VirtReg's register class. 446 if (!Phys.isPhysical()) 447 continue; 448 if (MRI.isReserved(Phys)) 449 continue; 450 // Check that Phys is in the allocation order. We shouldn't heed hints 451 // from VirtReg's register class if they aren't in the allocation order. The 452 // target probably has a reason for removing the register. 453 if (!is_contained(Order, Phys)) 454 continue; 455 456 // All clear, tell the register allocator to prefer this register. 
457 Hints.push_back(Phys); 458 } 459 return false; 460 } 461 462 bool TargetRegisterInfo::isCalleeSavedPhysReg( 463 MCRegister PhysReg, const MachineFunction &MF) const { 464 if (PhysReg == 0) 465 return false; 466 const uint32_t *callerPreservedRegs = 467 getCallPreservedMask(MF, MF.getFunction().getCallingConv()); 468 if (callerPreservedRegs) { 469 assert(Register::isPhysicalRegister(PhysReg) && 470 "Expected physical register"); 471 return (callerPreservedRegs[PhysReg / 32] >> PhysReg % 32) & 1; 472 } 473 return false; 474 } 475 476 bool TargetRegisterInfo::canRealignStack(const MachineFunction &MF) const { 477 return MF.getFrameInfo().isStackRealignable(); 478 } 479 480 bool TargetRegisterInfo::shouldRealignStack(const MachineFunction &MF) const { 481 return MF.getFrameInfo().shouldRealignStack(); 482 } 483 484 bool TargetRegisterInfo::regmaskSubsetEqual(const uint32_t *mask0, 485 const uint32_t *mask1) const { 486 unsigned N = (getNumRegs()+31) / 32; 487 for (unsigned I = 0; I < N; ++I) 488 if ((mask0[I] & mask1[I]) != mask0[I]) 489 return false; 490 return true; 491 } 492 493 TypeSize 494 TargetRegisterInfo::getRegSizeInBits(Register Reg, 495 const MachineRegisterInfo &MRI) const { 496 const TargetRegisterClass *RC{}; 497 if (Reg.isPhysical()) { 498 // The size is not directly available for physical registers. 499 // Instead, we need to access a register class that contains Reg and 500 // get the size of that register class. 501 RC = getMinimalPhysRegClass(Reg); 502 assert(RC && "Unable to deduce the register class"); 503 return getRegSizeInBits(*RC); 504 } 505 LLT Ty = MRI.getType(Reg); 506 if (Ty.isValid()) 507 return Ty.getSizeInBits(); 508 509 // Since Reg is not a generic register, it may have a register class. 
510 RC = MRI.getRegClass(Reg); 511 assert(RC && "Unable to deduce the register class"); 512 return getRegSizeInBits(*RC); 513 } 514 515 bool TargetRegisterInfo::getCoveringSubRegIndexes( 516 const MachineRegisterInfo &MRI, const TargetRegisterClass *RC, 517 LaneBitmask LaneMask, SmallVectorImpl<unsigned> &NeededIndexes) const { 518 SmallVector<unsigned, 8> PossibleIndexes; 519 unsigned BestIdx = 0; 520 unsigned BestCover = 0; 521 522 for (unsigned Idx = 1, E = getNumSubRegIndices(); Idx < E; ++Idx) { 523 // Is this index even compatible with the given class? 524 if (getSubClassWithSubReg(RC, Idx) != RC) 525 continue; 526 LaneBitmask SubRegMask = getSubRegIndexLaneMask(Idx); 527 // Early exit if we found a perfect match. 528 if (SubRegMask == LaneMask) { 529 BestIdx = Idx; 530 break; 531 } 532 533 // The index must not cover any lanes outside \p LaneMask. 534 if ((SubRegMask & ~LaneMask).any()) 535 continue; 536 537 unsigned PopCount = SubRegMask.getNumLanes(); 538 PossibleIndexes.push_back(Idx); 539 if (PopCount > BestCover) { 540 BestCover = PopCount; 541 BestIdx = Idx; 542 } 543 } 544 545 // Abort if we cannot possibly implement the COPY with the given indexes. 546 if (BestIdx == 0) 547 return false; 548 549 NeededIndexes.push_back(BestIdx); 550 551 // Greedy heuristic: Keep iterating keeping the best covering subreg index 552 // each time. 553 LaneBitmask LanesLeft = LaneMask & ~getSubRegIndexLaneMask(BestIdx); 554 while (LanesLeft.any()) { 555 unsigned BestIdx = 0; 556 int BestCover = std::numeric_limits<int>::min(); 557 for (unsigned Idx : PossibleIndexes) { 558 LaneBitmask SubRegMask = getSubRegIndexLaneMask(Idx); 559 // Early exit if we found a perfect match. 560 if (SubRegMask == LanesLeft) { 561 BestIdx = Idx; 562 break; 563 } 564 565 // Do not cover already-covered lanes to avoid creating cycles 566 // in copy bundles (= bundle contains copies that write to the 567 // registers). 
568 if ((SubRegMask & ~LanesLeft).any()) 569 continue; 570 571 // Try to cover as many of the remaining lanes as possible. 572 const int Cover = (SubRegMask & LanesLeft).getNumLanes(); 573 if (Cover > BestCover) { 574 BestCover = Cover; 575 BestIdx = Idx; 576 } 577 } 578 579 if (BestIdx == 0) 580 return false; // Impossible to handle 581 582 NeededIndexes.push_back(BestIdx); 583 584 LanesLeft &= ~getSubRegIndexLaneMask(BestIdx); 585 } 586 587 return BestIdx; 588 } 589 590 unsigned TargetRegisterInfo::getSubRegIdxSize(unsigned Idx) const { 591 assert(Idx && Idx < getNumSubRegIndices() && 592 "This is not a subregister index"); 593 return SubRegIdxRanges[HwMode * getNumSubRegIndices() + Idx].Size; 594 } 595 596 unsigned TargetRegisterInfo::getSubRegIdxOffset(unsigned Idx) const { 597 assert(Idx && Idx < getNumSubRegIndices() && 598 "This is not a subregister index"); 599 return SubRegIdxRanges[HwMode * getNumSubRegIndices() + Idx].Offset; 600 } 601 602 Register 603 TargetRegisterInfo::lookThruCopyLike(Register SrcReg, 604 const MachineRegisterInfo *MRI) const { 605 while (true) { 606 const MachineInstr *MI = MRI->getVRegDef(SrcReg); 607 if (!MI->isCopyLike()) 608 return SrcReg; 609 610 Register CopySrcReg; 611 if (MI->isCopy()) 612 CopySrcReg = MI->getOperand(1).getReg(); 613 else { 614 assert(MI->isSubregToReg() && "Bad opcode for lookThruCopyLike"); 615 CopySrcReg = MI->getOperand(2).getReg(); 616 } 617 618 if (!CopySrcReg.isVirtual()) 619 return CopySrcReg; 620 621 SrcReg = CopySrcReg; 622 } 623 } 624 625 Register TargetRegisterInfo::lookThruSingleUseCopyChain( 626 Register SrcReg, const MachineRegisterInfo *MRI) const { 627 while (true) { 628 const MachineInstr *MI = MRI->getVRegDef(SrcReg); 629 // Found the real definition, return it if it has a single use. 630 if (!MI->isCopyLike()) 631 return MRI->hasOneNonDBGUse(SrcReg) ? 
SrcReg : Register(); 632 633 Register CopySrcReg; 634 if (MI->isCopy()) 635 CopySrcReg = MI->getOperand(1).getReg(); 636 else { 637 assert(MI->isSubregToReg() && "Bad opcode for lookThruCopyLike"); 638 CopySrcReg = MI->getOperand(2).getReg(); 639 } 640 641 // Continue only if the next definition in the chain is for a virtual 642 // register that has a single use. 643 if (!CopySrcReg.isVirtual() || !MRI->hasOneNonDBGUse(CopySrcReg)) 644 return Register(); 645 646 SrcReg = CopySrcReg; 647 } 648 } 649 650 void TargetRegisterInfo::getOffsetOpcodes( 651 const StackOffset &Offset, SmallVectorImpl<uint64_t> &Ops) const { 652 assert(!Offset.getScalable() && "Scalable offsets are not handled"); 653 DIExpression::appendOffset(Ops, Offset.getFixed()); 654 } 655 656 DIExpression * 657 TargetRegisterInfo::prependOffsetExpression(const DIExpression *Expr, 658 unsigned PrependFlags, 659 const StackOffset &Offset) const { 660 assert((PrependFlags & 661 ~(DIExpression::DerefBefore | DIExpression::DerefAfter | 662 DIExpression::StackValue | DIExpression::EntryValue)) == 0 && 663 "Unsupported prepend flag"); 664 SmallVector<uint64_t, 16> OffsetExpr; 665 if (PrependFlags & DIExpression::DerefBefore) 666 OffsetExpr.push_back(dwarf::DW_OP_deref); 667 getOffsetOpcodes(Offset, OffsetExpr); 668 if (PrependFlags & DIExpression::DerefAfter) 669 OffsetExpr.push_back(dwarf::DW_OP_deref); 670 return DIExpression::prependOpcodes(Expr, OffsetExpr, 671 PrependFlags & DIExpression::StackValue, 672 PrependFlags & DIExpression::EntryValue); 673 } 674 675 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 676 LLVM_DUMP_METHOD 677 void TargetRegisterInfo::dumpReg(Register Reg, unsigned SubRegIndex, 678 const TargetRegisterInfo *TRI) { 679 dbgs() << printReg(Reg, TRI, SubRegIndex) << "\n"; 680 } 681 #endif 682