//==- TargetRegisterInfo.cpp - Target Register Information Implementation --==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the TargetRegisterInfo interface.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/CodeGenTypes/MachineValueType.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/Function.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Printable.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <utility>

#define DEBUG_TYPE "target-reg-info"

using namespace llvm;

// Live ranges larger than this (in number of segments/uses, per
// LiveInterval::size) are considered too expensive to region-split when their
// def is trivially rematerializable.
static cl::opt<unsigned>
    HugeSizeForSplit("huge-size-for-split", cl::Hidden,
                     cl::desc("A threshold of live range size which may cause "
                              "high compile time cost in global splitting."),
                     cl::init(5000));

TargetRegisterInfo::TargetRegisterInfo(
    const TargetRegisterInfoDesc *ID, regclass_iterator RCB,
    regclass_iterator RCE, const char *const *SRINames,
    const SubRegCoveredBits *SubIdxRanges, const LaneBitmask *SRILaneMasks,
    LaneBitmask SRICoveringLanes, const RegClassInfo *const RCIs,
    const MVT::SimpleValueType *const RCVTLists, unsigned Mode)
    : InfoDesc(ID), SubRegIndexNames(SRINames), SubRegIdxRanges(SubIdxRanges),
      SubRegIndexLaneMasks(SRILaneMasks), RegClassBegin(RCB), RegClassEnd(RCE),
      CoveringLanes(SRICoveringLanes), RCInfos(RCIs), RCVTLists(RCVTLists),
      HwMode(Mode) {}

TargetRegisterInfo::~TargetRegisterInfo() = default;

// Decide whether region splitting should be attempted for \p VirtReg.
// Returns false only when the value has a unique, trivially rematerializable
// definition AND its live range exceeds the HugeSizeForSplit threshold;
// in that case rematerialization is preferred over an expensive global split.
bool TargetRegisterInfo::shouldRegionSplitForVirtReg(
    const MachineFunction &MF, const LiveInterval &VirtReg) const {
  const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  MachineInstr *MI = MRI.getUniqueVRegDef(VirtReg.reg());
  if (MI && TII->isTriviallyReMaterializable(*MI) &&
      VirtReg.size() > HugeSizeForSplit)
    return false;
  return true;
}

// Set the bit for \p Reg and every super-register of \p Reg in \p RegisterSet.
void TargetRegisterInfo::markSuperRegs(BitVector &RegisterSet,
                                       MCRegister Reg) const {
  for (MCPhysReg SR : superregs_inclusive(Reg))
    RegisterSet.set(SR);
}

// Verify that for every register in \p RegisterSet, all of its
// super-registers are also in the set (unless the sub-register is listed in
// \p Exceptions). Prints a diagnostic to dbgs() and returns false on the
// first violation found.
bool TargetRegisterInfo::checkAllSuperRegsMarked(const BitVector &RegisterSet,
    ArrayRef<MCPhysReg> Exceptions) const {
  // Check that all super registers of reserved regs are reserved as well.
  BitVector Checked(getNumRegs());
  for (unsigned Reg : RegisterSet.set_bits()) {
    if (Checked[Reg])
      continue;
    for (MCPhysReg SR : superregs(Reg)) {
      if (!RegisterSet[SR] && !is_contained(Exceptions, Reg)) {
        dbgs() << "Error: Super register " << printReg(SR, this)
               << " of reserved register " << printReg(Reg, this)
               << " is not reserved.\n";
        return false;
      }

      // We transitively check superregs. So we can remember this for later
      // to avoid compile-time explosion in deep register hierarchies.
      Checked.set(SR);
    }
  }
  return true;
}

namespace llvm {

// Build a lazy printer for \p Reg. Output forms:
//   $noreg            - no register
//   SS#<n>            - stack slot
//   %<name> / %<n>    - virtual register (named via MRI if available)
//   $physreg<n>       - physical register when no TRI is available
//   $<lowercase-name> - physical register via TRI
// An optional ":subidx" suffix is appended when SubIdx is non-zero.
Printable printReg(Register Reg, const TargetRegisterInfo *TRI,
                   unsigned SubIdx, const MachineRegisterInfo *MRI) {
  return Printable([Reg, TRI, SubIdx, MRI](raw_ostream &OS) {
    if (!Reg)
      OS << "$noreg";
    else if (Register::isStackSlot(Reg))
      OS << "SS#" << Register::stackSlot2Index(Reg);
    else if (Reg.isVirtual()) {
      StringRef Name = MRI ? MRI->getVRegName(Reg) : "";
      if (Name != "") {
        OS << '%' << Name;
      } else {
        OS << '%' << Register::virtReg2Index(Reg);
      }
    } else if (!TRI)
      OS << '$' << "physreg" << Reg.id();
    else if (Reg < TRI->getNumRegs()) {
      OS << '$';
      printLowerCase(TRI->getName(Reg), OS);
    } else
      llvm_unreachable("Register kind is unsupported.");

    if (SubIdx) {
      if (TRI)
        OS << ':' << TRI->getSubRegIndexName(SubIdx);
      else
        OS << ":sub(" << SubIdx << ')';
    }
  });
}

// Build a lazy printer for a register unit: the names of its root registers
// joined with '~' (e.g. "fp0~st7"), or "Unit~<n>"/"BadUnit~<n>" when TRI is
// missing or the unit number is out of range.
Printable printRegUnit(unsigned Unit, const TargetRegisterInfo *TRI) {
  return Printable([Unit, TRI](raw_ostream &OS) {
    // Generic printout when TRI is missing.
    if (!TRI) {
      OS << "Unit~" << Unit;
      return;
    }

    // Check for invalid register units.
    if (Unit >= TRI->getNumRegUnits()) {
      OS << "BadUnit~" << Unit;
      return;
    }

    // Normal units have at least one root.
    MCRegUnitRootIterator Roots(Unit, TRI);
    assert(Roots.isValid() && "Unit has no roots.");
    OS << TRI->getName(*Roots);
    for (++Roots; Roots.isValid(); ++Roots)
      OS << '~' << TRI->getName(*Roots);
  });
}

// Print either a virtual register (as %<index>) or a register unit
// (delegating to printRegUnit).
Printable printVRegOrUnit(unsigned Unit, const TargetRegisterInfo *TRI) {
  return Printable([Unit, TRI](raw_ostream &OS) {
    if (Register::isVirtualRegister(Unit)) {
      OS << '%' << Register::virtReg2Index(Unit);
    } else {
      OS << printRegUnit(Unit, TRI);
    }
  });
}

// Print the register class or register bank assigned to \p Reg in lowercase,
// or "_" for a generic register with neither (which must then have a valid
// LLT type if it has any definitions).
Printable printRegClassOrBank(Register Reg, const MachineRegisterInfo &RegInfo,
                              const TargetRegisterInfo *TRI) {
  return Printable([Reg, &RegInfo, TRI](raw_ostream &OS) {
    if (RegInfo.getRegClassOrNull(Reg))
      OS << StringRef(TRI->getRegClassName(RegInfo.getRegClass(Reg))).lower();
    else if (RegInfo.getRegBankOrNull(Reg))
      OS << StringRef(RegInfo.getRegBankOrNull(Reg)->getName()).lower();
    else {
      OS << "_";
      assert((RegInfo.def_empty(Reg) || RegInfo.getType(Reg).isValid()) &&
             "Generic registers must have a valid type");
    }
  });
}

} // end namespace llvm

/// getAllocatableClass - Return the maximal subclass of the given register
/// class that is allocatable, or NULL.
const TargetRegisterClass *
TargetRegisterInfo::getAllocatableClass(const TargetRegisterClass *RC) const {
  if (!RC || RC->isAllocatable())
    return RC;

  // Sub-classes are ordered by ID, so the first allocatable one found is the
  // maximal allocatable sub-class.
  for (BitMaskClassIterator It(RC->getSubClassMask(), *this); It.isValid();
       ++It) {
    const TargetRegisterClass *SubRC = getRegClass(It.getID());
    if (SubRC->isAllocatable())
      return SubRC;
  }
  return nullptr;
}

// Shared implementation for getMinimalPhysRegClass (MVT) and
// getMinimalPhysRegClassLLT (LLT): find the smallest register class that
// contains \p Reg and is legal for type \p Ty. A "default" type (MVT::Other
// or an invalid LLT) matches any class. May return nullptr only in the LLT
// variant; the MVT variant asserts a class is found.
template <typename TypeT>
static const TargetRegisterClass *
getMinimalPhysRegClass(const TargetRegisterInfo *TRI, MCRegister Reg,
                       TypeT Ty) {
  static_assert(std::is_same_v<TypeT, MVT> || std::is_same_v<TypeT, LLT>);
  assert(Reg.isPhysical() && "reg must be a physical register");

  bool IsDefault = [&]() {
    if constexpr (std::is_same_v<TypeT, MVT>)
      return Ty == MVT::Other;
    else
      return !Ty.isValid();
  }();

  // Pick the smallest (most specialized) register class of the right type
  // that contains this physreg.
  const TargetRegisterClass *BestRC = nullptr;
  for (const TargetRegisterClass *RC : TRI->regclasses()) {
    if ((IsDefault || TRI->isTypeLegalForClass(*RC, Ty)) && RC->contains(Reg) &&
        (!BestRC || BestRC->hasSubClass(RC)))
      BestRC = RC;
  }

  if constexpr (std::is_same_v<TypeT, MVT>)
    assert(BestRC && "Couldn't find the register class");
  return BestRC;
}

// Same as getMinimalPhysRegClass above, but the class must contain both
// \p Reg1 and \p Reg2.
template <typename TypeT>
static const TargetRegisterClass *
getCommonMinimalPhysRegClass(const TargetRegisterInfo *TRI, MCRegister Reg1,
                             MCRegister Reg2, TypeT Ty) {
  static_assert(std::is_same_v<TypeT, MVT> || std::is_same_v<TypeT, LLT>);
  assert(Reg1.isPhysical() && Reg2.isPhysical() &&
         "Reg1/Reg2 must be a physical register");

  bool IsDefault = [&]() {
    if constexpr (std::is_same_v<TypeT, MVT>)
      return Ty == MVT::Other;
    else
      return !Ty.isValid();
  }();

  // Pick the smallest (most specialized) register class of the right type
  // that contains both physregs.
  const TargetRegisterClass *BestRC = nullptr;
  for (const TargetRegisterClass *RC : TRI->regclasses()) {
    if ((IsDefault || TRI->isTypeLegalForClass(*RC, Ty)) &&
        RC->contains(Reg1, Reg2) && (!BestRC || BestRC->hasSubClass(RC)))
      BestRC = RC;
  }

  if constexpr (std::is_same_v<TypeT, MVT>)
    assert(BestRC && "Couldn't find the register class");
  return BestRC;
}

const TargetRegisterClass *
TargetRegisterInfo::getMinimalPhysRegClass(MCRegister Reg, MVT VT) const {
  return ::getMinimalPhysRegClass(this, Reg, VT);
}

const TargetRegisterClass *TargetRegisterInfo::getCommonMinimalPhysRegClass(
    MCRegister Reg1, MCRegister Reg2, MVT VT) const {
  return ::getCommonMinimalPhysRegClass(this, Reg1, Reg2, VT);
}

const TargetRegisterClass *
TargetRegisterInfo::getMinimalPhysRegClassLLT(MCRegister Reg, LLT Ty) const {
  return ::getMinimalPhysRegClass(this, Reg, Ty);
}

const TargetRegisterClass *TargetRegisterInfo::getCommonMinimalPhysRegClassLLT(
    MCRegister Reg1, MCRegister Reg2, LLT Ty) const {
  return ::getCommonMinimalPhysRegClass(this, Reg1, Reg2, Ty);
}

/// getAllocatableSetForRC - Toggle the bits that represent allocatable
/// registers for the specific register class.
static void getAllocatableSetForRC(const MachineFunction &MF,
                                   const TargetRegisterClass *RC,
                                   BitVector &R) {
  assert(RC->isAllocatable() && "invalid for nonallocatable sets");
  ArrayRef<MCPhysReg> Order = RC->getRawAllocationOrder(MF);
  for (MCPhysReg PR : Order)
    R.set(PR);
}

// Return the set of allocatable registers for \p RC (or for all allocatable
// classes when RC is null), with the function's reserved registers removed.
BitVector TargetRegisterInfo::getAllocatableSet(const MachineFunction &MF,
                                                const TargetRegisterClass *RC) const {
  BitVector Allocatable(getNumRegs());
  if (RC) {
    // A register class with no allocatable subclass returns an empty set.
    const TargetRegisterClass *SubClass = getAllocatableClass(RC);
    if (SubClass)
      getAllocatableSetForRC(MF, SubClass, Allocatable);
  } else {
    for (const TargetRegisterClass *C : regclasses())
      if (C->isAllocatable())
        getAllocatableSetForRC(MF, C, Allocatable);
  }

  // Mask out the reserved registers
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  const BitVector &Reserved = MRI.getReservedRegs();
  Allocatable.reset(Reserved);

  return Allocatable;
}

// Scan two register-class bit masks word by word and return the class with
// the lowest ID present in both, or nullptr if the masks do not intersect.
static inline
const TargetRegisterClass *firstCommonClass(const uint32_t *A,
                                            const uint32_t *B,
                                            const TargetRegisterInfo *TRI) {
  for (unsigned I = 0, E = TRI->getNumRegClasses(); I < E; I += 32)
    if (unsigned Common = *A++ & *B++)
      return TRI->getRegClass(I + llvm::countr_zero(Common));
  return nullptr;
}

const TargetRegisterClass *
TargetRegisterInfo::getCommonSubClass(const TargetRegisterClass *A,
                                      const TargetRegisterClass *B) const {
  // First take care of the trivial cases.
  if (A == B)
    return A;
  if (!A || !B)
    return nullptr;

  // Register classes are ordered topologically, so the largest common
  // sub-class is the common sub-class with the smallest ID.
  return firstCommonClass(A->getSubClassMask(), B->getSubClassMask(), this);
}

const TargetRegisterClass *
TargetRegisterInfo::getMatchingSuperRegClass(const TargetRegisterClass *A,
                                             const TargetRegisterClass *B,
                                             unsigned Idx) const {
  assert(A && B && "Missing register class");
  assert(Idx && "Bad sub-register index");

  // Find Idx in the list of super-register indices.
  for (SuperRegClassIterator RCI(B, this); RCI.isValid(); ++RCI)
    if (RCI.getSubReg() == Idx)
      // The bit mask contains all register classes that are projected into B
      // by Idx. Find a class that is also a sub-class of A.
      return firstCommonClass(RCI.getMask(), A->getSubClassMask(), this);
  return nullptr;
}

const TargetRegisterClass *TargetRegisterInfo::
getCommonSuperRegClass(const TargetRegisterClass *RCA, unsigned SubA,
                       const TargetRegisterClass *RCB, unsigned SubB,
                       unsigned &PreA, unsigned &PreB) const {
  assert(RCA && SubA && RCB && SubB && "Invalid arguments");

  // Search all pairs of sub-register indices that project into RCA and RCB
  // respectively. This is quadratic, but usually the sets are very small. On
  // most targets like X86, there will only be a single sub-register index
  // (e.g., sub_16bit projecting into GR16).
  //
  // The worst case is a register class like DPR on ARM.
  // We have indices dsub_0..dsub_7 projecting into that class.
  //
  // It is very common that one register class is a sub-register of the other.
  // Arrange for RCA to be the larger register so the answer will be found in
  // the first iteration. This makes the search linear for the most common
  // case.
  const TargetRegisterClass *BestRC = nullptr;
  unsigned *BestPreA = &PreA;
  unsigned *BestPreB = &PreB;
  if (getRegSizeInBits(*RCA) < getRegSizeInBits(*RCB)) {
    std::swap(RCA, RCB);
    std::swap(SubA, SubB);
    std::swap(BestPreA, BestPreB);
  }

  // Also terminate the search once we have found a register class as small as
  // RCA.
  unsigned MinSize = getRegSizeInBits(*RCA);

  for (SuperRegClassIterator IA(RCA, this, true); IA.isValid(); ++IA) {
    unsigned FinalA = composeSubRegIndices(IA.getSubReg(), SubA);
    for (SuperRegClassIterator IB(RCB, this, true); IB.isValid(); ++IB) {
      // Check if a common super-register class exists for this index pair.
      const TargetRegisterClass *RC =
          firstCommonClass(IA.getMask(), IB.getMask(), this);
      if (!RC || getRegSizeInBits(*RC) < MinSize)
        continue;

      // The indexes must compose identically: PreA+SubA == PreB+SubB.
      unsigned FinalB = composeSubRegIndices(IB.getSubReg(), SubB);
      if (FinalA != FinalB)
        continue;

      // Is RC a better candidate than BestRC?
      if (BestRC && getRegSizeInBits(*RC) >= getRegSizeInBits(*BestRC))
        continue;

      // Yes, RC is the smallest super-register seen so far.
      BestRC = RC;
      *BestPreA = IA.getSubReg();
      *BestPreB = IB.getSubReg();

      // Bail early if we reached MinSize. We won't find a better candidate.
      if (getRegSizeInBits(*BestRC) == MinSize)
        return BestRC;
    }
  }
  return BestRC;
}

/// Check if the registers defined by the pair (RegisterClass, SubReg)
/// share the same register file.
static bool shareSameRegisterFile(const TargetRegisterInfo &TRI,
                                  const TargetRegisterClass *DefRC,
                                  unsigned DefSubReg,
                                  const TargetRegisterClass *SrcRC,
                                  unsigned SrcSubReg) {
  // Same register class.
  if (DefRC == SrcRC)
    return true;

  // Both operands are sub registers. Check if they share a register class.
  unsigned SrcIdx, DefIdx;
  if (SrcSubReg && DefSubReg) {
    return TRI.getCommonSuperRegClass(SrcRC, SrcSubReg, DefRC, DefSubReg,
                                      SrcIdx, DefIdx) != nullptr;
  }

  // At most one of the register is a sub register, make it Src to avoid
  // duplicating the test.
  if (!SrcSubReg) {
    std::swap(DefSubReg, SrcSubReg);
    std::swap(DefRC, SrcRC);
  }

  // One of the register is a sub register, check if we can get a superclass.
  if (SrcSubReg)
    return TRI.getMatchingSuperRegClass(SrcRC, DefRC, SrcSubReg) != nullptr;

  // Plain copy.
  return TRI.getCommonSubClass(DefRC, SrcRC) != nullptr;
}

bool TargetRegisterInfo::shouldRewriteCopySrc(const TargetRegisterClass *DefRC,
                                              unsigned DefSubReg,
                                              const TargetRegisterClass *SrcRC,
                                              unsigned SrcSubReg) const {
  // If this source does not incur a cross register bank copy, use it.
  return shareSameRegisterFile(*this, DefRC, DefSubReg, SrcRC, SrcSubReg);
}

// Compute target-independent register allocator hints to help eliminate
// copies. Appends valid physreg hints for \p VirtReg to \p Hints; always
// returns false (hints are advisory, not mandatory).
bool TargetRegisterInfo::getRegAllocationHints(
    Register VirtReg, ArrayRef<MCPhysReg> Order,
    SmallVectorImpl<MCPhysReg> &Hints, const MachineFunction &MF,
    const VirtRegMap *VRM, const LiveRegMatrix *Matrix) const {
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  const std::pair<unsigned, SmallVector<Register, 4>> *Hints_MRI =
      MRI.getRegAllocationHints(VirtReg);

  if (!Hints_MRI)
    return false;

  SmallSet<Register, 32> HintedRegs;
  // First hint may be a target hint (non-zero hint type); skip it here since
  // target-specific hints are handled by the target's override.
  bool Skip = (Hints_MRI->first != 0);
  for (auto Reg : Hints_MRI->second) {
    if (Skip) {
      Skip = false;
      continue;
    }

    // Target-independent hints are either a physical or a virtual register.
    Register Phys = Reg;
    if (VRM && Phys.isVirtual())
      Phys = VRM->getPhys(Phys);

    // Don't add the same reg twice (Hints_MRI may contain multiple virtual
    // registers allocated to the same physreg).
    if (!HintedRegs.insert(Phys).second)
      continue;
    // Check that Phys is a valid hint in VirtReg's register class.
    if (!Phys.isPhysical())
      continue;
    if (MRI.isReserved(Phys))
      continue;
    // Check that Phys is in the allocation order. We shouldn't heed hints
    // from VirtReg's register class if they aren't in the allocation order. The
    // target probably has a reason for removing the register.
    if (!is_contained(Order, Phys))
      continue;

    // All clear, tell the register allocator to prefer this register.
    Hints.push_back(Phys);
  }
  return false;
}

// Return true if \p PhysReg is preserved across a call under the current
// function's calling convention (i.e. it appears in the call-preserved mask).
bool TargetRegisterInfo::isCalleeSavedPhysReg(
    MCRegister PhysReg, const MachineFunction &MF) const {
  if (!PhysReg)
    return false;
  const uint32_t *callerPreservedRegs =
      getCallPreservedMask(MF, MF.getFunction().getCallingConv());
  if (callerPreservedRegs) {
    assert(PhysReg.isPhysical() && "Expected physical register");
    // The mask is a bit vector packed into 32-bit words.
    return (callerPreservedRegs[PhysReg.id() / 32] >> PhysReg.id() % 32) & 1;
  }
  return false;
}

bool TargetRegisterInfo::canRealignStack(const MachineFunction &MF) const {
  return MF.getFrameInfo().isStackRealignable();
}

bool TargetRegisterInfo::shouldRealignStack(const MachineFunction &MF) const {
  return MF.getFrameInfo().shouldRealignStack();
}

// Return true if every register set in \p mask0 is also set in \p mask1
// (i.e. mask0 is a subset of mask1). Masks are packed 32 registers per word.
bool TargetRegisterInfo::regmaskSubsetEqual(const uint32_t *mask0,
                                            const uint32_t *mask1) const {
  unsigned N = (getNumRegs()+31) / 32;
  for (unsigned I = 0; I < N; ++I)
    if ((mask0[I] & mask1[I]) != mask0[I])
      return false;
  return true;
}

// Return the size of \p Reg in bits. For physical registers this is the size
// of the minimal class containing it; for virtual registers it is the LLT
// size when available, otherwise the size of the assigned register class.
TypeSize
TargetRegisterInfo::getRegSizeInBits(Register Reg,
                                     const MachineRegisterInfo &MRI) const {
  const TargetRegisterClass *RC{};
  if (Reg.isPhysical()) {
    // The size is not directly available for physical registers.
    // Instead, we need to access a register class that contains Reg and
    // get the size of that register class.
    RC = getMinimalPhysRegClass(Reg);
    assert(RC && "Unable to deduce the register class");
    return getRegSizeInBits(*RC);
  }
  LLT Ty = MRI.getType(Reg);
  if (Ty.isValid())
    return Ty.getSizeInBits();

  // Since Reg is not a generic register, it may have a register class.
  RC = MRI.getRegClass(Reg);
  assert(RC && "Unable to deduce the register class");
  return getRegSizeInBits(*RC);
}

// Greedily select sub-register indices of \p RC whose lane masks exactly
// cover \p LaneMask, appending them to \p NeededIndexes. Returns false if no
// such covering exists; otherwise returns true (via the non-zero BestIdx
// found in the first pass).
bool TargetRegisterInfo::getCoveringSubRegIndexes(
    const TargetRegisterClass *RC, LaneBitmask LaneMask,
    SmallVectorImpl<unsigned> &NeededIndexes) const {
  SmallVector<unsigned, 8> PossibleIndexes;
  unsigned BestIdx = 0;
  unsigned BestCover = 0;

  // First pass: collect candidate indices and pick the one covering the most
  // lanes (or an exact match).
  for (unsigned Idx = 1, E = getNumSubRegIndices(); Idx < E; ++Idx) {
    // Is this index even compatible with the given class?
    if (getSubClassWithSubReg(RC, Idx) != RC)
      continue;
    LaneBitmask SubRegMask = getSubRegIndexLaneMask(Idx);
    // Early exit if we found a perfect match.
    if (SubRegMask == LaneMask) {
      BestIdx = Idx;
      break;
    }

    // The index must not cover any lanes outside \p LaneMask.
    if ((SubRegMask & ~LaneMask).any())
      continue;

    unsigned PopCount = SubRegMask.getNumLanes();
    PossibleIndexes.push_back(Idx);
    if (PopCount > BestCover) {
      BestCover = PopCount;
      BestIdx = Idx;
    }
  }

  // Abort if we cannot possibly implement the COPY with the given indexes.
  if (BestIdx == 0)
    return false;

  NeededIndexes.push_back(BestIdx);

  // Greedy heuristic: Keep iterating keeping the best covering subreg index
  // each time.
  LaneBitmask LanesLeft = LaneMask & ~getSubRegIndexLaneMask(BestIdx);
  while (LanesLeft.any()) {
    unsigned BestIdx = 0; // Shadows the outer BestIdx for this round.
    int BestCover = std::numeric_limits<int>::min();
    for (unsigned Idx : PossibleIndexes) {
      LaneBitmask SubRegMask = getSubRegIndexLaneMask(Idx);
      // Early exit if we found a perfect match.
      if (SubRegMask == LanesLeft) {
        BestIdx = Idx;
        break;
      }

      // Do not cover already-covered lanes to avoid creating cycles
      // in copy bundles (= bundle contains copies that write to the
      // registers).
      if ((SubRegMask & ~LanesLeft).any())
        continue;

      // Try to cover as many of the remaining lanes as possible.
      const int Cover = (SubRegMask & LanesLeft).getNumLanes();
      if (Cover > BestCover) {
        BestCover = Cover;
        BestIdx = Idx;
      }
    }

    if (BestIdx == 0)
      return false; // Impossible to handle

    NeededIndexes.push_back(BestIdx);

    LanesLeft &= ~getSubRegIndexLaneMask(BestIdx);
  }

  // Outer BestIdx is non-zero here (checked above), so this converts to true.
  return BestIdx;
}

unsigned TargetRegisterInfo::getSubRegIdxSize(unsigned Idx) const {
  assert(Idx && Idx < getNumSubRegIndices() &&
         "This is not a subregister index");
  // Ranges are stored per hardware mode, getNumSubRegIndices() entries each.
  return SubRegIdxRanges[HwMode * getNumSubRegIndices() + Idx].Size;
}

unsigned TargetRegisterInfo::getSubRegIdxOffset(unsigned Idx) const {
  assert(Idx && Idx < getNumSubRegIndices() &&
         "This is not a subregister index");
  // Ranges are stored per hardware mode, getNumSubRegIndices() entries each.
  return SubRegIdxRanges[HwMode * getNumSubRegIndices() + Idx].Offset;
}

// Follow a chain of COPY/SUBREG_TO_REG defs starting at \p SrcReg and return
// the first register that is not defined by a copy-like instruction (or the
// first non-virtual source encountered).
Register
TargetRegisterInfo::lookThruCopyLike(Register SrcReg,
                                     const MachineRegisterInfo *MRI) const {
  while (true) {
    const MachineInstr *MI = MRI->getVRegDef(SrcReg);
    if (!MI->isCopyLike())
      return SrcReg;

    Register CopySrcReg;
    if (MI->isCopy())
      CopySrcReg = MI->getOperand(1).getReg();
    else {
      assert(MI->isSubregToReg() && "Bad opcode for lookThruCopyLike");
      // SUBREG_TO_REG's source value is operand 2 (operand 1 is an immediate).
      CopySrcReg = MI->getOperand(2).getReg();
    }

    if (!CopySrcReg.isVirtual())
      return CopySrcReg;

    SrcReg = CopySrcReg;
  }
}

// Like lookThruCopyLike, but every register along the chain (including the
// final definition) must have exactly one non-debug use; returns a null
// Register otherwise.
Register TargetRegisterInfo::lookThruSingleUseCopyChain(
    Register SrcReg, const MachineRegisterInfo *MRI) const {
  while (true) {
    const MachineInstr *MI = MRI->getVRegDef(SrcReg);
    // Found the real definition, return it if it has a single use.
    if (!MI->isCopyLike())
      return MRI->hasOneNonDBGUse(SrcReg) ? SrcReg : Register();

    Register CopySrcReg;
    if (MI->isCopy())
      CopySrcReg = MI->getOperand(1).getReg();
    else {
      assert(MI->isSubregToReg() && "Bad opcode for lookThruCopyLike");
      // SUBREG_TO_REG's source value is operand 2 (operand 1 is an immediate).
      CopySrcReg = MI->getOperand(2).getReg();
    }

    // Continue only if the next definition in the chain is for a virtual
    // register that has a single use.
    if (!CopySrcReg.isVirtual() || !MRI->hasOneNonDBGUse(CopySrcReg))
      return Register();

    SrcReg = CopySrcReg;
  }
}

// Append DWARF expression opcodes representing \p Offset to \p Ops.
// The base implementation only supports fixed (non-scalable) offsets;
// targets with scalable stacks must override.
void TargetRegisterInfo::getOffsetOpcodes(
    const StackOffset &Offset, SmallVectorImpl<uint64_t> &Ops) const {
  assert(!Offset.getScalable() && "Scalable offsets are not handled");
  DIExpression::appendOffset(Ops, Offset.getFixed());
}

// Prepend \p Offset (with optional deref-before/after, stack-value and
// entry-value flags) to the DWARF expression \p Expr.
DIExpression *
TargetRegisterInfo::prependOffsetExpression(const DIExpression *Expr,
                                            unsigned PrependFlags,
                                            const StackOffset &Offset) const {
  assert((PrependFlags &
          ~(DIExpression::DerefBefore | DIExpression::DerefAfter |
            DIExpression::StackValue | DIExpression::EntryValue)) == 0 &&
         "Unsupported prepend flag");
  SmallVector<uint64_t, 16> OffsetExpr;
  if (PrependFlags & DIExpression::DerefBefore)
    OffsetExpr.push_back(dwarf::DW_OP_deref);
  getOffsetOpcodes(Offset, OffsetExpr);
  if (PrependFlags & DIExpression::DerefAfter)
    OffsetExpr.push_back(dwarf::DW_OP_deref);
  return DIExpression::prependOpcodes(Expr, OffsetExpr,
                                      PrependFlags & DIExpression::StackValue,
                                      PrependFlags & DIExpression::EntryValue);
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD
void TargetRegisterInfo::dumpReg(Register Reg, unsigned SubRegIndex,
                                 const TargetRegisterInfo *TRI) {
  dbgs() << printReg(Reg, TRI, SubRegIndex) << "\n";
}
#endif