//===-- ARMBaseRegisterInfo.cpp - ARM Register Information ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the base ARM implementation of TargetRegisterInfo class.
//
//===----------------------------------------------------------------------===//

#include "ARMBaseRegisterInfo.h"
#include "ARM.h"
#include "ARMBaseInstrInfo.h"
#include "ARMFrameLowering.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMSubtarget.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "MCTargetDesc/ARMBaseInfo.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Type.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <cassert>
#include <utility>

#define DEBUG_TYPE "arm-register-info"

#define GET_REGINFO_TARGET_DESC
#include "ARMGenRegisterInfo.inc"

using namespace llvm;

ARMBaseRegisterInfo::ARMBaseRegisterInfo()
    : ARMGenRegisterInfo(ARM::LR, 0, 0, ARM::PC) {
  ARM_MC::initLLVMToCVRegMapping(this);
}

const MCPhysReg*
ARMBaseRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  const ARMSubtarget &STI = MF->getSubtarget<ARMSubtarget>();
  ARMSubtarget::PushPopSplitVariation PushPopSplit =
      STI.getPushPopSplitVariation(*MF);
  const Function &F = MF->getFunction();

  if (F.getCallingConv() == CallingConv::GHC) {
    // The GHC set of callee-saved regs is empty, as all those regs are
    // used for passing STG regs around.
    return CSR_NoRegs_SaveList;
  } else if (PushPopSplit == ARMSubtarget::SplitR11WindowsSEH) {
    return CSR_Win_SplitFP_SaveList;
  } else if (F.getCallingConv() == CallingConv::CFGuard_Check) {
    return CSR_Win_AAPCS_CFGuard_Check_SaveList;
  } else if (F.getCallingConv() == CallingConv::SwiftTail) {
    return STI.isTargetDarwin() ? CSR_iOS_SwiftTail_SaveList
                                : (PushPopSplit == ARMSubtarget::SplitR7
                                       ? CSR_ATPCS_SplitPush_SwiftTail_SaveList
                                       : CSR_AAPCS_SwiftTail_SaveList);
  } else if (F.hasFnAttribute("interrupt")) {
    if (STI.isMClass()) {
      // M-class CPUs have hardware which saves the registers needed to allow
      // a function conforming to the AAPCS to serve as a handler.
      return PushPopSplit == ARMSubtarget::SplitR7
                 ? CSR_ATPCS_SplitPush_SaveList
                 : CSR_AAPCS_SaveList;
    } else if (F.getFnAttribute("interrupt").getValueAsString() == "FIQ") {
      // Fast interrupt mode gives the handler a private copy of R8-R14, so
      // fewer registers need to be saved to restore user-mode state.
      return CSR_FIQ_SaveList;
    } else {
      // Generally only R13-R14 (i.e. SP, LR) are automatically preserved by
      // exception handling.
      return CSR_GenericInt_SaveList;
    }
  }

  if (STI.getTargetLowering()->supportSwiftError() &&
      F.getAttributes().hasAttrSomewhere(Attribute::SwiftError)) {
    if (STI.isTargetDarwin())
      return CSR_iOS_SwiftError_SaveList;

    return PushPopSplit == ARMSubtarget::SplitR7
               ? CSR_ATPCS_SplitPush_SwiftError_SaveList
               : CSR_AAPCS_SwiftError_SaveList;
  }

  if (STI.isTargetDarwin() && F.getCallingConv() == CallingConv::CXX_FAST_TLS)
    return MF->getInfo<ARMFunctionInfo>()->isSplitCSR()
               ? CSR_iOS_CXX_TLS_PE_SaveList
               : CSR_iOS_CXX_TLS_SaveList;

  if (STI.isTargetDarwin())
    return CSR_iOS_SaveList;

  if (PushPopSplit == ARMSubtarget::SplitR7)
    return STI.createAAPCSFrameChain() ? CSR_AAPCS_SplitPush_R7_SaveList
                                       : CSR_ATPCS_SplitPush_SaveList;

  if (PushPopSplit == ARMSubtarget::SplitR11AAPCSSignRA)
    return CSR_AAPCS_SplitPush_R11_SaveList;

  return CSR_AAPCS_SaveList;
}
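
/// Returns the list of callee-saved registers that, for a CXX_FAST_TLS
/// function using split CSR, are saved via copies to virtual registers
/// rather than by spilling; returns nullptr when split CSR is not in use.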
const MCPhysReg *ARMBaseRegisterInfo::getCalleeSavedRegsViaCopy(
    const MachineFunction *MF) const {
  assert(MF && "Invalid MachineFunction pointer.");
  if (MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
      MF->getInfo<ARMFunctionInfo>()->isSplitCSR())
    return CSR_iOS_CXX_TLS_ViaCopy_SaveList;
  return nullptr;
}

const uint32_t *
ARMBaseRegisterInfo::getCallPreservedMask(const MachineFunction &MF,
                                          CallingConv::ID CC) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  if (CC == CallingConv::GHC)
    // This is academic because all GHC calls are (supposed to be) tail calls.
    return CSR_NoRegs_RegMask;
  if (CC == CallingConv::CFGuard_Check)
    return CSR_Win_AAPCS_CFGuard_Check_RegMask;
  if (CC == CallingConv::SwiftTail) {
    return STI.isTargetDarwin() ? CSR_iOS_SwiftTail_RegMask
                                : CSR_AAPCS_SwiftTail_RegMask;
  }
  if (STI.getTargetLowering()->supportSwiftError() &&
      MF.getFunction().getAttributes().hasAttrSomewhere(Attribute::SwiftError))
    return STI.isTargetDarwin() ? CSR_iOS_SwiftError_RegMask
                                : CSR_AAPCS_SwiftError_RegMask;

  if (STI.isTargetDarwin() && CC == CallingConv::CXX_FAST_TLS)
    return CSR_iOS_CXX_TLS_RegMask;
  return STI.isTargetDarwin() ? CSR_iOS_RegMask : CSR_AAPCS_RegMask;
}
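
/// Returns a register mask that preserves nothing, for call sites that
/// clobber every register.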
const uint32_t*
ARMBaseRegisterInfo::getNoPreservedMask() const {
  return CSR_NoRegs_RegMask;
}

const uint32_t *
ARMBaseRegisterInfo::getTLSCallPreservedMask(const MachineFunction &MF) const {
  assert(MF.getSubtarget<ARMSubtarget>().isTargetDarwin() &&
         "only know about special TLS call on Darwin");
  return CSR_iOS_TLSCall_RegMask;
}

const uint32_t *
ARMBaseRegisterInfo::getSjLjDispatchPreservedMask(const MachineFunction &MF) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  if (!STI.useSoftFloat() && STI.hasVFP2Base() && !STI.isThumb1Only())
    return CSR_NoRegs_RegMask;
  else
    return CSR_FPRegs_RegMask;
}

const uint32_t *
ARMBaseRegisterInfo::getThisReturnPreservedMask(const MachineFunction &MF,
                                                CallingConv::ID CC) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  // This should return a register mask that is the same as that returned by
  // getCallPreservedMask but that additionally preserves the register used
  // for the first i32 argument (which must also be the register used to
  // return a single i32 return value).
  //
  // If the calling convention does not use the same register for both, or
  // otherwise does not want to enable this optimization, the function should
  // return nullptr.
  if (CC == CallingConv::GHC)
    // This is academic because all GHC calls are (supposed to be) tail calls.
    return nullptr;
  return STI.isTargetDarwin() ? CSR_iOS_ThisReturn_RegMask
                              : CSR_AAPCS_ThisReturn_RegMask;
}

ArrayRef<MCPhysReg> ARMBaseRegisterInfo::getIntraCallClobberedRegs(
    const MachineFunction *MF) const {
  static const MCPhysReg IntraCallClobberedRegs[] = {ARM::R12};
  return ArrayRef<MCPhysReg>(IntraCallClobberedRegs);
}

BitVector ARMBaseRegisterInfo::
getReservedRegs(const MachineFunction &MF) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  const ARMFrameLowering *TFI = getFrameLowering(MF);

  // FIXME: avoid re-calculating this every time.
  BitVector Reserved(getNumRegs());
  markSuperRegs(Reserved, ARM::SP);
  markSuperRegs(Reserved, ARM::PC);
  markSuperRegs(Reserved, ARM::FPSCR);
  markSuperRegs(Reserved, ARM::APSR_NZCV);
  if (TFI->isFPReserved(MF))
    markSuperRegs(Reserved, STI.getFramePointerReg());
  if (hasBasePointer(MF))
    markSuperRegs(Reserved, BasePtr);
  // Some targets reserve R9.
  if (STI.isR9Reserved())
    markSuperRegs(Reserved, ARM::R9);
  // Reserve D16-D31 if the subtarget doesn't support them.
  if (!STI.hasD32()) {
    static_assert(ARM::D31 == ARM::D16 + 15, "Register list not consecutive!");
    for (unsigned R = 0; R < 16; ++R)
      markSuperRegs(Reserved, ARM::D16 + R);
  }
  const TargetRegisterClass &RC = ARM::GPRPairRegClass;
  for (unsigned Reg : RC)
    for (MCPhysReg S : subregs(Reg))
      if (Reserved.test(S))
        markSuperRegs(Reserved, Reg);
  // For the v8.1-M architecture.
  markSuperRegs(Reserved, ARM::ZR);

  assert(checkAllSuperRegsMarked(Reserved));
  return Reserved;
}
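
/// Returns true if PhysReg may legitimately be overwritten by inline
/// assembly, i.e. it is not one of the registers reserved in this function.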
bool ARMBaseRegisterInfo::
isAsmClobberable(const MachineFunction &MF, MCRegister PhysReg) const {
  return !getReservedRegs(MF).test(PhysReg);
}

bool ARMBaseRegisterInfo::isInlineAsmReadOnlyReg(const MachineFunction &MF,
                                                 unsigned PhysReg) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  const ARMFrameLowering *TFI = getFrameLowering(MF);

  BitVector Reserved(getNumRegs());
  markSuperRegs(Reserved, ARM::PC);
  if (TFI->isFPReserved(MF))
    markSuperRegs(Reserved, STI.getFramePointerReg());
  if (hasBasePointer(MF))
    markSuperRegs(Reserved, BasePtr);
  assert(checkAllSuperRegsMarked(Reserved));
  return Reserved.test(PhysReg);
}

const TargetRegisterClass *
ARMBaseRegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC,
                                               const MachineFunction &MF) const {
  unsigned SuperID = RC->getID();
  auto I = RC->superclasses().begin();
  auto E = RC->superclasses().end();
  do {
    switch (SuperID) {
    case ARM::GPRRegClassID:
    case ARM::SPRRegClassID:
    case ARM::DPRRegClassID:
    case ARM::GPRPairRegClassID:
      return getRegClass(SuperID);
    case ARM::QPRRegClassID:
    case ARM::QQPRRegClassID:
    case ARM::QQQQPRRegClassID:
      if (MF.getSubtarget<ARMSubtarget>().hasNEON())
        return getRegClass(SuperID);
      break;
    case ARM::MQPRRegClassID:
    case ARM::MQQPRRegClassID:
    case ARM::MQQQQPRRegClassID:
      if (MF.getSubtarget<ARMSubtarget>().hasMVEIntegerOps())
        return getRegClass(SuperID);
      break;
    }
    SuperID = (I != E) ? *I++ : ~0U;
  } while (SuperID != ~0U);
  return RC;
}

const TargetRegisterClass *
ARMBaseRegisterInfo::getPointerRegClass(const MachineFunction &MF,
                                        unsigned Kind) const {
  return &ARM::GPRRegClass;
}

const TargetRegisterClass *
ARMBaseRegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
  if (RC == &ARM::CCRRegClass)
    return &ARM::rGPRRegClass; // Can't copy CCR registers.
  if (RC == &ARM::cl_FPSCR_NZCVRegClass)
    return &ARM::rGPRRegClass;
  return RC;
}
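
/// Returns a rough upper bound on the number of registers of RC available
/// for allocation, accounting for the frame pointer and R9 when reserved;
/// used as a register pressure limit during pre-RA scheduling.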
unsigned
ARMBaseRegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
                                         MachineFunction &MF) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  const ARMFrameLowering *TFI = getFrameLowering(MF);

  switch (RC->getID()) {
  default:
    return 0;
  case ARM::tGPRRegClassID: {
    // hasFP ends up calling isMaxCallFrameSizeComputed(), which may not be
    // available when getRegPressureLimit() is called as part of
    // ScheduleDAGRRList.
    bool HasFP = MF.getFrameInfo().isMaxCallFrameSizeComputed()
                     ? TFI->hasFP(MF) : true;
    return 5 - HasFP;
  }
  case ARM::GPRRegClassID: {
    bool HasFP = MF.getFrameInfo().isMaxCallFrameSizeComputed()
                     ? TFI->hasFP(MF) : true;
    return 10 - HasFP - (STI.isR9Reserved() ? 1 : 0);
  }
  case ARM::SPRRegClassID:  // Currently not used as 'rep' register class.
  case ARM::DPRRegClassID:
    return 32 - 10;
  }
}

// Get the other register in a GPRPair.
static MCRegister getPairedGPR(MCRegister Reg, bool Odd,
                               const MCRegisterInfo *RI) {
  for (MCPhysReg Super : RI->superregs(Reg))
    if (ARM::GPRPairRegClass.contains(Super))
      return RI->getSubReg(Super, Odd ? ARM::gsub_1 : ARM::gsub_0);
  return MCRegister();
}

// Resolve the RegPairEven / RegPairOdd register allocator hints.
bool ARMBaseRegisterInfo::getRegAllocationHints(
    Register VirtReg, ArrayRef<MCPhysReg> Order,
    SmallVectorImpl<MCPhysReg> &Hints, const MachineFunction &MF,
    const VirtRegMap *VRM, const LiveRegMatrix *Matrix) const {
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  std::pair<unsigned, Register> Hint = MRI.getRegAllocationHint(VirtReg);

  unsigned Odd;
  switch (Hint.first) {
  case ARMRI::RegPairEven:
    Odd = 0;
    break;
  case ARMRI::RegPairOdd:
    Odd = 1;
    break;
  case ARMRI::RegLR:
    TargetRegisterInfo::getRegAllocationHints(VirtReg, Order, Hints, MF, VRM);
    if (MRI.getRegClass(VirtReg)->contains(ARM::LR))
      Hints.push_back(ARM::LR);
    return false;
  default:
    return TargetRegisterInfo::getRegAllocationHints(VirtReg, Order, Hints,
                                                     MF, VRM);
  }

  // This register should preferably be even (Odd == 0) or odd (Odd == 1).
  // Check if the other part of the pair has already been assigned, and
  // provide the paired register as the first hint.
  Register Paired = Hint.second;
  if (!Paired)
    return false;

  Register PairedPhys;
  if (Paired.isPhysical()) {
    PairedPhys = Paired;
  } else if (VRM && VRM->hasPhys(Paired)) {
    PairedPhys = getPairedGPR(VRM->getPhys(Paired), Odd, this);
  }

  // First prefer the paired physreg.
  if (PairedPhys && is_contained(Order, PairedPhys))
    Hints.push_back(PairedPhys);

  // Then prefer even or odd registers.
  for (MCPhysReg Reg : Order) {
    if (Reg == PairedPhys || (getEncodingValue(Reg) & 1) != Odd)
      continue;
    // Don't provide hints that are paired to a reserved register.
    MCRegister Paired = getPairedGPR(Reg, !Odd, this);
    if (!Paired || MRI.isReserved(Paired))
      continue;
    Hints.push_back(Reg);
  }
  return false;
}

void ARMBaseRegisterInfo::updateRegAllocHint(Register Reg, Register NewReg,
                                             MachineFunction &MF) const {
  MachineRegisterInfo *MRI = &MF.getRegInfo();
  std::pair<unsigned, Register> Hint = MRI->getRegAllocationHint(Reg);
  if ((Hint.first == ARMRI::RegPairOdd || Hint.first == ARMRI::RegPairEven) &&
      Hint.second.isVirtual()) {
    // If 'Reg' is one of an even / odd register pair and it has been changed
    // (e.g. coalesced) into a different register, the allocation hint on the
    // other register of the pair must be updated to reflect the relationship
    // change.
    Register OtherReg = Hint.second;
    Hint = MRI->getRegAllocationHint(OtherReg);
    // Make sure the pair has not already divorced.
    if (Hint.second == Reg) {
      MRI->setRegAllocationHint(OtherReg, Hint.first, NewReg);
      if (NewReg.isVirtual())
        MRI->setRegAllocationHint(NewReg,
                                  Hint.first == ARMRI::RegPairOdd
                                      ? ARMRI::RegPairEven
                                      : ARMRI::RegPairOdd,
                                  OtherReg);
    }
  }
}
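
/// Returns true if this function needs a dedicated base pointer (BasePtr)
/// to access its stack frame: with stack realignment or variable sized
/// objects, neither SP nor FP may be able to reach all local objects.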
bool ARMBaseRegisterInfo::hasBasePointer(const MachineFunction &MF) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  const ARMFrameLowering *TFI = getFrameLowering(MF);

  // If we have stack realignment and VLAs, we have no pointer to use to
  // access the stack. If we have stack realignment, and a large call frame,
  // we have no place to allocate the emergency spill slot.
  if (hasStackRealignment(MF) && !TFI->hasReservedCallFrame(MF))
    return true;

  // Thumb has trouble with negative offsets from the FP. Thumb2 has a limited
  // negative range for ldr/str (255), and Thumb1 is positive offsets only.
  //
  // It's going to be better to use the SP or Base Pointer instead. When
  // there are variable sized objects, we can't reference off of the SP, so
  // we reserve a Base Pointer.
  //
  // For Thumb2, estimate whether a negative offset from the frame pointer
  // will be sufficient to reach the whole stack frame. If a function has a
  // smallish frame, it's less likely to have lots of spills and callee saved
  // space, so it's all more likely to be within range of the frame pointer.
  // If it's wrong, the scavenger will still make the access work; it just
  // won't be optimal. (We should always be able to reach the emergency
  // spill slot from the frame pointer.)
  if (AFI->isThumb2Function() && MFI.hasVarSizedObjects() &&
      MFI.getLocalFrameSize() >= 128)
    return true;
  // For Thumb1, if sp moves, nothing is in range, so force a base pointer.
  // This is necessary for correctness in cases where we need an emergency
  // spill slot. (In Thumb1, we can't use a negative offset from the frame
  // pointer.)
  if (AFI->isThumb1OnlyFunction() && !TFI->hasReservedCallFrame(MF))
    return true;
  return false;
}

bool ARMBaseRegisterInfo::canRealignStack(const MachineFunction &MF) const {
  const MachineRegisterInfo *MRI = &MF.getRegInfo();
  const ARMFrameLowering *TFI = getFrameLowering(MF);
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  // We can't realign the stack if:
  // 1. Dynamic stack realignment is explicitly disabled,
  // 2. There are VLAs in the function and the base pointer is disabled.
  if (!TargetRegisterInfo::canRealignStack(MF))
    return false;
  // Stack realignment requires a frame pointer. If we already started
  // register allocation with frame pointer elimination, it is too late now.
  if (!MRI->canReserveReg(STI.getFramePointerReg()))
    return false;
  // We may also need a base pointer if there are dynamic allocas or stack
  // pointer adjustments around calls.
  if (TFI->hasReservedCallFrame(MF))
    return true;
  // A base pointer is required and allowed. Check that it isn't too late to
  // reserve it.
  return MRI->canReserveReg(BasePtr);
}
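
/// Returns true if the frame pointer cannot be eliminated for this
/// function, e.g. because of variable sized objects, a taken frame address,
/// or stack realignment.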
bool ARMBaseRegisterInfo::
cannotEliminateFrame(const MachineFunction &MF) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  if (MF.getTarget().Options.DisableFramePointerElim(MF) && MFI.adjustsStack())
    return true;
  return MFI.hasVarSizedObjects() || MFI.isFrameAddressTaken() ||
         hasStackRealignment(MF);
}

Register
ARMBaseRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  const ARMFrameLowering *TFI = getFrameLowering(MF);

  if (TFI->hasFP(MF))
    return STI.getFramePointerReg();
  return ARM::SP;
}

/// emitLoadConstPool - Emits a load from constpool to materialize the
/// specified immediate.
void ARMBaseRegisterInfo::emitLoadConstPool(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
    const DebugLoc &dl, Register DestReg, unsigned SubIdx, int Val,
    ARMCC::CondCodes Pred, Register PredReg, unsigned MIFlags) const {
  MachineFunction &MF = *MBB.getParent();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  MachineConstantPool *ConstantPool = MF.getConstantPool();
  const Constant *C =
      ConstantInt::get(Type::getInt32Ty(MF.getFunction().getContext()), Val);
  unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align(4));

  BuildMI(MBB, MBBI, dl, TII.get(ARM::LDRcp))
      .addReg(DestReg, getDefRegState(true), SubIdx)
      .addConstantPoolIndex(Idx)
      .addImm(0)
      .add(predOps(Pred, PredReg))
      .setMIFlags(MIFlags);
}

bool ARMBaseRegisterInfo::
requiresRegisterScavenging(const MachineFunction &MF) const {
  return true;
}

bool ARMBaseRegisterInfo::
requiresFrameIndexScavenging(const MachineFunction &MF) const {
  return true;
}

bool ARMBaseRegisterInfo::
requiresVirtualBaseRegisters(const MachineFunction &MF) const {
  return true;
}
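
/// Returns the byte offset already encoded in the frame-index instruction
/// MI, scaled to bytes according to its addressing mode; Idx is the operand
/// index of the frame index itself.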
int64_t ARMBaseRegisterInfo::
getFrameIndexInstrOffset(const MachineInstr *MI, int Idx) const {
  const MCInstrDesc &Desc = MI->getDesc();
  unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
  int64_t InstrOffs = 0;
  int Scale = 1;
  unsigned ImmIdx = 0;
  switch (AddrMode) {
  case ARMII::AddrModeT2_i8:
  case ARMII::AddrModeT2_i8neg:
  case ARMII::AddrModeT2_i8pos:
  case ARMII::AddrModeT2_i12:
  case ARMII::AddrMode_i12:
    InstrOffs = MI->getOperand(Idx+1).getImm();
    Scale = 1;
    break;
  case ARMII::AddrMode5: {
    // VFP address mode.
    const MachineOperand &OffOp = MI->getOperand(Idx+1);
    InstrOffs = ARM_AM::getAM5Offset(OffOp.getImm());
    if (ARM_AM::getAM5Op(OffOp.getImm()) == ARM_AM::sub)
      InstrOffs = -InstrOffs;
    Scale = 4;
    break;
  }
  case ARMII::AddrMode2:
    ImmIdx = Idx+2;
    InstrOffs = ARM_AM::getAM2Offset(MI->getOperand(ImmIdx).getImm());
    if (ARM_AM::getAM2Op(MI->getOperand(ImmIdx).getImm()) == ARM_AM::sub)
      InstrOffs = -InstrOffs;
    break;
  case ARMII::AddrMode3:
    ImmIdx = Idx+2;
    InstrOffs = ARM_AM::getAM3Offset(MI->getOperand(ImmIdx).getImm());
    if (ARM_AM::getAM3Op(MI->getOperand(ImmIdx).getImm()) == ARM_AM::sub)
      InstrOffs = -InstrOffs;
    break;
  case ARMII::AddrModeT1_s:
    ImmIdx = Idx+1;
    InstrOffs = MI->getOperand(ImmIdx).getImm();
    Scale = 4;
    break;
  default:
    llvm_unreachable("Unsupported addressing mode!");
  }

  return InstrOffs * Scale;
}

/// needsFrameBaseReg - Returns true if the instruction's frame index
/// reference would be better served by a base register other than FP
/// or SP. Used by LocalStackFrameAllocation to determine which frame index
/// references it should create new base registers for.
bool ARMBaseRegisterInfo::
needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const {
  for (unsigned i = 0; !MI->getOperand(i).isFI(); ++i) {
    assert(i < MI->getNumOperands() &&
           "Instr doesn't have FrameIndex operand!");
  }

  // It's the load/store FI references that cause issues, as it can be
  // difficult to materialize the offset if it won't fit in the literal
  // field. Estimate, based on the size of the local frame and some
  // conservative assumptions about the rest of the stack frame (note, this
  // is pre-regalloc, so we don't know everything for certain yet), whether
  // this offset is likely to be out of range of the immediate. Return true
  // if so.

  // We only generate virtual base registers for loads and stores, so
  // return false for everything else.
  unsigned Opc = MI->getOpcode();
  switch (Opc) {
  case ARM::LDRi12: case ARM::LDRH: case ARM::LDRBi12:
  case ARM::STRi12: case ARM::STRH: case ARM::STRBi12:
  case ARM::t2LDRi12: case ARM::t2LDRi8:
  case ARM::t2STRi12: case ARM::t2STRi8:
  case ARM::VLDRS: case ARM::VLDRD:
  case ARM::VSTRS: case ARM::VSTRD:
  case ARM::tSTRspi: case ARM::tLDRspi:
    break;
  default:
    return false;
  }

  // Without a virtual base register, if the function has variable sized
  // objects, all fixed-size local references will be via the frame pointer.
  // Approximate the offset and see if it's legal for the instruction.
  // Note that the incoming offset is based on the SP value at function entry,
  // so it'll be negative.
  MachineFunction &MF = *MI->getParent()->getParent();
  const ARMFrameLowering *TFI = getFrameLowering(MF);
  MachineFrameInfo &MFI = MF.getFrameInfo();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();

  // Estimate an offset from the frame pointer.
  // Conservatively assume all callee-saved registers get pushed. R4-R6
  // will be earlier than the FP, so we ignore those.
  // R7, LR
  int64_t FPOffset = Offset - 8;
  // ARM and Thumb2 functions also need to consider R8-R11 and D8-D15
  if (!AFI->isThumbFunction() || !AFI->isThumb1OnlyFunction())
    FPOffset -= 80;
  // Estimate an offset from the stack pointer.
  // The incoming offset is relative to the SP at the start of the function,
  // but when we access the local it'll be relative to the SP after local
  // allocation, so adjust our SP-relative offset by that allocation size.
  Offset += MFI.getLocalFrameSize();
  // Assume that we'll have at least some spill slots allocated.
  // FIXME: This is a total SWAG number. We should run some statistics
  // and pick a real one.
  Offset += 128; // 128 bytes of spill slots

  // If there's a frame pointer and the addressing mode allows it, try using
  // it. The FP is only available if there is no dynamic realignment. We
  // don't know for sure yet whether we'll need that, so we guess based
  // on whether there are any local variables that would trigger it.
  if (TFI->hasFP(MF) &&
      !((MFI.getLocalFrameMaxAlign() > TFI->getStackAlign()) &&
        canRealignStack(MF))) {
    if (isFrameOffsetLegal(MI, getFrameRegister(MF), FPOffset))
      return false;
  }
  // If we can reference via the stack pointer, try that.
  // FIXME: This (and the code that resolves the references) can be improved
  //        to only disallow SP relative references in the live range of
  //        the VLA(s). In practice, it's unclear how much difference that
  //        would make, but it may be worth doing.
  if (!MFI.hasVarSizedObjects() && isFrameOffsetLegal(MI, ARM::SP, Offset))
    return false;

  // The offset likely isn't legal; we want to allocate a virtual base
  // register.
  return true;
}

/// materializeFrameBaseRegister - Insert defining instruction(s) for BaseReg
/// to be a pointer to FrameIdx at the beginning of the basic block.
Register
ARMBaseRegisterInfo::materializeFrameBaseRegister(MachineBasicBlock *MBB,
                                                  int FrameIdx,
                                                  int64_t Offset) const {
  ARMFunctionInfo *AFI = MBB->getParent()->getInfo<ARMFunctionInfo>();
  unsigned ADDriOpc = !AFI->isThumbFunction() ? ARM::ADDri :
    (AFI->isThumb1OnlyFunction() ? ARM::tADDframe : ARM::t2ADDri);

  MachineBasicBlock::iterator Ins = MBB->begin();
  DebugLoc DL;                  // Defaults to "unknown"
  if (Ins != MBB->end())
    DL = Ins->getDebugLoc();

  const MachineFunction &MF = *MBB->getParent();
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  const MCInstrDesc &MCID = TII.get(ADDriOpc);
  Register BaseReg = MRI.createVirtualRegister(&ARM::GPRRegClass);
  MRI.constrainRegClass(BaseReg, TII.getRegClass(MCID, 0, this, MF));

  MachineInstrBuilder MIB = BuildMI(*MBB, Ins, DL, MCID, BaseReg)
    .addFrameIndex(FrameIdx).addImm(Offset);

  if (!AFI->isThumb1OnlyFunction())
    MIB.add(predOps(ARMCC::AL)).add(condCodeOp());

  return BaseReg;
}

void ARMBaseRegisterInfo::resolveFrameIndex(MachineInstr &MI, Register BaseReg,
                                            int64_t Offset) const {
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  const ARMBaseInstrInfo &TII =
      *static_cast<const ARMBaseInstrInfo *>(MF.getSubtarget().getInstrInfo());
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  int Off = Offset; // ARM doesn't need the general 64-bit offsets
  unsigned i = 0;

  assert(!AFI->isThumb1OnlyFunction() &&
         "This resolveFrameIndex does not support Thumb1!");

  while (!MI.getOperand(i).isFI()) {
    ++i;
    assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }
  bool Done = false;
  if (!AFI->isThumbFunction())
    Done = rewriteARMFrameIndex(MI, i, BaseReg, Off, TII);
  else {
    assert(AFI->isThumb2Function());
    Done = rewriteT2FrameIndex(MI, i, BaseReg, Off, TII, this);
  }
  assert(Done && "Unable to resolve frame index!");
  (void)Done;
}
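
/// Returns true if Offset, combined with the offset already encoded in MI,
/// fits in the immediate field of MI's addressing mode when based off
/// BaseReg.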
bool ARMBaseRegisterInfo::isFrameOffsetLegal(const MachineInstr *MI,
                                             Register BaseReg,
                                             int64_t Offset) const {
  const MCInstrDesc &Desc = MI->getDesc();
  unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
  unsigned i = 0;
  for (; !MI->getOperand(i).isFI(); ++i)
    assert(i+1 < MI->getNumOperands() &&
           "Instr doesn't have FrameIndex operand!");

  // AddrMode4 and AddrMode6 cannot handle any offset.
  if (AddrMode == ARMII::AddrMode4 || AddrMode == ARMII::AddrMode6)
    return Offset == 0;

  unsigned NumBits = 0;
  unsigned Scale = 1;
  bool isSigned = true;
  switch (AddrMode) {
  case ARMII::AddrModeT2_i8:
  case ARMII::AddrModeT2_i8pos:
  case ARMII::AddrModeT2_i8neg:
  case ARMII::AddrModeT2_i12:
    // i8 supports only negative offsets and i12 supports only positive, so
    // pick the appropriate instruction based on the sign of Offset.
    Scale = 1;
    if (Offset < 0) {
      NumBits = 8;
      Offset = -Offset;
    } else {
      NumBits = 12;
    }
    break;
  case ARMII::AddrMode5:
    // VFP address mode.
    NumBits = 8;
    Scale = 4;
    break;
  case ARMII::AddrMode_i12:
  case ARMII::AddrMode2:
    NumBits = 12;
    break;
  case ARMII::AddrMode3:
    NumBits = 8;
    break;
  case ARMII::AddrModeT1_s:
    NumBits = (BaseReg == ARM::SP ? 8 : 5);
    Scale = 4;
    isSigned = false;
    break;
  default:
    llvm_unreachable("Unsupported addressing mode!");
  }

  Offset += getFrameIndexInstrOffset(MI, i);
  // Make sure the offset is encodable for instructions that scale the
  // immediate.
  if ((Offset & (Scale-1)) != 0)
    return false;

  if (isSigned && Offset < 0)
    Offset = -Offset;

  unsigned Mask = (1 << NumBits) - 1;
  if ((unsigned)Offset <= Mask * Scale)
    return true;

  return false;
}
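
/// Rewrites the frame index operand of MI into a FrameReg + Offset
/// reference, materializing the offset into a scratch register when it
/// cannot be encoded in the instruction's immediate field. Thumb1 is
/// handled by a separate implementation.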
bool
ARMBaseRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                         int SPAdj, unsigned FIOperandNum,
                                         RegScavenger *RS) const {
  MachineInstr &MI = *II;
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  const ARMBaseInstrInfo &TII =
      *static_cast<const ARMBaseInstrInfo *>(MF.getSubtarget().getInstrInfo());
  const ARMFrameLowering *TFI = getFrameLowering(MF);
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  assert(!AFI->isThumb1OnlyFunction() &&
         "This eliminateFrameIndex does not support Thumb1!");
  int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
  Register FrameReg;

  int Offset = TFI->ResolveFrameIndexReference(MF, FrameIndex, FrameReg, SPAdj);

  // PEI::scavengeFrameVirtualRegs() cannot accurately track SPAdj because the
  // call frame setup/destroy instructions have already been eliminated. That
  // means the stack pointer cannot be used to access the emergency spill slot
  // when !hasReservedCallFrame().
#ifndef NDEBUG
  if (RS && FrameReg == ARM::SP && RS->isScavengingFrameIndex(FrameIndex)){
    assert(TFI->hasReservedCallFrame(MF) &&
           "Cannot use SP to access the emergency spill slot in "
           "functions without a reserved call frame");
    assert(!MF.getFrameInfo().hasVarSizedObjects() &&
           "Cannot use SP to access the emergency spill slot in "
           "functions with variable sized frame objects");
  }
#endif // NDEBUG

  assert(!MI.isDebugValue() &&
         "DBG_VALUEs should be handled in target-independent code");

  // Modify MI as necessary to handle as much of 'Offset' as possible.
  bool Done = false;
  if (!AFI->isThumbFunction())
    Done = rewriteARMFrameIndex(MI, FIOperandNum, FrameReg, Offset, TII);
  else {
    assert(AFI->isThumb2Function());
    Done = rewriteT2FrameIndex(MI, FIOperandNum, FrameReg, Offset, TII, this);
  }
  if (Done)
    return false;

  // If we get here, the immediate doesn't fit into the instruction. We
  // folded as much as possible above; handle the rest, providing a register
  // that is SP+LargeImm.
  assert(
      (Offset ||
       (MI.getDesc().TSFlags & ARMII::AddrModeMask) == ARMII::AddrMode4 ||
       (MI.getDesc().TSFlags & ARMII::AddrModeMask) == ARMII::AddrMode6 ||
       (MI.getDesc().TSFlags & ARMII::AddrModeMask) == ARMII::AddrModeT2_i7 ||
       (MI.getDesc().TSFlags & ARMII::AddrModeMask) == ARMII::AddrModeT2_i7s2 ||
       (MI.getDesc().TSFlags & ARMII::AddrModeMask) ==
           ARMII::AddrModeT2_i7s4) &&
      "This code isn't needed if offset already handled!");

  unsigned ScratchReg = 0;
  int PIdx = MI.findFirstPredOperandIdx();
  ARMCC::CondCodes Pred = (PIdx == -1)
    ? ARMCC::AL : (ARMCC::CondCodes)MI.getOperand(PIdx).getImm();
  Register PredReg = (PIdx == -1) ? Register() : MI.getOperand(PIdx+1).getReg();

  const MCInstrDesc &MCID = MI.getDesc();
  const TargetRegisterClass *RegClass =
      TII.getRegClass(MCID, FIOperandNum, this, *MI.getParent()->getParent());

  if (Offset == 0 && (FrameReg.isVirtual() || RegClass->contains(FrameReg)))
    // Must be addrmode4/6.
    MI.getOperand(FIOperandNum).ChangeToRegister(FrameReg, false, false, false);
  else {
    ScratchReg = MF.getRegInfo().createVirtualRegister(RegClass);
    if (!AFI->isThumbFunction())
      emitARMRegPlusImmediate(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg,
                              Offset, Pred, PredReg, TII);
    else {
      assert(AFI->isThumb2Function());
      emitT2RegPlusImmediate(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg,
                             Offset, Pred, PredReg, TII);
    }
    // Update the original instruction to use the scratch register.
    MI.getOperand(FIOperandNum).ChangeToRegister(ScratchReg, false, false, true);
  }
  return false;
}
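
/// Decides whether the register coalescer may combine SrcRC and DstRC into
/// NewRC for this copy. Coalescing into large (256-bit and wider) classes is
/// rationed per basic block to avoid exhausting the register file (see
/// PR18825); small classes are always allowed to coalesce.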
bool ARMBaseRegisterInfo::shouldCoalesce(MachineInstr *MI,
                                         const TargetRegisterClass *SrcRC,
                                         unsigned SubReg,
                                         const TargetRegisterClass *DstRC,
                                         unsigned DstSubReg,
                                         const TargetRegisterClass *NewRC,
                                         LiveIntervals &LIS) const {
  auto MBB = MI->getParent();
  auto MF = MBB->getParent();
  const MachineRegisterInfo &MRI = MF->getRegInfo();
  // If not copying into a sub-register this should be ok because we shouldn't
  // need to split the reg.
  if (!DstSubReg)
    return true;
  // Small registers don't frequently cause a problem, so we can coalesce them.
  if (getRegSizeInBits(*NewRC) < 256 && getRegSizeInBits(*DstRC) < 256 &&
      getRegSizeInBits(*SrcRC) < 256)
    return true;

  auto NewRCWeight =
      MRI.getTargetRegisterInfo()->getRegClassWeight(NewRC);
  auto SrcRCWeight =
      MRI.getTargetRegisterInfo()->getRegClassWeight(SrcRC);
  auto DstRCWeight =
      MRI.getTargetRegisterInfo()->getRegClassWeight(DstRC);
  // If the source register class is more expensive than the destination, the
  // coalescing is probably profitable.
  if (SrcRCWeight.RegWeight > NewRCWeight.RegWeight)
    return true;
  if (DstRCWeight.RegWeight > NewRCWeight.RegWeight)
    return true;

  // If the register allocator isn't constrained, we can always allow
  // coalescing; unfortunately we don't know yet whether we will be
  // constrained. The goal of this heuristic is to restrict how many
  // expensive registers we allow to coalesce in a given basic block.
  auto AFI = MF->getInfo<ARMFunctionInfo>();
  auto It = AFI->getCoalescedWeight(MBB);

  LLVM_DEBUG(dbgs() << "\tARM::shouldCoalesce - Coalesced Weight: "
                    << It->second << "\n");
  LLVM_DEBUG(dbgs() << "\tARM::shouldCoalesce - Reg Weight: "
                    << NewRCWeight.RegWeight << "\n");

  // This number is the largest round number that meets the criteria:
  //  (1) addresses PR18825
  //  (2) generates better code in some test cases (like vldm-shed-a9.ll)
  //  (3) doesn't regress any test cases (in-tree, test-suite, and SPEC)
  // In practice the SizeMultiplier will only factor in for straight line
  // code that uses a lot of NEON vectors, which isn't terribly common.
  unsigned SizeMultiplier = MBB->size()/100;
  SizeMultiplier = SizeMultiplier ? SizeMultiplier : 1;
  if (It->second < NewRCWeight.WeightLimit * SizeMultiplier) {
    It->second += NewRCWeight.RegWeight;
    return true;
  }
  return false;
}
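
/// Returns true if a copy source can be rewritten as a SrcRC/SrcSubReg
/// access. Extracting an SPR from an arbitrary DPR is disallowed, since
/// only the DPR_VFP2 subset (D0-D15) has S sub-registers.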
bool ARMBaseRegisterInfo::shouldRewriteCopySrc(const TargetRegisterClass *DefRC,
                                               unsigned DefSubReg,
                                               const TargetRegisterClass *SrcRC,
                                               unsigned SrcSubReg) const {
  // We can't extract an SPR from an arbitrary DPR (as opposed to a DPR_VFP2).
  if (DefRC == &ARM::SPRRegClass && DefSubReg == 0 &&
      SrcRC == &ARM::DPRRegClass &&
      (SrcSubReg == ARM::ssub_0 || SrcSubReg == ARM::ssub_1))
    return false;

  return TargetRegisterInfo::shouldRewriteCopySrc(DefRC, DefSubReg,
                                                  SrcRC, SrcSubReg);
}