//===-- ARMSubtarget.cpp - ARM Subtarget Information ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the ARM specific subclass of TargetSubtargetInfo.
//
//===----------------------------------------------------------------------===//

#include "ARM.h"

#include "ARMCallLowering.h"
#include "ARMFrameLowering.h"
#include "ARMInstrInfo.h"
#include "ARMLegalizerInfo.h"
#include "ARMRegisterBankInfo.h"
#include "ARMSubtarget.h"
#include "ARMTargetMachine.h"
#include "MCTargetDesc/ARMMCTargetDesc.h"
#include "Thumb1FrameLowering.h"
#include "Thumb1InstrInfo.h"
#include "Thumb2InstrInfo.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelect.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCTargetOptions.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/TargetParser/ARMTargetParser.h"
#include "llvm/TargetParser/Triple.h"

using namespace llvm;

#define DEBUG_TYPE "arm-subtarget"

#define GET_SUBTARGETINFO_TARGET_DESC
#define GET_SUBTARGETINFO_CTOR
#include "ARMGenSubtargetInfo.inc"

static cl::opt<bool>
UseFusedMulOps("arm-use-mulops",
               cl::init(true), cl::Hidden);

enum ITMode {
  DefaultIT,
  RestrictedIT
};

static cl::opt<ITMode>
IT(cl::desc("IT block support"), cl::Hidden, cl::init(DefaultIT),
   cl::values(clEnumValN(DefaultIT, "arm-default-it",
                         "Generate any type of IT block"),
              clEnumValN(RestrictedIT, "arm-restrict-it",
                         "Disallow complex IT blocks")));

/// ForceFastISel - Use fast-isel, even for subtargets where it is not
/// currently supported (for testing only).
static cl::opt<bool>
ForceFastISel("arm-force-fast-isel",
              cl::init(false), cl::Hidden);

/// initializeSubtargetDependencies - Initializes using a CPU and feature string
/// so that we can use initializer lists for subtarget initialization.
ARMSubtarget &ARMSubtarget::initializeSubtargetDependencies(StringRef CPU,
                                                            StringRef FS) {
  initializeEnvironment();
  initSubtargetFeatures(CPU, FS);
  return *this;
}

ARMFrameLowering *ARMSubtarget::initializeFrameLowering(StringRef CPU,
                                                        StringRef FS) {
  ARMSubtarget &STI = initializeSubtargetDependencies(CPU, FS);
  if (STI.isThumb1Only())
    return (ARMFrameLowering *)new Thumb1FrameLowering(STI);

  return new ARMFrameLowering(STI);
}

ARMSubtarget::ARMSubtarget(const Triple &TT, const std::string &CPU,
                           const std::string &FS,
                           const ARMBaseTargetMachine &TM, bool IsLittle,
                           bool MinSize)
    : ARMGenSubtargetInfo(TT, CPU, /*TuneCPU*/ CPU, FS),
      UseMulOps(UseFusedMulOps), CPUString(CPU), OptMinSize(MinSize),
      IsLittle(IsLittle), TargetTriple(TT), Options(TM.Options), TM(TM),
      FrameLowering(initializeFrameLowering(CPU, FS)),
      // At this point initializeSubtargetDependencies has been called so
      // we can query directly.
      InstrInfo(isThumb1Only()
                    ? (ARMBaseInstrInfo *)new Thumb1InstrInfo(*this)
                : !isThumb()
                    ? (ARMBaseInstrInfo *)new ARMInstrInfo(*this)
                    : (ARMBaseInstrInfo *)new Thumb2InstrInfo(*this)),
      TLInfo(TM, *this) {

  CallLoweringInfo.reset(new ARMCallLowering(*getTargetLowering()));
  Legalizer.reset(new ARMLegalizerInfo(*this));

  auto *RBI = new ARMRegisterBankInfo(*getRegisterInfo());

  // FIXME: At this point, we can't rely on Subtarget having RBI.
  // It's awkward to mix passing RBI and the Subtarget; should we pass
  // TII/TRI as well?
  InstSelector.reset(createARMInstructionSelector(TM, *this, *RBI));

  RegBankInfo.reset(RBI);
}

const CallLowering *ARMSubtarget::getCallLowering() const {
  return CallLoweringInfo.get();
}

InstructionSelector *ARMSubtarget::getInstructionSelector() const {
  return InstSelector.get();
}

const LegalizerInfo *ARMSubtarget::getLegalizerInfo() const {
  return Legalizer.get();
}

const RegisterBankInfo *ARMSubtarget::getRegBankInfo() const {
  return RegBankInfo.get();
}

bool ARMSubtarget::isXRaySupported() const {
  // We don't currently support Thumb, but Windows requires Thumb.
  return hasV6Ops() && hasARMOps() && !isTargetWindows();
}

void ARMSubtarget::initializeEnvironment() {
  // MCAsmInfo isn't always present (e.g. in opt) so we can't initialize this
  // directly from it, but we can try to make sure they're consistent when
  // both are available.
  UseSjLjEH = (isTargetDarwin() && !isTargetWatchABI() &&
               Options.ExceptionModel == ExceptionHandling::None) ||
              Options.ExceptionModel == ExceptionHandling::SjLj;
  assert((!TM.getMCAsmInfo() ||
          (TM.getMCAsmInfo()->getExceptionHandlingType() ==
           ExceptionHandling::SjLj) == UseSjLjEH) &&
         "inconsistent sjlj choice between CodeGen and MC");
}

void ARMSubtarget::initSubtargetFeatures(StringRef CPU, StringRef FS) {
  if (CPUString.empty()) {
    CPUString = "generic";

    if (isTargetDarwin()) {
      StringRef ArchName = TargetTriple.getArchName();
      ARM::ArchKind AK = ARM::parseArch(ArchName);
      if (AK == ARM::ArchKind::ARMV7S)
        // Default to the Swift CPU when targeting armv7s/thumbv7s.
        CPUString = "swift";
      else if (AK == ARM::ArchKind::ARMV7K)
        // Default to the Cortex-a7 CPU when targeting armv7k/thumbv7k.
        // ARMv7k does not use SjLj exception handling.
        CPUString = "cortex-a7";
    }
  }

  // Insert the architecture feature derived from the target triple into the
  // feature string. This is important for setting features that are implied
  // based on the architecture version.
  std::string ArchFS = ARM_MC::ParseARMTriple(TargetTriple, CPUString);
  if (!FS.empty()) {
    if (!ArchFS.empty())
      ArchFS = (Twine(ArchFS) + "," + FS).str();
    else
      ArchFS = std::string(FS);
  }
  ParseSubtargetFeatures(CPUString, /*TuneCPU*/ CPUString, ArchFS);

  // FIXME: This used to enable V6T2 support implicitly for Thumb2 mode.
  // Assert this for now to make the change obvious.
  assert(hasV6T2Ops() || !hasThumb2());

  if (genExecuteOnly()) {
    // Execute-only support for >= v8-M Baseline requires movt support.
    if (hasV8MBaselineOps())
      NoMovt = false;
    if (!hasV6MOps())
      report_fatal_error("Cannot generate execute-only code for this target");
  }

  // Keep a pointer to static instruction cost data for the specified CPU.
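  // (Used later by, e.g., getMispredictionPenalty() below, which simply
  // returns SchedModel.MispredictPenalty.)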
  SchedModel = getSchedModelForCPU(CPUString);

  // Initialize scheduling itinerary for the specified CPU.
  InstrItins = getInstrItineraryForCPU(CPUString);

  // FIXME: this is invalid for WindowsCE
  if (isTargetWindows())
    NoARM = true;

  if (isAAPCS_ABI())
    stackAlignment = Align(8);
  if (isTargetNaCl() || isAAPCS16_ABI())
    stackAlignment = Align(16);

  // FIXME: Completely disable sibcall for Thumb1 since ThumbRegisterInfo::
  // emitEpilogue is not ready for them. Thumb tail calls also use t2B, as
  // the Thumb1 16-bit unconditional branch doesn't have sufficient relocation
  // support in the assembler and linker to be used. This would need to be
  // fixed to fully support tail calls in Thumb1.
  //
  // For ARMv8-M, we /do/ implement tail calls. Doing this is tricky for v8-M
  // baseline, since the LDM/POP instruction on Thumb doesn't take LR. This
  // means if we need to reload LR, it takes extra instructions, which outweighs
  // the value of the tail call; but here we don't know yet whether LR is going
  // to be used. We take the optimistic approach of generating the tail call and
  // perhaps taking a hit if we need to restore the LR.

  // Thumb1 PIC calls to external symbols use BX, so they can be tail calls,
  // but we need to make sure there are enough registers; the only valid
  // registers are the 4 used for parameters. We don't currently do this
  // case.

  SupportsTailCall = !isThumb1Only() || hasV8MBaselineOps();

  if (isTargetMachO() && isTargetIOS() && getTargetTriple().isOSVersionLT(5, 0))
    SupportsTailCall = false;

  switch (IT) {
  case DefaultIT:
    RestrictIT = false;
    break;
  case RestrictedIT:
    RestrictIT = true;
    break;
  }

  // NEON f32 ops are non-IEEE 754 compliant. Darwin is ok with it by default.
  const FeatureBitset &Bits = getFeatureBits();
  if ((Bits[ARM::ProcA5] || Bits[ARM::ProcA8]) && // Where this matters
      (Options.UnsafeFPMath || isTargetDarwin()))
    HasNEONForFP = true;

  if (isRWPI())
    ReserveR9 = true;

  // If MVEVectorCostFactor is still 0 (has not been set to anything else),
  // default it to 2.
  if (MVEVectorCostFactor == 0)
    MVEVectorCostFactor = 2;

  // FIXME: Teach TableGen to deal with these instead of doing it manually here.
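  // Each case below overrides per-core tuning knobs (LdStMultipleTiming,
  // MaxInterleaveFactor, PreISelOperandLatencyAdjustment, ...) that TableGen
  // cannot yet express.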
  switch (ARMProcFamily) {
  case Others:
  case CortexA5:
    break;
  case CortexA7:
    LdStMultipleTiming = DoubleIssue;
    break;
  case CortexA8:
    LdStMultipleTiming = DoubleIssue;
    break;
  case CortexA9:
    LdStMultipleTiming = DoubleIssueCheckUnalignedAccess;
    PreISelOperandLatencyAdjustment = 1;
    break;
  case CortexA12:
    break;
  case CortexA15:
    MaxInterleaveFactor = 2;
    PreISelOperandLatencyAdjustment = 1;
    PartialUpdateClearance = 12;
    break;
  case CortexA17:
  case CortexA32:
  case CortexA35:
  case CortexA53:
  case CortexA55:
  case CortexA57:
  case CortexA72:
  case CortexA73:
  case CortexA75:
  case CortexA76:
  case CortexA77:
  case CortexA78:
  case CortexA78AE:
  case CortexA78C:
  case CortexA510:
  case CortexA710:
  case CortexR4:
  case CortexR5:
  case CortexR7:
  case CortexM3:
  case CortexM55:
  case CortexM7:
  case CortexM85:
  case CortexR52:
  case CortexR52plus:
  case CortexX1:
  case CortexX1C:
    break;
  case Exynos:
    LdStMultipleTiming = SingleIssuePlusExtras;
    MaxInterleaveFactor = 4;
    if (!isThumb())
      PreferBranchLogAlignment = 3;
    break;
  case Kryo:
    break;
  case Krait:
    PreISelOperandLatencyAdjustment = 1;
    break;
  case NeoverseV1:
    break;
  case Swift:
    MaxInterleaveFactor = 2;
    LdStMultipleTiming = SingleIssuePlusExtras;
    PreISelOperandLatencyAdjustment = 1;
    PartialUpdateClearance = 12;
    break;
  }
}

bool ARMSubtarget::isTargetHardFloat() const { return TM.isTargetHardFloat(); }

bool ARMSubtarget::isAPCS_ABI() const {
  assert(TM.TargetABI != ARMBaseTargetMachine::ARM_ABI_UNKNOWN);
  return TM.TargetABI == ARMBaseTargetMachine::ARM_ABI_APCS;
}
bool ARMSubtarget::isAAPCS_ABI() const {
  assert(TM.TargetABI != ARMBaseTargetMachine::ARM_ABI_UNKNOWN);
  return TM.TargetABI == ARMBaseTargetMachine::ARM_ABI_AAPCS ||
         TM.TargetABI == ARMBaseTargetMachine::ARM_ABI_AAPCS16;
}
bool ARMSubtarget::isAAPCS16_ABI() const {
  assert(TM.TargetABI != ARMBaseTargetMachine::ARM_ABI_UNKNOWN);
  return TM.TargetABI == ARMBaseTargetMachine::ARM_ABI_AAPCS16;
}

bool ARMSubtarget::isROPI() const {
  return TM.getRelocationModel() == Reloc::ROPI ||
         TM.getRelocationModel() == Reloc::ROPI_RWPI;
}
bool ARMSubtarget::isRWPI() const {
  return TM.getRelocationModel() == Reloc::RWPI ||
         TM.getRelocationModel() == Reloc::ROPI_RWPI;
}

bool ARMSubtarget::isGVIndirectSymbol(const GlobalValue *GV) const {
  if (!TM.shouldAssumeDSOLocal(GV))
    return true;

  // 32-bit MachO has no relocation for a-b if a is undefined, even if b is in
  // the section that is being relocated. This means we have to use a load even
  // for GVs that are known to be local to the DSO.
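  // For example, a GlobalValue with hasCommonLinkage(), compiled
  // position-independent for 32-bit MachO, takes the indirect path below even
  // though shouldAssumeDSOLocal() returned true for it above.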
  if (isTargetMachO() && TM.isPositionIndependent() &&
      (GV->isDeclarationForLinker() || GV->hasCommonLinkage()))
    return true;

  return false;
}

bool ARMSubtarget::isGVInGOT(const GlobalValue *GV) const {
  return isTargetELF() && TM.isPositionIndependent() && !GV->isDSOLocal();
}

unsigned ARMSubtarget::getMispredictionPenalty() const {
  return SchedModel.MispredictPenalty;
}

bool ARMSubtarget::enableMachineScheduler() const {
  // The MachineScheduler can increase register usage, making us use more high
  // registers and end up with more T2 instructions that cannot be converted to
  // T1 instructions. At least until we do better at converting to Thumb1
  // instructions, don't use the MachineScheduler on Cortex-M at -Oz, where we
  // are size-paranoid; rely on the DAG register pressure scheduler instead.
  if (isMClass() && hasMinSize())
    return false;
  // Enable the MachineScheduler before register allocation for subtargets
  // with the use-misched feature.
  return useMachineScheduler();
}

bool ARMSubtarget::enableSubRegLiveness() const {
  // Enable SubRegLiveness for MVE to better optimize s subregs for mqpr regs
  // and q subregs for qqqqpr regs.
  return hasMVEIntegerOps();
}

bool ARMSubtarget::enableMachinePipeliner() const {
  // Enable the MachinePipeliner before register allocation for subtargets
  // with the use-mipipeliner feature.
  return getSchedModel().hasInstrSchedModel() && useMachinePipeliner();
}

bool ARMSubtarget::useDFAforSMS() const { return false; }

// This overrides the PostRAScheduler bit in the SchedModel for any CPU.
bool ARMSubtarget::enablePostRAScheduler() const {
  if (enableMachineScheduler())
    return false;
  if (disablePostRAScheduler())
    return false;
  // Thumb1 cores will generally not benefit from post-ra scheduling.
  return !isThumb1Only();
}

bool ARMSubtarget::enablePostRAMachineScheduler() const {
  if (!enableMachineScheduler())
    return false;
  if (disablePostRAScheduler())
    return false;
  return !isThumb1Only();
}

bool ARMSubtarget::useStride4VFPs() const {
  // For general targets, the prologue can grow when VFPs are allocated with
  // stride 4 (more vpush instructions). But WatchOS uses a compact unwind
  // format which is more important to get right.
  return isTargetWatchABI() ||
         (useWideStrideVFP() && !OptMinSize);
}

bool ARMSubtarget::useMovt() const {
  // NOTE Windows on ARM needs to use movw/movt pairs to materialise 32-bit
  // immediates as it is inherently position independent, and may be out of
  // range otherwise.
  return !NoMovt && hasV8MBaselineOps() &&
         (isTargetWindows() || !OptMinSize || genExecuteOnly());
}

bool ARMSubtarget::useFastISel() const {
  // Enable fast-isel for any target, for testing only.
  if (ForceFastISel)
    return true;

  // Limit fast-isel to the targets that are or have been tested.
  if (!hasV6Ops())
    return false;

  // Thumb2 support on iOS; ARM support on iOS, Linux and NaCl.
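  // (Absent the -arm-force-fast-isel override above, a Thumb Linux target,
  // for instance, never uses fast-isel: the isTargetLinux() clause below
  // requires ARM mode.)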
  return TM.Options.EnableFastISel &&
         ((isTargetMachO() && !isThumb1Only()) ||
          (isTargetLinux() && !isThumb()) || (isTargetNaCl() && !isThumb()));
}

unsigned ARMSubtarget::getGPRAllocationOrder(const MachineFunction &MF) const {
  // The GPR register class has multiple possible allocation orders, with
  // tradeoffs preferred by different sub-architectures and optimisation goals.
  // The allocation orders are:
  // 0: (the default tablegen order, not used)
  // 1: r14, r0-r13
  // 2: r0-r7
  // 3: r0-r7, r12, lr, r8-r11
  // Note that the register allocator will change this order so that
  // callee-saved registers are used later, as they require extra work in the
  // prologue/epilogue (though we sometimes override that).

  // For thumb1-only targets, only the low registers are allocatable.
  if (isThumb1Only())
    return 2;

  // Allocate low registers first, so we can select more 16-bit instructions.
  // We also (in ignoreCSRForAllocationOrder) override the default behaviour
  // with regard to callee-saved registers, because pushing extra registers is
  // much cheaper (in terms of code size) than using high registers. After
  // that, we allocate r12 (doesn't need to be saved), lr (saving it means we
  // can return with the pop, don't need an extra "bx lr") and then the rest of
  // the high registers.
  if (isThumb2() && MF.getFunction().hasMinSize())
    return 3;

  // Otherwise, allocate in the default order, using LR first because saving it
  // allows a shorter epilogue sequence.
  return 1;
}

bool ARMSubtarget::ignoreCSRForAllocationOrder(const MachineFunction &MF,
                                               MCRegister PhysReg) const {
  // To minimize code size in Thumb2, we prefer the usage of low regs (lower
  // cost per use) so we can use narrow encodings. By default, caller-saved
  // registers (e.g. lr, r12) are always allocated first, regardless of
  // their cost per use. When optimizing for minimum size, we prefer the low
  // regs even if they are CSRs, because a push/pop of an extra CSR can
  // usually be folded into an existing push/pop.
  return isThumb2() && MF.getFunction().hasMinSize() &&
         ARM::GPRRegClass.contains(PhysReg);
}

ARMSubtarget::PushPopSplitVariation
ARMSubtarget::getPushPopSplitVariation(const MachineFunction &MF) const {
  const Function &F = MF.getFunction();
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const std::vector<CalleeSavedInfo> CSI =
      MF.getFrameInfo().getCalleeSavedInfo();

  // Thumb1 always splits the pushes at R7, because the Thumb1 push instruction
  // cannot use high registers except for lr.
  if (isThumb1Only())
    return SplitR7;

  // If R7 is the frame pointer, we must split at R7 to ensure that the
  // previous frame pointer (R7) and return address (LR) are adjacent on the
  // stack, to form a valid frame record.
  if (getFramePointerReg() == ARM::R7 &&
      MF.getTarget().Options.FramePointerIsReserved(MF))
    return SplitR7;

  // Return SplitR11WindowsSEH when Windows CFI is enabled and the stack
  // pointer needs to be restored from the frame pointer (r11 plus an offset).
  // That unwinding cannot be expressed with SEH unwind opcodes when done with
  // a single push, making it necessary to split the push into one containing
  // r4-r10 and another containing r11 and lr.
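  // In other words, emit "push {r4-r10}" followed by "push {r11, lr}" rather
  // than a single "push {r4-r11, lr}".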
  if (MF.getTarget().getMCAsmInfo()->usesWindowsCFI() &&
      F.needsUnwindTableEntry() &&
      (MFI.hasVarSizedObjects() || getRegisterInfo()->hasStackRealignment(MF)))
    return SplitR11WindowsSEH;

  // Return SplitR11AAPCSSignRA when the frame pointer is R11, requiring R11
  // and LR to be adjacent on the stack, and return address signing is
  // enabled, requiring R12 to be on the stack.
  if (MF.getInfo<ARMFunctionInfo>()->shouldSignReturnAddress() &&
      getFramePointerReg() == ARM::R11 &&
      MF.getTarget().Options.FramePointerIsReserved(MF))
    return SplitR11AAPCSSignRA;
  return NoSplit;
}