1 //===-- PPCRegisterInfo.cpp - PowerPC Register Information ----------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file contains the PowerPC implementation of the TargetRegisterInfo
10 // class.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "PPCRegisterInfo.h"
15 #include "PPCFrameLowering.h"
16 #include "PPCInstrBuilder.h"
17 #include "PPCMachineFunctionInfo.h"
18 #include "PPCSubtarget.h"
19 #include "PPCTargetMachine.h"
20 #include "llvm/ADT/BitVector.h"
21 #include "llvm/ADT/STLExtras.h"
22 #include "llvm/ADT/Statistic.h"
23 #include "llvm/CodeGen/MachineFrameInfo.h"
24 #include "llvm/CodeGen/MachineFunction.h"
25 #include "llvm/CodeGen/MachineInstrBuilder.h"
26 #include "llvm/CodeGen/MachineModuleInfo.h"
27 #include "llvm/CodeGen/MachineRegisterInfo.h"
28 #include "llvm/CodeGen/RegisterScavenging.h"
29 #include "llvm/CodeGen/TargetFrameLowering.h"
30 #include "llvm/CodeGen/TargetInstrInfo.h"
31 #include "llvm/IR/CallingConv.h"
32 #include "llvm/IR/Constants.h"
33 #include "llvm/IR/Function.h"
34 #include "llvm/IR/Type.h"
35 #include "llvm/Support/CommandLine.h"
36 #include "llvm/Support/Debug.h"
37 #include "llvm/Support/ErrorHandling.h"
38 #include "llvm/Support/MathExtras.h"
39 #include "llvm/Support/raw_ostream.h"
40 #include "llvm/Target/TargetMachine.h"
41 #include "llvm/Target/TargetOptions.h"
42 #include <cstdlib>
43 
44 using namespace llvm;
45 
46 #define DEBUG_TYPE "reginfo"
47 
48 #define GET_REGINFO_TARGET_DESC
49 #include "PPCGenRegisterInfo.inc"
50 
51 STATISTIC(InflateGPRC, "Number of gprc inputs for getLargestLegalClass");
52 STATISTIC(InflateGP8RC, "Number of g8rc inputs for getLargestLegalClass");
53 
54 static cl::opt<bool>
55 EnableBasePointer("ppc-use-base-pointer", cl::Hidden, cl::init(true),
56          cl::desc("Enable use of a base pointer for complex stack frames"));
57 
58 static cl::opt<bool>
59 AlwaysBasePointer("ppc-always-use-base-pointer", cl::Hidden, cl::init(false),
60          cl::desc("Force the use of a base pointer in every function"));
61 
62 static cl::opt<bool>
63 EnableGPRToVecSpills("ppc-enable-gpr-to-vsr-spills", cl::Hidden, cl::init(false),
64          cl::desc("Enable spills from gpr to vsr rather than stack"));
65 
66 static cl::opt<bool>
67 StackPtrConst("ppc-stack-ptr-caller-preserved",
68                 cl::desc("Consider R1 caller preserved so stack saves of "
69                          "caller preserved registers can be LICM candidates"),
70                 cl::init(true), cl::Hidden);
71 
72 static cl::opt<unsigned>
73 MaxCRBitSpillDist("ppc-max-crbit-spill-dist",
74                   cl::desc("Maximum search distance for definition of CR bit "
75                            "spill on ppc"),
76                   cl::Hidden, cl::init(100));
77 
78 // Copies/moves of physical accumulators are expensive operations
79 // that should be avoided whenever possible. MMA instructions are
80 // meant to be used in performance-sensitive computational kernels.
81 // This option is provided, at least for the time being, to give the
82 // user a tool to detect this expensive operation and either rework
83 // their code or report a compiler bug if that turns out to be the
84 // cause.
85 #ifndef NDEBUG
86 static cl::opt<bool>
87 ReportAccMoves("ppc-report-acc-moves",
88                cl::desc("Emit information about accumulator register spills "
89                         "and copies"),
90                cl::Hidden, cl::init(false));
91 #endif
92 
93 static unsigned offsetMinAlignForOpcode(unsigned OpC);
94 
95 PPCRegisterInfo::PPCRegisterInfo(const PPCTargetMachine &TM)
96   : PPCGenRegisterInfo(TM.isPPC64() ? PPC::LR8 : PPC::LR,
97                        TM.isPPC64() ? 0 : 1,
98                        TM.isPPC64() ? 0 : 1),
99     TM(TM) {
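  // ImmToIdxMap maps each immediate-offset (D-/DS-/DQ-Form) memory opcode to
  // its indexed (X-Form) counterpart, e.g. LWZ -> LWZX. eliminateFrameIndex
  // consults this map when a frame-index offset cannot be encoded in the
  // instruction's immediate field and must be materialized in a register
  // instead.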
100   ImmToIdxMap[PPC::LD]   = PPC::LDX;    ImmToIdxMap[PPC::STD]  = PPC::STDX;
101   ImmToIdxMap[PPC::LBZ]  = PPC::LBZX;   ImmToIdxMap[PPC::STB]  = PPC::STBX;
102   ImmToIdxMap[PPC::LHZ]  = PPC::LHZX;   ImmToIdxMap[PPC::LHA]  = PPC::LHAX;
103   ImmToIdxMap[PPC::LWZ]  = PPC::LWZX;   ImmToIdxMap[PPC::LWA]  = PPC::LWAX;
104   ImmToIdxMap[PPC::LFS]  = PPC::LFSX;   ImmToIdxMap[PPC::LFD]  = PPC::LFDX;
105   ImmToIdxMap[PPC::STH]  = PPC::STHX;   ImmToIdxMap[PPC::STW]  = PPC::STWX;
106   ImmToIdxMap[PPC::STFS] = PPC::STFSX;  ImmToIdxMap[PPC::STFD] = PPC::STFDX;
107   ImmToIdxMap[PPC::ADDI] = PPC::ADD4;
108   ImmToIdxMap[PPC::LWA_32] = PPC::LWAX_32;
109 
110   // 64-bit
111   ImmToIdxMap[PPC::LHA8] = PPC::LHAX8; ImmToIdxMap[PPC::LBZ8] = PPC::LBZX8;
112   ImmToIdxMap[PPC::LHZ8] = PPC::LHZX8; ImmToIdxMap[PPC::LWZ8] = PPC::LWZX8;
113   ImmToIdxMap[PPC::STB8] = PPC::STBX8; ImmToIdxMap[PPC::STH8] = PPC::STHX8;
114   ImmToIdxMap[PPC::STW8] = PPC::STWX8; ImmToIdxMap[PPC::STDU] = PPC::STDUX;
115   ImmToIdxMap[PPC::ADDI8] = PPC::ADD8;
116 
117   // VSX
118   ImmToIdxMap[PPC::DFLOADf32] = PPC::LXSSPX;
119   ImmToIdxMap[PPC::DFLOADf64] = PPC::LXSDX;
120   ImmToIdxMap[PPC::SPILLTOVSR_LD] = PPC::SPILLTOVSR_LDX;
121   ImmToIdxMap[PPC::SPILLTOVSR_ST] = PPC::SPILLTOVSR_STX;
122   ImmToIdxMap[PPC::DFSTOREf32] = PPC::STXSSPX;
123   ImmToIdxMap[PPC::DFSTOREf64] = PPC::STXSDX;
124   ImmToIdxMap[PPC::LXV] = PPC::LXVX;
125   ImmToIdxMap[PPC::LXSD] = PPC::LXSDX;
126   ImmToIdxMap[PPC::LXSSP] = PPC::LXSSPX;
127   ImmToIdxMap[PPC::STXV] = PPC::STXVX;
128   ImmToIdxMap[PPC::STXSD] = PPC::STXSDX;
129   ImmToIdxMap[PPC::STXSSP] = PPC::STXSSPX;
130 
131   // SPE
132   ImmToIdxMap[PPC::EVLDD] = PPC::EVLDDX;
133   ImmToIdxMap[PPC::EVSTDD] = PPC::EVSTDDX;
134   ImmToIdxMap[PPC::SPESTW] = PPC::SPESTWX;
135   ImmToIdxMap[PPC::SPELWZ] = PPC::SPELWZX;
136 }
137 
138 /// getPointerRegClass - Return the register class to use to hold pointers.
139 /// This is used for addressing modes.
140 const TargetRegisterClass *
141 PPCRegisterInfo::getPointerRegClass(const MachineFunction &MF, unsigned Kind)
142                                                                        const {
143   // Note that PPCInstrInfo::FoldImmediate also directly uses this Kind value
144   // when it checks for ZERO folding.
145   if (Kind == 1) {
146     if (TM.isPPC64())
147       return &PPC::G8RC_NOX0RegClass;
148     return &PPC::GPRC_NOR0RegClass;
149   }
150 
151   if (TM.isPPC64())
152     return &PPC::G8RCRegClass;
153   return &PPC::GPRCRegClass;
154 }
155 
156 const MCPhysReg*
157 PPCRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
158   const PPCSubtarget &Subtarget = MF->getSubtarget<PPCSubtarget>();
159   if (MF->getFunction().getCallingConv() == CallingConv::AnyReg) {
160     if (!TM.isPPC64() && Subtarget.isAIXABI())
161       report_fatal_error("AnyReg unimplemented on 32-bit AIX.");
162     if (Subtarget.hasVSX()) {
163       if (Subtarget.isAIXABI() && !TM.getAIXExtendedAltivecABI())
164         return CSR_64_AllRegs_AIX_Dflt_VSX_SaveList;
165       return CSR_64_AllRegs_VSX_SaveList;
166     }
167     if (Subtarget.hasAltivec()) {
168       if (Subtarget.isAIXABI() && !TM.getAIXExtendedAltivecABI())
169         return CSR_64_AllRegs_AIX_Dflt_Altivec_SaveList;
170       return CSR_64_AllRegs_Altivec_SaveList;
171     }
172     return CSR_64_AllRegs_SaveList;
173   }
174 
175   // On PPC64, we might need to save r2 (but only if it is not reserved).
176   // We do not need to treat R2 as callee-saved when using PC-Relative calls
177   // because any direct uses of R2 will cause it to be reserved. If the function
178   // is a leaf or the only uses of R2 are implicit uses for calls, the calls
179   // will use the @notoc relocation which will cause this function to set the
180   // st_other bit to 1, thereby communicating to its caller that it arbitrarily
181   // clobbers the TOC.
182   bool SaveR2 = MF->getRegInfo().isAllocatable(PPC::X2) &&
183                 !Subtarget.isUsingPCRelativeCalls();
184 
185   // Cold calling convention CSRs.
186   if (MF->getFunction().getCallingConv() == CallingConv::Cold) {
187     if (Subtarget.isAIXABI())
188       report_fatal_error("Cold calling unimplemented on AIX.");
189     if (TM.isPPC64()) {
190       if (Subtarget.hasAltivec())
191         return SaveR2 ? CSR_SVR64_ColdCC_R2_Altivec_SaveList
192                       : CSR_SVR64_ColdCC_Altivec_SaveList;
193       return SaveR2 ? CSR_SVR64_ColdCC_R2_SaveList
194                     : CSR_SVR64_ColdCC_SaveList;
195     }
196     // 32-bit targets.
197     if (Subtarget.hasAltivec())
198       return CSR_SVR32_ColdCC_Altivec_SaveList;
199     else if (Subtarget.hasSPE())
200       return CSR_SVR32_ColdCC_SPE_SaveList;
201     return CSR_SVR32_ColdCC_SaveList;
202   }
203   // Standard calling convention CSRs.
204   if (TM.isPPC64()) {
205     if (Subtarget.hasAltivec() &&
206         (!Subtarget.isAIXABI() || TM.getAIXExtendedAltivecABI())) {
207       return SaveR2 ? CSR_PPC64_R2_Altivec_SaveList
208                     : CSR_PPC64_Altivec_SaveList;
209     }
210     return SaveR2 ? CSR_PPC64_R2_SaveList : CSR_PPC64_SaveList;
211   }
212   // 32-bit targets.
213   if (Subtarget.isAIXABI()) {
214     if (Subtarget.hasAltivec())
215       return TM.getAIXExtendedAltivecABI() ? CSR_AIX32_Altivec_SaveList
216                                            : CSR_AIX32_SaveList;
217     return CSR_AIX32_SaveList;
218   }
219   if (Subtarget.hasAltivec())
220     return CSR_SVR432_Altivec_SaveList;
221   else if (Subtarget.hasSPE())
222     return CSR_SVR432_SPE_SaveList;
223   return CSR_SVR432_SaveList;
224 }
225 
226 const uint32_t *
227 PPCRegisterInfo::getCallPreservedMask(const MachineFunction &MF,
228                                       CallingConv::ID CC) const {
229   const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
230   if (CC == CallingConv::AnyReg) {
231     if (Subtarget.hasVSX()) {
232       if (Subtarget.isAIXABI() && !TM.getAIXExtendedAltivecABI())
233         return CSR_64_AllRegs_AIX_Dflt_VSX_RegMask;
234       return CSR_64_AllRegs_VSX_RegMask;
235     }
236     if (Subtarget.hasAltivec()) {
237       if (Subtarget.isAIXABI() && !TM.getAIXExtendedAltivecABI())
238         return CSR_64_AllRegs_AIX_Dflt_Altivec_RegMask;
239       return CSR_64_AllRegs_Altivec_RegMask;
240     }
241     return CSR_64_AllRegs_RegMask;
242   }
243 
244   if (Subtarget.isAIXABI()) {
245     return TM.isPPC64()
246                ? ((Subtarget.hasAltivec() && TM.getAIXExtendedAltivecABI())
247                       ? CSR_PPC64_Altivec_RegMask
248                       : CSR_PPC64_RegMask)
249                : ((Subtarget.hasAltivec() && TM.getAIXExtendedAltivecABI())
250                       ? CSR_AIX32_Altivec_RegMask
251                       : CSR_AIX32_RegMask);
252   }
253 
254   if (CC == CallingConv::Cold) {
255     return TM.isPPC64() ? (Subtarget.hasAltivec() ? CSR_SVR64_ColdCC_Altivec_RegMask
256                                                   : CSR_SVR64_ColdCC_RegMask)
257                         : (Subtarget.hasAltivec() ? CSR_SVR32_ColdCC_Altivec_RegMask
258                                                   : (Subtarget.hasSPE()
259                                                   ? CSR_SVR32_ColdCC_SPE_RegMask
260                                                   : CSR_SVR32_ColdCC_RegMask));
261   }
262 
263   return TM.isPPC64() ? (Subtarget.hasAltivec() ? CSR_PPC64_Altivec_RegMask
264                                                 : CSR_PPC64_RegMask)
265                       : (Subtarget.hasAltivec()
266                              ? CSR_SVR432_Altivec_RegMask
267                              : (Subtarget.hasSPE() ? CSR_SVR432_SPE_RegMask
268                                                    : CSR_SVR432_RegMask));
269 }
270 
271 const uint32_t*
272 PPCRegisterInfo::getNoPreservedMask() const {
273   return CSR_NoRegs_RegMask;
274 }
275 
276 void PPCRegisterInfo::adjustStackMapLiveOutMask(uint32_t *Mask) const {
277   for (unsigned PseudoReg : {PPC::ZERO, PPC::ZERO8, PPC::RM})
278     Mask[PseudoReg / 32] &= ~(1u << (PseudoReg % 32));
279 }
280 
281 BitVector PPCRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
282   BitVector Reserved(getNumRegs());
283   const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
284   const PPCFrameLowering *TFI = getFrameLowering(MF);
285 
286   // The ZERO register is not really a register, but the representation of r0
287   // when used in instructions that treat r0 as the constant 0.
288   markSuperRegs(Reserved, PPC::ZERO);
289 
290   // The FP register is also not really a register, but is the representation
291   // of the frame pointer register used by ISD::FRAMEADDR.
292   markSuperRegs(Reserved, PPC::FP);
293 
294   // The BP register is also not really a register, but is the representation
295   // of the base pointer register used by setjmp.
296   markSuperRegs(Reserved, PPC::BP);
297 
298   // The counter registers must be reserved so that counter-based loops can
299   // be correctly formed (and the mtctr instructions are not DCE'd).
300   markSuperRegs(Reserved, PPC::CTR);
301   markSuperRegs(Reserved, PPC::CTR8);
302 
303   markSuperRegs(Reserved, PPC::R1);
304   markSuperRegs(Reserved, PPC::LR);
305   markSuperRegs(Reserved, PPC::LR8);
306   markSuperRegs(Reserved, PPC::RM);
307 
308   markSuperRegs(Reserved, PPC::VRSAVE);
309 
310   // The SVR4 ABI reserves r2 and r13
311   if (Subtarget.isSVR4ABI()) {
312     // We only reserve r2 if we need to use the TOC pointer. If we have no
313     // explicit uses of the TOC pointer (meaning we're a leaf function with
314     // no constant-pool loads, etc.) and we have no potential uses inside an
315     // inline asm block, then we can treat r2 has an ordinary callee-saved
316     // inline asm block, then we can treat r2 as an ordinary callee-saved
317     const PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
318     if (!TM.isPPC64() || FuncInfo->usesTOCBasePtr() || MF.hasInlineAsm())
319       markSuperRegs(Reserved, PPC::R2);  // System-reserved register
320     markSuperRegs(Reserved, PPC::R13); // Small Data Area pointer register
321   }
322 
323   // Always reserve r2 on AIX for now.
324   // TODO: Make r2 allocatable on AIX/XCOFF for some leaf functions.
325   if (Subtarget.isAIXABI())
326     markSuperRegs(Reserved, PPC::R2);  // System-reserved register
327 
328   // On PPC64, r13 is the thread pointer. Never allocate this register.
329   if (TM.isPPC64())
330     markSuperRegs(Reserved, PPC::R13);
331 
332   if (TFI->needsFP(MF))
333     markSuperRegs(Reserved, PPC::R31);
334 
335   bool IsPositionIndependent = TM.isPositionIndependent();
336   if (hasBasePointer(MF)) {
337     if (Subtarget.is32BitELFABI() && IsPositionIndependent)
338       markSuperRegs(Reserved, PPC::R29);
339     else
340       markSuperRegs(Reserved, PPC::R30);
341   }
342 
343   if (Subtarget.is32BitELFABI() && IsPositionIndependent)
344     markSuperRegs(Reserved, PPC::R30);
345 
346   // Reserve Altivec registers when Altivec is unavailable.
347   if (!Subtarget.hasAltivec())
348     for (TargetRegisterClass::iterator I = PPC::VRRCRegClass.begin(),
349          IE = PPC::VRRCRegClass.end(); I != IE; ++I)
350       markSuperRegs(Reserved, *I);
351 
352   if (Subtarget.isAIXABI() && Subtarget.hasAltivec() &&
353       !TM.getAIXExtendedAltivecABI()) {
354     //  In the AIX default Altivec ABI, vector registers VR20-VR31 are reserved
355     //  and cannot be used.
356     for (auto Reg : CSR_Altivec_SaveList) {
357       if (Reg == 0)
358         break;
359       markSuperRegs(Reserved, Reg);
360       for (MCRegAliasIterator AS(Reg, this, true); AS.isValid(); ++AS) {
361         Reserved.set(*AS);
362       }
363     }
364   }
365 
366   assert(checkAllSuperRegsMarked(Reserved));
367   return Reserved;
368 }
369 
370 bool PPCRegisterInfo::requiresFrameIndexScavenging(const MachineFunction &MF) const {
371   const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
372   const PPCInstrInfo *InstrInfo =  Subtarget.getInstrInfo();
373   const MachineFrameInfo &MFI = MF.getFrameInfo();
374   const std::vector<CalleeSavedInfo> &Info = MFI.getCalleeSavedInfo();
375 
376   LLVM_DEBUG(dbgs() << "requiresFrameIndexScavenging for " << MF.getName()
377                     << ".\n");
378   // If the callee saved info is invalid we have to default to true for safety.
379   if (!MFI.isCalleeSavedInfoValid()) {
380     LLVM_DEBUG(dbgs() << "TRUE - Invalid callee saved info.\n");
381     return true;
382   }
383 
384   // We will require the use of X-Forms because the frame is larger than what
385   // can be represented in the signed 16-bit immediate of a D-Form.
386   // If we need an X-Form then we need a register to store the address offset.
387   unsigned FrameSize = MFI.getStackSize();
388   // A signed 16-bit displacement means the FrameSize cannot exceed 15 bits.
389   if (FrameSize & ~0x7FFF) {
390     LLVM_DEBUG(dbgs() << "TRUE - Frame size is too large for D-Form.\n");
391     return true;
392   }
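  // Purely illustrative sketch (hypothetical registers, not code emitted by
  // this function): a frame of 0x8000 bytes sets bit 15, so a D-Form access
  // such as
  //   lwz r4, 32768(r1)
  // cannot encode its displacement. The offset must instead be built in a
  // register and used with an X-Form access, e.g.
  //   lis r0, 0
  //   ori r0, r0, 32768
  //   lwzx r4, r1, r0
  // which is why a register must be available for scavenging.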
393 
394   // The callee saved info is valid so it can be traversed.
395   // Checking for registers that need saving that do not have load or store
396   // forms where the address offset is an immediate.
397   for (unsigned i = 0; i < Info.size(); i++) {
398     // If the spill is to a register no scavenging is required.
399     if (Info[i].isSpilledToReg())
400       continue;
401 
402     int FrIdx = Info[i].getFrameIdx();
403     unsigned Reg = Info[i].getReg();
404 
405     const TargetRegisterClass *RC = getMinimalPhysRegClass(Reg);
406     unsigned Opcode = InstrInfo->getStoreOpcodeForSpill(RC);
407     if (!MFI.isFixedObjectIndex(FrIdx)) {
408       // This is not a fixed object. If it requires alignment then we may still
409       // need to use the XForm.
410       if (offsetMinAlignForOpcode(Opcode) > 1) {
411         LLVM_DEBUG(dbgs() << "Memory Operand: " << InstrInfo->getName(Opcode)
412                           << " for register " << printReg(Reg, this) << ".\n");
413         LLVM_DEBUG(dbgs() << "TRUE - Not fixed frame object that requires "
414                           << "alignment.\n");
415         return true;
416       }
417     }
418 
419     // This is either:
420     // 1) A fixed frame index object, which we know is aligned, so as long as
421     // we have a valid D-Form/DS-Form/DQ-Form (non X-Form) opcode we don't
422     // need to consider the alignment here.
423     // 2) A non-fixed object, in which case the previous check tells us that
424     // the minimum required alignment is no more than 1.
425     if (InstrInfo->isXFormMemOp(Opcode)) {
426       LLVM_DEBUG(dbgs() << "Memory Operand: " << InstrInfo->getName(Opcode)
427                         << " for register " << printReg(Reg, this) << ".\n");
428       LLVM_DEBUG(dbgs() << "TRUE - Memory operand is X-Form.\n");
429       return true;
430     }
431   }
432   LLVM_DEBUG(dbgs() << "FALSE - Scavenging is not required.\n");
433   return false;
434 }
435 
436 bool PPCRegisterInfo::requiresVirtualBaseRegisters(
437     const MachineFunction &MF) const {
438   const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
439   // Do not use virtual base registers when ROP protection is turned on.
440   // Virtual base registers break the layout of the local variable space and may
441   // push the ROP Hash location past the 512 byte range of the ROP store
442   // instruction.
443   return !Subtarget.hasROPProtect();
444 }
445 
446 bool PPCRegisterInfo::isCallerPreservedPhysReg(MCRegister PhysReg,
447                                                const MachineFunction &MF) const {
448   assert(Register::isPhysicalRegister(PhysReg));
449   const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
450   const MachineFrameInfo &MFI = MF.getFrameInfo();
451 
452   if (!Subtarget.is64BitELFABI() && !Subtarget.isAIXABI())
453     return false;
454   if (PhysReg == Subtarget.getTOCPointerRegister())
455     // X2/R2 is guaranteed to be preserved within a function if it is reserved.
456     // The reason it's reserved is that it's the TOC pointer (and the function
457     // uses the TOC). In functions where it isn't reserved (i.e. leaf functions
458     // with no TOC access), we can't claim that it is preserved.
459     return (getReservedRegs(MF).test(PhysReg));
460   if (StackPtrConst && PhysReg == Subtarget.getStackPointerRegister() &&
461       !MFI.hasVarSizedObjects() && !MFI.hasOpaqueSPAdjustment())
462     // The value of the stack pointer does not change within a function after
463     // the prologue and before the epilogue if there are no dynamic allocations
464     // and no inline asm which clobbers X1/R1.
465     return true;
466   return false;
467 }
468 
469 unsigned PPCRegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
470                                               MachineFunction &MF) const {
471   const PPCFrameLowering *TFI = getFrameLowering(MF);
472   const unsigned DefaultSafety = 1;
473 
474   switch (RC->getID()) {
475   default:
476     return 0;
477   case PPC::G8RC_NOX0RegClassID:
478   case PPC::GPRC_NOR0RegClassID:
479   case PPC::SPERCRegClassID:
480   case PPC::G8RCRegClassID:
481   case PPC::GPRCRegClassID: {
482     unsigned FP = TFI->hasFP(MF) ? 1 : 0;
483     return 32 - FP - DefaultSafety;
484   }
485   case PPC::F4RCRegClassID:
486   case PPC::F8RCRegClassID:
487   case PPC::VSLRCRegClassID:
488     return 32 - DefaultSafety;
489   case PPC::VFRCRegClassID:
490   case PPC::VRRCRegClassID: {
491     const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
492     // Vector registers VR20-VR31 are reserved and cannot be used in the default
493     // Altivec ABI on AIX.
494     if (!TM.getAIXExtendedAltivecABI() && Subtarget.isAIXABI())
495       return 20 - DefaultSafety;
496   }
497     return 32 - DefaultSafety;
498   case PPC::VSFRCRegClassID:
499   case PPC::VSSRCRegClassID:
500   case PPC::VSRCRegClassID: {
501     const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
502     if (!TM.getAIXExtendedAltivecABI() && Subtarget.isAIXABI())
503       // Vector registers VR20-VR31 are reserved and cannot be used in the
504       // default Altivec ABI on AIX.
505       return 52 - DefaultSafety;
506   }
507     return 64 - DefaultSafety;
508   case PPC::CRRCRegClassID:
509     return 8 - DefaultSafety;
510   }
511 }
512 
513 const TargetRegisterClass *
514 PPCRegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC,
515                                            const MachineFunction &MF) const {
516   const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
517   if (Subtarget.hasVSX()) {
518     // With VSX, we can inflate various sub-register classes to the full VSX
519     // register set.
520 
521     // For Power9 we allow the user to enable GPR to vector spills.
522     // FIXME: Currently limited to spilling GP8RC. A follow on patch will add
523     // support to spill GPRC.
524     if (TM.isELFv2ABI() || Subtarget.isAIXABI()) {
525       if (Subtarget.hasP9Vector() && EnableGPRToVecSpills &&
526           RC == &PPC::G8RCRegClass) {
527         InflateGP8RC++;
528         return &PPC::SPILLTOVSRRCRegClass;
529       }
530       if (RC == &PPC::GPRCRegClass && EnableGPRToVecSpills)
531         InflateGPRC++;
532     }
533     if (RC == &PPC::F8RCRegClass)
534       return &PPC::VSFRCRegClass;
535     else if (RC == &PPC::VRRCRegClass)
536       return &PPC::VSRCRegClass;
537     else if (RC == &PPC::F4RCRegClass && Subtarget.hasP8Vector())
538       return &PPC::VSSRCRegClass;
539   }
540 
541   return TargetRegisterInfo::getLargestLegalSuperClass(RC, MF);
542 }
543 
544 //===----------------------------------------------------------------------===//
545 // Stack Frame Processing methods
546 //===----------------------------------------------------------------------===//
547 
548 /// lowerDynamicAlloc - Generate the code for allocating an object in the
549 /// current frame.  The sequence of code will be in the general form
550 ///
551 ///   addi   R0, SP, \#frameSize ; get the address of the previous frame
552 ///   stwux  R0, SP, Rnegsize   ; add and update the SP with the negated size
553 ///   addi   Rnew, SP, \#maxCallFrameSize ; get the top of the allocation
554 ///
555 void PPCRegisterInfo::lowerDynamicAlloc(MachineBasicBlock::iterator II) const {
556   // Get the instruction.
557   MachineInstr &MI = *II;
558   // Get the instruction's basic block.
559   MachineBasicBlock &MBB = *MI.getParent();
560   // Get the basic block's function.
561   MachineFunction &MF = *MBB.getParent();
562   // Get the frame info.
563   MachineFrameInfo &MFI = MF.getFrameInfo();
564   const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
565   // Get the instruction info.
566   const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
567   // Determine whether 64-bit pointers are used.
568   bool LP64 = TM.isPPC64();
569   DebugLoc dl = MI.getDebugLoc();
570 
571   // Get the maximum call stack size.
572   unsigned maxCallFrameSize = MFI.getMaxCallFrameSize();
573   Align MaxAlign = MFI.getMaxAlign();
574   assert(isAligned(MaxAlign, maxCallFrameSize) &&
575          "Maximum call-frame size not sufficiently aligned");
576   (void)MaxAlign;
577 
578   const TargetRegisterClass *G8RC = &PPC::G8RCRegClass;
579   const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
580   Register Reg = MF.getRegInfo().createVirtualRegister(LP64 ? G8RC : GPRC);
581   bool KillNegSizeReg = MI.getOperand(1).isKill();
582   Register NegSizeReg = MI.getOperand(1).getReg();
583 
584   prepareDynamicAlloca(II, NegSizeReg, KillNegSizeReg, Reg);
585   // Grow the stack and update the stack pointer link, then determine the
586   // address of new allocated space.
587   if (LP64) {
588     BuildMI(MBB, II, dl, TII.get(PPC::STDUX), PPC::X1)
589         .addReg(Reg, RegState::Kill)
590         .addReg(PPC::X1)
591         .addReg(NegSizeReg, getKillRegState(KillNegSizeReg));
592     BuildMI(MBB, II, dl, TII.get(PPC::ADDI8), MI.getOperand(0).getReg())
593         .addReg(PPC::X1)
594         .addImm(maxCallFrameSize);
595   } else {
596     BuildMI(MBB, II, dl, TII.get(PPC::STWUX), PPC::R1)
597         .addReg(Reg, RegState::Kill)
598         .addReg(PPC::R1)
599         .addReg(NegSizeReg, getKillRegState(KillNegSizeReg));
600     BuildMI(MBB, II, dl, TII.get(PPC::ADDI), MI.getOperand(0).getReg())
601         .addReg(PPC::R1)
602         .addImm(maxCallFrameSize);
603   }
604 
605   // Discard the DYNALLOC instruction.
606   MBB.erase(II);
607 }
608 
609 /// To accomplish dynamic stack allocation, we have to calculate the exact
610 /// size to subtract from the stack pointer according to the alignment
611 /// information, and get the previous frame pointer.
612 void PPCRegisterInfo::prepareDynamicAlloca(MachineBasicBlock::iterator II,
613                                            Register &NegSizeReg,
614                                            bool &KillNegSizeReg,
615                                            Register &FramePointer) const {
616   // Get the instruction.
617   MachineInstr &MI = *II;
618   // Get the instruction's basic block.
619   MachineBasicBlock &MBB = *MI.getParent();
620   // Get the basic block's function.
621   MachineFunction &MF = *MBB.getParent();
622   // Get the frame info.
623   MachineFrameInfo &MFI = MF.getFrameInfo();
624   const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
625   // Get the instruction info.
626   const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
627   // Determine whether 64-bit pointers are used.
628   bool LP64 = TM.isPPC64();
629   DebugLoc dl = MI.getDebugLoc();
630   // Get the total frame size.
631   unsigned FrameSize = MFI.getStackSize();
632 
633   // Get stack alignments.
634   const PPCFrameLowering *TFI = getFrameLowering(MF);
635   Align TargetAlign = TFI->getStackAlign();
636   Align MaxAlign = MFI.getMaxAlign();
637 
638   // Determine the previous frame's address.  If FrameSize can't be
639   // represented as 16 bits or we need special alignment, then we load the
640   // previous frame's address from 0(SP).  Why not do an addis of the hi?
641   // Because R0 is our only safe tmp register and addi/addis treat R0 as zero.
642   // Constructing the constant and adding would take 3 instructions.
643   // Fortunately, a frame greater than 32K is rare.
644   const TargetRegisterClass *G8RC = &PPC::G8RCRegClass;
645   const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
646 
647   if (MaxAlign < TargetAlign && isInt<16>(FrameSize)) {
648     if (LP64)
649       BuildMI(MBB, II, dl, TII.get(PPC::ADDI8), FramePointer)
650           .addReg(PPC::X31)
651           .addImm(FrameSize);
652     else
653       BuildMI(MBB, II, dl, TII.get(PPC::ADDI), FramePointer)
654           .addReg(PPC::R31)
655           .addImm(FrameSize);
656   } else if (LP64) {
657     BuildMI(MBB, II, dl, TII.get(PPC::LD), FramePointer)
658         .addImm(0)
659         .addReg(PPC::X1);
660   } else {
661     BuildMI(MBB, II, dl, TII.get(PPC::LWZ), FramePointer)
662         .addImm(0)
663         .addReg(PPC::R1);
664   }
665   // Determine the actual NegSizeReg according to alignment info.
666   if (LP64) {
667     if (MaxAlign > TargetAlign) {
668       unsigned UnalNegSizeReg = NegSizeReg;
669       NegSizeReg = MF.getRegInfo().createVirtualRegister(G8RC);
670 
671       // Unfortunately, there is no andi, only andi., and we can't insert that
672       // here because we might clobber cr0 while it is live.
673       BuildMI(MBB, II, dl, TII.get(PPC::LI8), NegSizeReg)
674           .addImm(~(MaxAlign.value() - 1));
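      // Illustrative example (assumed value): with MaxAlign == 64 the LI8
      // above materializes ~(64 - 1) == 0xFFFFFFFFFFFFFFC0, and the AND8
      // below clears the low 6 bits of the negated size, rounding the
      // allocation up to a multiple of 64 bytes.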
675 
676       unsigned NegSizeReg1 = NegSizeReg;
677       NegSizeReg = MF.getRegInfo().createVirtualRegister(G8RC);
678       BuildMI(MBB, II, dl, TII.get(PPC::AND8), NegSizeReg)
679           .addReg(UnalNegSizeReg, getKillRegState(KillNegSizeReg))
680           .addReg(NegSizeReg1, RegState::Kill);
681       KillNegSizeReg = true;
682     }
683   } else {
684     if (MaxAlign > TargetAlign) {
685       unsigned UnalNegSizeReg = NegSizeReg;
686       NegSizeReg = MF.getRegInfo().createVirtualRegister(GPRC);
687 
688       // Unfortunately, there is no andi, only andi., and we can't insert that
689       // here because we might clobber cr0 while it is live.
690       BuildMI(MBB, II, dl, TII.get(PPC::LI), NegSizeReg)
691           .addImm(~(MaxAlign.value() - 1));
692 
693       unsigned NegSizeReg1 = NegSizeReg;
694       NegSizeReg = MF.getRegInfo().createVirtualRegister(GPRC);
695       BuildMI(MBB, II, dl, TII.get(PPC::AND), NegSizeReg)
696           .addReg(UnalNegSizeReg, getKillRegState(KillNegSizeReg))
697           .addReg(NegSizeReg1, RegState::Kill);
698       KillNegSizeReg = true;
699     }
700   }
701 }
702 
703 void PPCRegisterInfo::lowerPrepareProbedAlloca(
704     MachineBasicBlock::iterator II) const {
705   MachineInstr &MI = *II;
706   // Get the instruction's basic block.
707   MachineBasicBlock &MBB = *MI.getParent();
708   // Get the basic block's function.
709   MachineFunction &MF = *MBB.getParent();
710   const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
711   // Get the instruction info.
712   const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
713   // Determine whether 64-bit pointers are used.
714   bool LP64 = TM.isPPC64();
715   DebugLoc dl = MI.getDebugLoc();
716   Register FramePointer = MI.getOperand(0).getReg();
717   const Register ActualNegSizeReg = MI.getOperand(1).getReg();
718   bool KillNegSizeReg = MI.getOperand(2).isKill();
719   Register NegSizeReg = MI.getOperand(2).getReg();
720   const MCInstrDesc &CopyInst = TII.get(LP64 ? PPC::OR8 : PPC::OR);
721   // RegAllocator might allocate FramePointer and NegSizeReg in the same physreg.
722   if (FramePointer == NegSizeReg) {
723     assert(KillNegSizeReg && "FramePointer is a def and NegSizeReg is a use, "
724                              "NegSizeReg should be killed");
725     // FramePointer is clobbered earlier than the use of NegSizeReg in
726     // prepareDynamicAlloca, save NegSizeReg in ActualNegSizeReg to avoid
727     // misuse.
728     BuildMI(MBB, II, dl, CopyInst, ActualNegSizeReg)
729         .addReg(NegSizeReg)
730         .addReg(NegSizeReg);
731     NegSizeReg = ActualNegSizeReg;
732     KillNegSizeReg = false;
733   }
734   prepareDynamicAlloca(II, NegSizeReg, KillNegSizeReg, FramePointer);
735   // NegSizeReg might be updated in prepareDynamicAlloca if MaxAlign >
736   // TargetAlign.
737   if (NegSizeReg != ActualNegSizeReg)
738     BuildMI(MBB, II, dl, CopyInst, ActualNegSizeReg)
739         .addReg(NegSizeReg)
740         .addReg(NegSizeReg);
741   MBB.erase(II);
742 }
743 
744 void PPCRegisterInfo::lowerDynamicAreaOffset(
745     MachineBasicBlock::iterator II) const {
746   // Get the instruction.
747   MachineInstr &MI = *II;
748   // Get the instruction's basic block.
749   MachineBasicBlock &MBB = *MI.getParent();
750   // Get the basic block's function.
751   MachineFunction &MF = *MBB.getParent();
752   // Get the frame info.
753   MachineFrameInfo &MFI = MF.getFrameInfo();
754   const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
755   // Get the instruction info.
756   const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
757 
758   unsigned maxCallFrameSize = MFI.getMaxCallFrameSize();
759   bool is64Bit = TM.isPPC64();
760   DebugLoc dl = MI.getDebugLoc();
761   BuildMI(MBB, II, dl, TII.get(is64Bit ? PPC::LI8 : PPC::LI),
762           MI.getOperand(0).getReg())
763       .addImm(maxCallFrameSize);
764   MBB.erase(II);
765 }
766 
767 /// lowerCRSpilling - Generate the code for spilling a CR register. Instead of
768 /// reserving a whole register (R0), we scrounge for one here. This generates
769 /// code like this:
770 ///
771 ///   mfcr rA                  ; Move the conditional register into GPR rA.
772 ///   rlwinm rA, rA, SB, 0, 31 ; Shift the bits left so they are in CR0's slot.
773 ///   stw rA, FI               ; Store rA to the frame.
774 ///
775 void PPCRegisterInfo::lowerCRSpilling(MachineBasicBlock::iterator II,
776                                       unsigned FrameIndex) const {
777   // Get the instruction.
778   MachineInstr &MI = *II;       // ; SPILL_CR <SrcReg>, <offset>
779   // Get the instruction's basic block.
780   MachineBasicBlock &MBB = *MI.getParent();
781   MachineFunction &MF = *MBB.getParent();
782   const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
783   const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
784   DebugLoc dl = MI.getDebugLoc();
785 
786   bool LP64 = TM.isPPC64();
787   const TargetRegisterClass *G8RC = &PPC::G8RCRegClass;
788   const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
789 
790   Register Reg = MF.getRegInfo().createVirtualRegister(LP64 ? G8RC : GPRC);
791   Register SrcReg = MI.getOperand(0).getReg();
792 
793   // We need to store the CR in the low 4-bits of the saved value. First, issue
794   // an MFOCRF to save all of the CRBits and, if needed, kill the SrcReg.
795   BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::MFOCRF8 : PPC::MFOCRF), Reg)
796       .addReg(SrcReg, getKillRegState(MI.getOperand(0).isKill()));
797 
798   // If the saved register wasn't CR0, shift the bits left so that they are in
799   // CR0's slot.
800   if (SrcReg != PPC::CR0) {
801     Register Reg1 = Reg;
802     Reg = MF.getRegInfo().createVirtualRegister(LP64 ? G8RC : GPRC);
803 
804     // rlwinm rA, rA, ShiftBits, 0, 31.
805     BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::RLWINM8 : PPC::RLWINM), Reg)
806       .addReg(Reg1, RegState::Kill)
807       .addImm(getEncodingValue(SrcReg) * 4)
808       .addImm(0)
809       .addImm(31);
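    // Illustrative example (not emitted verbatim): when spilling CR2, whose
    // encoding value is 2, the rotate amount is 2 * 4 == 8, i.e.
    //   rlwinm rA, rA, 8, 0, 31
    // which moves CR2's four-bit field into CR0's slot of the saved word.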
810   }
811 
812   addFrameReference(BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::STW8 : PPC::STW))
813                     .addReg(Reg, RegState::Kill),
814                     FrameIndex);
815 
816   // Discard the pseudo instruction.
817   MBB.erase(II);
818 }
819 
820 void PPCRegisterInfo::lowerCRRestore(MachineBasicBlock::iterator II,
821                                       unsigned FrameIndex) const {
822   // Get the instruction.
823   MachineInstr &MI = *II;       // ; <DestReg> = RESTORE_CR <offset>
824   // Get the instruction's basic block.
825   MachineBasicBlock &MBB = *MI.getParent();
826   MachineFunction &MF = *MBB.getParent();
827   const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
828   const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
829   DebugLoc dl = MI.getDebugLoc();
830 
831   bool LP64 = TM.isPPC64();
832   const TargetRegisterClass *G8RC = &PPC::G8RCRegClass;
833   const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
834 
835   Register Reg = MF.getRegInfo().createVirtualRegister(LP64 ? G8RC : GPRC);
836   Register DestReg = MI.getOperand(0).getReg();
837   assert(MI.definesRegister(DestReg) &&
838     "RESTORE_CR does not define its destination");
839 
840   addFrameReference(BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::LWZ8 : PPC::LWZ),
841                               Reg), FrameIndex);
842 
843   // If the reloaded register isn't CR0, shift the bits right so that they are
844   // in the right CR's slot.
845   if (DestReg != PPC::CR0) {
846     Register Reg1 = Reg;
847     Reg = MF.getRegInfo().createVirtualRegister(LP64 ? G8RC : GPRC);
848 
849     unsigned ShiftBits = getEncodingValue(DestReg)*4;
850     // rlwinm r11, r11, 32-ShiftBits, 0, 31.
851     BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::RLWINM8 : PPC::RLWINM), Reg)
852              .addReg(Reg1, RegState::Kill).addImm(32-ShiftBits).addImm(0)
853              .addImm(31);
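    // Illustrative example (not emitted verbatim): when restoring CR2,
    // ShiftBits is 8, so the rotate amount is 32 - 8 == 24, i.e.
    //   rlwinm rA, rA, 24, 0, 31
    // which moves the bits from CR0's slot back into CR2's slot.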
854   }
855 
856   BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::MTOCRF8 : PPC::MTOCRF), DestReg)
857              .addReg(Reg, RegState::Kill);
858 
859   // Discard the pseudo instruction.
860   MBB.erase(II);
861 }
862 
863 void PPCRegisterInfo::lowerCRBitSpilling(MachineBasicBlock::iterator II,
864                                          unsigned FrameIndex) const {
865   // Get the instruction.
866   MachineInstr &MI = *II;       // ; SPILL_CRBIT <SrcReg>, <offset>
867   // Get the instruction's basic block.
868   MachineBasicBlock &MBB = *MI.getParent();
869   MachineFunction &MF = *MBB.getParent();
870   const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
871   const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
872   const TargetRegisterInfo* TRI = Subtarget.getRegisterInfo();
873   DebugLoc dl = MI.getDebugLoc();
874 
875   bool LP64 = TM.isPPC64();
876   const TargetRegisterClass *G8RC = &PPC::G8RCRegClass;
877   const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
878 
879   Register Reg = MF.getRegInfo().createVirtualRegister(LP64 ? G8RC : GPRC);
880   Register SrcReg = MI.getOperand(0).getReg();
881 
882   // Search up the BB to find the definition of the CR bit.
883   MachineBasicBlock::reverse_iterator Ins = MI;
884   MachineBasicBlock::reverse_iterator Rend = MBB.rend();
885   ++Ins;
886   unsigned CRBitSpillDistance = 0;
887   bool SeenUse = false;
888   for (; Ins != Rend; ++Ins) {
889     // Definition found.
890     if (Ins->modifiesRegister(SrcReg, TRI))
891       break;
892     // Use found.
893     if (Ins->readsRegister(SrcReg, TRI))
894       SeenUse = true;
895     // Unable to find CR bit definition within maximum search distance.
896     if (CRBitSpillDistance == MaxCRBitSpillDist) {
897       Ins = MI;
898       break;
899     }
900     // Skip debug instructions when counting CR bit spill distance.
901     if (!Ins->isDebugInstr())
902       CRBitSpillDistance++;
903   }
904 
905   // Unable to find the definition of the CR bit in the MBB.
906   if (Ins == MBB.rend())
907     Ins = MI;
908 
909   bool SpillsKnownBit = false;
910   // There is no need to extract the CR bit if its value is already known.
911   switch (Ins->getOpcode()) {
912   case PPC::CRUNSET:
913     BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::LI8 : PPC::LI), Reg)
914       .addImm(0);
915     SpillsKnownBit = true;
916     break;
917   case PPC::CRSET:
918     BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::LIS8 : PPC::LIS), Reg)
919       .addImm(-32768);
920     SpillsKnownBit = true;
921     break;
922   default:
923     // On Power10, we can use SETNBC to spill all CR bits. SETNBC will set all
924     // bits (specifically, it produces a -1 if the CR bit is set). Ultimately,
925     // the bit that is of importance to us is bit 32 (bit 0 of a 32-bit
926     // register), and SETNBC will set this.
927     if (Subtarget.isISA3_1()) {
928       BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::SETNBC8 : PPC::SETNBC), Reg)
929           .addReg(SrcReg, RegState::Undef);
930       break;
931     }
932 
933     // On Power9, we can use SETB to extract the LT bit. This only works for
934     // the LT bit since SETB produces -1/1/0 for LT/GT/<neither>. So the value
935     // of the bit we care about (32-bit sign bit) will be set to the value of
936     // the LT bit (regardless of the other bits in the CR field).
937     if (Subtarget.isISA3_0()) {
938       if (SrcReg == PPC::CR0LT || SrcReg == PPC::CR1LT ||
939           SrcReg == PPC::CR2LT || SrcReg == PPC::CR3LT ||
940           SrcReg == PPC::CR4LT || SrcReg == PPC::CR5LT ||
941           SrcReg == PPC::CR6LT || SrcReg == PPC::CR7LT) {
942         BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::SETB8 : PPC::SETB), Reg)
943           .addReg(getCRFromCRBit(SrcReg), RegState::Undef);
944         break;
945       }
946     }
947 
948     // We need to move the CR field that contains the CR bit we are spilling.
949     // The super register may not be explicitly defined (i.e. it can be defined
950     // by a CR-logical that only defines the subreg) so we state that the CR
951     // field is undef. Also, in order to preserve the kill flag on the CR bit,
952     // we add it as an implicit use.
953     BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::MFOCRF8 : PPC::MFOCRF), Reg)
954       .addReg(getCRFromCRBit(SrcReg), RegState::Undef)
955       .addReg(SrcReg,
956               RegState::Implicit | getKillRegState(MI.getOperand(0).isKill()));
957 
958     // If the saved register wasn't CR0LT, shift the bits left so that the bit
959     // to store is the first one. Mask all but that bit.
960     Register Reg1 = Reg;
961     Reg = MF.getRegInfo().createVirtualRegister(LP64 ? G8RC : GPRC);
962 
963     // rlwinm rA, rA, ShiftBits, 0, 0.
964     BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::RLWINM8 : PPC::RLWINM), Reg)
965       .addReg(Reg1, RegState::Kill)
966       .addImm(getEncodingValue(SrcReg))
967       .addImm(0).addImm(0);
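    // Illustrative example (assumed bit): spilling CR2GT, whose encoding
    // value is 9, would produce
    //   rlwinm rA, rA, 9, 0, 0
    // rotating that bit into bit 0 and masking off everything else.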
968   }
969   addFrameReference(BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::STW8 : PPC::STW))
970                     .addReg(Reg, RegState::Kill),
971                     FrameIndex);
972 
973   bool KillsCRBit = MI.killsRegister(SrcReg, TRI);
974   // Discard the pseudo instruction.
975   MBB.erase(II);
976   if (SpillsKnownBit && KillsCRBit && !SeenUse) {
977     Ins->setDesc(TII.get(PPC::UNENCODED_NOP));
978     Ins->RemoveOperand(0);
979   }
980 }
981 
982 void PPCRegisterInfo::lowerCRBitRestore(MachineBasicBlock::iterator II,
983                                       unsigned FrameIndex) const {
984   // Get the instruction.
985   MachineInstr &MI = *II;       // ; <DestReg> = RESTORE_CRBIT <offset>
986   // Get the instruction's basic block.
987   MachineBasicBlock &MBB = *MI.getParent();
988   MachineFunction &MF = *MBB.getParent();
989   const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
990   const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
991   DebugLoc dl = MI.getDebugLoc();
992 
993   bool LP64 = TM.isPPC64();
994   const TargetRegisterClass *G8RC = &PPC::G8RCRegClass;
995   const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
996 
997   Register Reg = MF.getRegInfo().createVirtualRegister(LP64 ? G8RC : GPRC);
998   Register DestReg = MI.getOperand(0).getReg();
999   assert(MI.definesRegister(DestReg) &&
1000     "RESTORE_CRBIT does not define its destination");
1001 
1002   addFrameReference(BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::LWZ8 : PPC::LWZ),
1003                               Reg), FrameIndex);
1004 
1005   BuildMI(MBB, II, dl, TII.get(TargetOpcode::IMPLICIT_DEF), DestReg);
1006 
1007   Register RegO = MF.getRegInfo().createVirtualRegister(LP64 ? G8RC : GPRC);
1008   BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::MFOCRF8 : PPC::MFOCRF), RegO)
1009           .addReg(getCRFromCRBit(DestReg));
1010 
1011   unsigned ShiftBits = getEncodingValue(DestReg);
1012   // rlwimi r11, r10, 32-ShiftBits, ..., ...
1013   BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::RLWIMI8 : PPC::RLWIMI), RegO)
1014       .addReg(RegO, RegState::Kill)
1015       .addReg(Reg, RegState::Kill)
1016       .addImm(ShiftBits ? 32 - ShiftBits : 0)
1017       .addImm(ShiftBits)
1018       .addImm(ShiftBits);
1019 
1020   BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::MTOCRF8 : PPC::MTOCRF),
1021           getCRFromCRBit(DestReg))
1022       .addReg(RegO, RegState::Kill)
1023       // Make sure we have a use dependency all the way through this
1024       // sequence of instructions. We can't have the other bits in the CR
1025       // modified in between the mfocrf and the mtocrf.
1026       .addReg(getCRFromCRBit(DestReg), RegState::Implicit);
1027 
1028   // Discard the pseudo instruction.
1029   MBB.erase(II);
1030 }
1031 
1032 void PPCRegisterInfo::emitAccCopyInfo(MachineBasicBlock &MBB,
1033                                       MCRegister DestReg, MCRegister SrcReg) {
1034 #ifdef NDEBUG
1035   return;
1036 #else
1037   if (ReportAccMoves) {
1038     std::string Dest = PPC::ACCRCRegClass.contains(DestReg) ? "acc" : "uacc";
1039     std::string Src = PPC::ACCRCRegClass.contains(SrcReg) ? "acc" : "uacc";
1040     dbgs() << "Emitting copy from " << Src << " to " << Dest << ":\n";
1041     MBB.dump();
1042   }
1043 #endif
1044 }
1045 
1046 static void emitAccSpillRestoreInfo(MachineBasicBlock &MBB, bool IsPrimed,
1047                                     bool IsRestore) {
1048 #ifdef NDEBUG
1049   return;
1050 #else
1051   if (ReportAccMoves) {
1052     dbgs() << "Emitting " << (IsPrimed ? "acc" : "uacc") << " register "
1053            << (IsRestore ? "restore" : "spill") << ":\n";
1054     MBB.dump();
1055   }
1056 #endif
1057 }
1058 
1059 /// lowerACCSpilling - Generate the code for spilling the accumulator register.
1060 /// Similarly to other spills/reloads that use pseudo-ops, we do not actually
1061 /// eliminate the FrameIndex here nor compute the stack offset. We simply
1062 /// create a real instruction with an FI and rely on eliminateFrameIndex to
1063 /// handle the FI elimination.
1064 void PPCRegisterInfo::lowerACCSpilling(MachineBasicBlock::iterator II,
1065                                        unsigned FrameIndex) const {
1066   MachineInstr &MI = *II; // SPILL_ACC <SrcReg>, <offset>
1067   MachineBasicBlock &MBB = *MI.getParent();
1068   MachineFunction &MF = *MBB.getParent();
1069   const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
1070   const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
1071   DebugLoc DL = MI.getDebugLoc();
1072   Register SrcReg = MI.getOperand(0).getReg();
1073   bool IsKilled = MI.getOperand(0).isKill();
1074 
1075   bool IsPrimed = PPC::ACCRCRegClass.contains(SrcReg);
1076   Register Reg =
1077       PPC::VSRp0 + (SrcReg - (IsPrimed ? PPC::ACC0 : PPC::UACC0)) * 2;
1078   bool IsLittleEndian = Subtarget.isLittleEndian();
1079 
1080   emitAccSpillRestoreInfo(MBB, IsPrimed, false);
1081 
1082   // De-prime the register being spilled, create two stores for the pair
1083   // subregisters accounting for endianness and then re-prime the register if
1084   // it isn't killed.  This uses the Offset parameter to addFrameReference() to
1085   // adjust the offset of the store that is within the 64-byte stack slot.
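  // Illustrative sketch of the emitted sequence (assuming a primed, killed
  // accumulator on a little-endian target; register names are placeholders):
  //   XXMFACC  accN              ; de-prime the accumulator
  //   STXVP    vsrpM,   FI + 32  ; first register pair of the accumulator
  //   STXVP    vsrpM+1, FI + 0   ; second register pair of the accumulator
  // The re-priming XXMTACC is skipped because the source register is killed.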
1086   if (IsPrimed)
1087     BuildMI(MBB, II, DL, TII.get(PPC::XXMFACC), SrcReg).addReg(SrcReg);
1088   addFrameReference(BuildMI(MBB, II, DL, TII.get(PPC::STXVP))
1089                         .addReg(Reg, getKillRegState(IsKilled)),
1090                     FrameIndex, IsLittleEndian ? 32 : 0);
1091   addFrameReference(BuildMI(MBB, II, DL, TII.get(PPC::STXVP))
1092                         .addReg(Reg + 1, getKillRegState(IsKilled)),
1093                     FrameIndex, IsLittleEndian ? 0 : 32);
1094   if (IsPrimed && !IsKilled)
1095     BuildMI(MBB, II, DL, TII.get(PPC::XXMTACC), SrcReg).addReg(SrcReg);
1096 
1097   // Discard the pseudo instruction.
1098   MBB.erase(II);
1099 }
1100 
1101 /// lowerACCRestore - Generate the code to restore the accumulator register.
1102 void PPCRegisterInfo::lowerACCRestore(MachineBasicBlock::iterator II,
1103                                       unsigned FrameIndex) const {
1104   MachineInstr &MI = *II; // <DestReg> = RESTORE_ACC <offset>
1105   MachineBasicBlock &MBB = *MI.getParent();
1106   MachineFunction &MF = *MBB.getParent();
1107   const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
1108   const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
1109   DebugLoc DL = MI.getDebugLoc();
1110 
1111   Register DestReg = MI.getOperand(0).getReg();
1112   assert(MI.definesRegister(DestReg) &&
1113          "RESTORE_ACC does not define its destination");
1114 
1115   bool IsPrimed = PPC::ACCRCRegClass.contains(DestReg);
1116   Register Reg =
1117       PPC::VSRp0 + (DestReg - (IsPrimed ? PPC::ACC0 : PPC::UACC0)) * 2;
1118   bool IsLittleEndian = Subtarget.isLittleEndian();
1119 
1120   emitAccSpillRestoreInfo(MBB, IsPrimed, true);
1121 
1122   // Create two loads for the pair subregisters accounting for endianness and
1123   // then prime the accumulator register being restored.
1124   addFrameReference(BuildMI(MBB, II, DL, TII.get(PPC::LXVP), Reg),
1125                     FrameIndex, IsLittleEndian ? 32 : 0);
1126   addFrameReference(BuildMI(MBB, II, DL, TII.get(PPC::LXVP), Reg + 1),
1127                     FrameIndex, IsLittleEndian ? 0 : 32);
1128   if (IsPrimed)
1129     BuildMI(MBB, II, DL, TII.get(PPC::XXMTACC), DestReg).addReg(DestReg);
1130 
1131   // Discard the pseudo instruction.
1132   MBB.erase(II);
1133 }
1134 
1135 bool PPCRegisterInfo::hasReservedSpillSlot(const MachineFunction &MF,
1136                                            Register Reg, int &FrameIdx) const {
1137   // For the nonvolatile condition registers (CR2, CR3, CR4) return true to
1138   // prevent allocating an additional frame slot.
1139   // For 64-bit ELF and AIX, the CR save area is in the linkage area at SP+8,
1140   // for 32-bit AIX the CR save area is in the linkage area at SP+4.
1141   // We have created a FrameIndex for that spill slot to keep the CalleeSavedInfos
1142   // valid.
1143   // For 32-bit ELF, we have previously created the stack slot if needed, so
1144   // return its FrameIdx.
1145   if (PPC::CR2 <= Reg && Reg <= PPC::CR4) {
1146     FrameIdx = MF.getInfo<PPCFunctionInfo>()->getCRSpillFrameIndex();
1147     return true;
1148   }
1149   return false;
1150 }
1151 
1152 // If the offset must be a multiple of some value, return what that value is.
1153 static unsigned offsetMinAlignForOpcode(unsigned OpC) {
1154   switch (OpC) {
1155   default:
1156     return 1;
1157   case PPC::LWA:
1158   case PPC::LWA_32:
1159   case PPC::LD:
1160   case PPC::LDU:
1161   case PPC::STD:
1162   case PPC::STDU:
1163   case PPC::DFLOADf32:
1164   case PPC::DFLOADf64:
1165   case PPC::DFSTOREf32:
1166   case PPC::DFSTOREf64:
1167   case PPC::LXSD:
1168   case PPC::LXSSP:
1169   case PPC::STXSD:
1170   case PPC::STXSSP:
1171     return 4;
1172   case PPC::EVLDD:
1173   case PPC::EVSTDD:
1174     return 8;
1175   case PPC::LXV:
1176   case PPC::STXV:
1177     return 16;
1178   }
1179 }
1180 
1181 // If the offset must be a multiple of some value, return what that value is.
1182 static unsigned offsetMinAlign(const MachineInstr &MI) {
1183   unsigned OpC = MI.getOpcode();
1184   return offsetMinAlignForOpcode(OpC);
1185 }
1186 
1187 // Return the OffsetOperandNo given the FIOperandNum (and the instruction).
1188 static unsigned getOffsetONFromFION(const MachineInstr &MI,
1189                                     unsigned FIOperandNum) {
1190   // Take into account whether it's an add or mem instruction
1191   unsigned OffsetOperandNo = (FIOperandNum == 2) ? 1 : 2;
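  // Illustrative operand layouts (hypothetical instructions): for a D-Form
  // store such as "STW %val, <imm>, <FI>" the frame index is operand 2 and
  // the offset is operand 1; for "ADDI %dst, <FI>, <imm>" the frame index is
  // operand 1 and the offset is operand 2.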
1192   if (MI.isInlineAsm())
1193     OffsetOperandNo = FIOperandNum - 1;
1194   else if (MI.getOpcode() == TargetOpcode::STACKMAP ||
1195            MI.getOpcode() == TargetOpcode::PATCHPOINT)
1196     OffsetOperandNo = FIOperandNum + 1;
1197 
1198   return OffsetOperandNo;
1199 }
1200 
1201 void
1202 PPCRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
1203                                      int SPAdj, unsigned FIOperandNum,
1204                                      RegScavenger *RS) const {
1205   assert(SPAdj == 0 && "Unexpected");
1206 
1207   // Get the instruction.
1208   MachineInstr &MI = *II;
1209   // Get the instruction's basic block.
1210   MachineBasicBlock &MBB = *MI.getParent();
1211   // Get the basic block's function.
1212   MachineFunction &MF = *MBB.getParent();
1213   const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
1214   // Get the instruction info.
1215   const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
1216   // Get the frame info.
1217   MachineFrameInfo &MFI = MF.getFrameInfo();
1218   DebugLoc dl = MI.getDebugLoc();
1219 
1220   unsigned OffsetOperandNo = getOffsetONFromFION(MI, FIOperandNum);
1221 
1222   // Get the frame index.
1223   int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
1224 
1225   // Get the frame pointer save index.  Users of this index are primarily
1226   // DYNALLOC instructions.
1227   PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
1228   int FPSI = FI->getFramePointerSaveIndex();
1229   // Get the instruction opcode.
1230   unsigned OpC = MI.getOpcode();
1231 
1232   if ((OpC == PPC::DYNAREAOFFSET || OpC == PPC::DYNAREAOFFSET8)) {
1233     lowerDynamicAreaOffset(II);
1234     return;
1235   }
1236 
1237   // Special case for dynamic alloca.
1238   if (FPSI && FrameIndex == FPSI &&
1239       (OpC == PPC::DYNALLOC || OpC == PPC::DYNALLOC8)) {
1240     lowerDynamicAlloc(II);
1241     return;
1242   }
1243 
1244   if (FPSI && FrameIndex == FPSI &&
1245       (OpC == PPC::PREPARE_PROBED_ALLOCA_64 ||
1246        OpC == PPC::PREPARE_PROBED_ALLOCA_32 ||
1247        OpC == PPC::PREPARE_PROBED_ALLOCA_NEGSIZE_SAME_REG_64 ||
1248        OpC == PPC::PREPARE_PROBED_ALLOCA_NEGSIZE_SAME_REG_32)) {
1249     lowerPrepareProbedAlloca(II);
1250     return;
1251   }
1252 
1253   // Special case for pseudo-ops SPILL_CR and RESTORE_CR, etc.
1254   if (OpC == PPC::SPILL_CR) {
1255     lowerCRSpilling(II, FrameIndex);
1256     return;
1257   } else if (OpC == PPC::RESTORE_CR) {
1258     lowerCRRestore(II, FrameIndex);
1259     return;
1260   } else if (OpC == PPC::SPILL_CRBIT) {
1261     lowerCRBitSpilling(II, FrameIndex);
1262     return;
1263   } else if (OpC == PPC::RESTORE_CRBIT) {
1264     lowerCRBitRestore(II, FrameIndex);
1265     return;
1266   } else if (OpC == PPC::SPILL_ACC || OpC == PPC::SPILL_UACC) {
1267     lowerACCSpilling(II, FrameIndex);
1268     return;
1269   } else if (OpC == PPC::RESTORE_ACC || OpC == PPC::RESTORE_UACC) {
1270     lowerACCRestore(II, FrameIndex);
1271     return;
1272   }
1273 
1274   // Replace the FrameIndex with the base register: GPR1 (SP) or GPR31 (FP).
1275   MI.getOperand(FIOperandNum).ChangeToRegister(
1276     FrameIndex < 0 ? getBaseRegister(MF) : getFrameRegister(MF), false);
1277 
1278   // If the instruction is not present in ImmToIdxMap, then it has no immediate
1279   // form (and must be r+r).
1280   bool noImmForm = !MI.isInlineAsm() && OpC != TargetOpcode::STACKMAP &&
1281                    OpC != TargetOpcode::PATCHPOINT && !ImmToIdxMap.count(OpC);
1282 
1283   // Now add the frame object offset to the offset from r1.
1284   int Offset = MFI.getObjectOffset(FrameIndex);
1285   Offset += MI.getOperand(OffsetOperandNo).getImm();
1286 
1287   // If we're not using a Frame Pointer that has been set to the value of the
1288   // SP before having the stack size subtracted from it, then add the stack size
1289   // to Offset to get the correct offset.
1290   // Naked functions have stack size 0, although getStackSize may not reflect
1291   // that because we didn't call all the pieces that compute it for naked
1292   // functions.
1293   if (!MF.getFunction().hasFnAttribute(Attribute::Naked)) {
1294     if (!(hasBasePointer(MF) && FrameIndex < 0))
1295       Offset += MFI.getStackSize();
1296   }
1297 
1298   // If we can, encode the offset directly into the instruction.  If this is a
1299   // normal PPC "ri" instruction, any 16-bit value can be safely encoded.  If
1300   // this is a PPC64 "ix" instruction, only a 16-bit value with the low two bits
1301   // clear can be encoded.  This is extremely uncommon, because normally you
1302   // only "std" to a stack slot that is at least 4-byte aligned, but it can
1303   // happen in invalid code.
1304   assert(OpC != PPC::DBG_VALUE &&
1305          "This should be handled in a target-independent way");
1306   bool OffsetFitsMnemonic = (OpC == PPC::EVSTDD || OpC == PPC::EVLDD) ?
1307                             isUInt<8>(Offset) :
1308                             isInt<16>(Offset);
1309   if (!noImmForm && ((OffsetFitsMnemonic &&
1310                       ((Offset % offsetMinAlign(MI)) == 0)) ||
1311                      OpC == TargetOpcode::STACKMAP ||
1312                      OpC == TargetOpcode::PATCHPOINT)) {
1313     MI.getOperand(OffsetOperandNo).ChangeToImmediate(Offset);
1314     return;
1315   }
1316 
1317   // The offset doesn't fit into the instruction's immediate field, so build
1318   // the full offset in a register instead.
1319 
1320   bool is64Bit = TM.isPPC64();
1321   const TargetRegisterClass *G8RC = &PPC::G8RCRegClass;
1322   const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
1323   const TargetRegisterClass *RC = is64Bit ? G8RC : GPRC;
1324   Register SRegHi = MF.getRegInfo().createVirtualRegister(RC),
1325            SReg = MF.getRegInfo().createVirtualRegister(RC);
1326 
1327   // Insert code setting rA to the full offset value before the ld, st, or add.
1328   if (isInt<16>(Offset))
1329     BuildMI(MBB, II, dl, TII.get(is64Bit ? PPC::LI8 : PPC::LI), SReg)
1330       .addImm(Offset);
1331   else {
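         // The offset needs more than 16 bits: materialize the high half with
         // lis and OR in the low 16 bits with ori.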
1332     BuildMI(MBB, II, dl, TII.get(is64Bit ? PPC::LIS8 : PPC::LIS), SRegHi)
1333       .addImm(Offset >> 16);
1334     BuildMI(MBB, II, dl, TII.get(is64Bit ? PPC::ORI8 : PPC::ORI), SReg)
1335       .addReg(SRegHi, RegState::Kill)
1336       .addImm(Offset);
1337   }
1338 
1339   // Convert into indexed form of the instruction:
1340   //
1341   //   sth  0:rA, 1:imm, 2:(rB)  ==>  sthx 0:rA, 2:rB, 1:r0
1342   //   addi 0:rA, 1:rB,  2:imm   ==>  add  0:rA, 1:rB, 2:r0
1343   unsigned OperandBase;
1344 
1345   if (noImmForm)
1346     OperandBase = 1;
1347   else if (OpC != TargetOpcode::INLINEASM &&
1348            OpC != TargetOpcode::INLINEASM_BR) {
1349     assert(ImmToIdxMap.count(OpC) &&
1350            "No indexed form of load or store available!");
1351     unsigned NewOpcode = ImmToIdxMap.find(OpC)->second;
1352     MI.setDesc(TII.get(NewOpcode));
1353     OperandBase = 1;
1354   } else {
1355     OperandBase = OffsetOperandNo;
1356   }
1357 
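       // Rewrite the operands for the indexed form: the stack/base register
       // becomes the first index operand and the register holding the offset
       // becomes the second (marked as killed).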
1358   Register StackReg = MI.getOperand(FIOperandNum).getReg();
1359   MI.getOperand(OperandBase).ChangeToRegister(StackReg, false);
1360   MI.getOperand(OperandBase + 1).ChangeToRegister(SReg, false, false, true);
1361 }
1362 
1363 Register PPCRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
1364   const PPCFrameLowering *TFI = getFrameLowering(MF);
1365 
1366   if (!TM.isPPC64())
1367     return TFI->hasFP(MF) ? PPC::R31 : PPC::R1;
1368   else
1369     return TFI->hasFP(MF) ? PPC::X31 : PPC::X1;
1370 }
1371 
1372 Register PPCRegisterInfo::getBaseRegister(const MachineFunction &MF) const {
1373   const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
1374   if (!hasBasePointer(MF))
1375     return getFrameRegister(MF);
1376 
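       // X30 is the base pointer on 64-bit targets.  On 32-bit SVR4, PIC code
       // reserves R30 as the PIC base register, so R29 is used instead.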
1377   if (TM.isPPC64())
1378     return PPC::X30;
1379 
1380   if (Subtarget.isSVR4ABI() && TM.isPositionIndependent())
1381     return PPC::R29;
1382 
1383   return PPC::R30;
1384 }
1385 
1386 bool PPCRegisterInfo::hasBasePointer(const MachineFunction &MF) const {
1387   if (!EnableBasePointer)
1388     return false;
1389   if (AlwaysBasePointer)
1390     return true;
1391 
1392   // If we need to realign the stack, then the stack pointer can no longer
1393   // serve as an offset into the caller's stack space. As a result, we need a
1394   // base pointer.
1395   return hasStackRealignment(MF);
1396 }
1397 
1398 /// Returns true if the instruction's frame index
1399 /// reference would be better served by a base register other than FP
1400 /// or SP. Used by LocalStackFrameAllocation to determine which frame index
1401 /// references it should create new base registers for.
1402 bool PPCRegisterInfo::
1403 needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const {
1404   assert(Offset < 0 && "Local offset must be negative");
1405 
1406   // It's the load/store FI references that cause issues, as it can be difficult
1407   // to materialize the offset if it won't fit in the literal field. Estimate
1408   // based on the size of the local frame and some conservative assumptions
1409   // about the rest of the stack frame (note, this is pre-regalloc, so
1410   // we don't know everything for certain yet) whether this offset is likely
1411   // to be out of range of the immediate. Return true if so.
1412 
1413   // We only generate virtual base registers for loads and stores that have
1414   // an r+i form. Return false for everything else.
1415   unsigned OpC = MI->getOpcode();
1416   if (!ImmToIdxMap.count(OpC))
1417     return false;
1418 
1419   // Don't generate a new virtual base register just to add zero to it.
1420   if ((OpC == PPC::ADDI || OpC == PPC::ADDI8) &&
1421       MI->getOperand(2).getImm() == 0)
1422     return false;
1423 
1424   MachineBasicBlock &MBB = *MI->getParent();
1425   MachineFunction &MF = *MBB.getParent();
1426   const PPCFrameLowering *TFI = getFrameLowering(MF);
1427   unsigned StackEst = TFI->determineFrameLayout(MF, true);
1428 
1429   // If we likely don't need a stack frame, then we probably don't need a
1430   // virtual base register either.
1431   if (!StackEst)
1432     return false;
1433 
1434   // Estimate an offset from the stack pointer.
1435   // The incoming offset is relative to the SP at the start of the function,
1436   // but when we access the local it'll be relative to the SP after local
1437   // allocation, so adjust our SP-relative offset by that allocation size.
1438   Offset += StackEst;
1439 
1440   // The frame pointer will point to the end of the stack, so estimate the
1441   // offset as the difference between the object offset and the FP location.
1442   return !isFrameOffsetLegal(MI, getBaseRegister(MF), Offset);
1443 }
1444 
1445 /// Insert defining instruction(s) for BaseReg to
1446 /// be a pointer to FrameIdx at the beginning of the basic block.
1447 Register PPCRegisterInfo::materializeFrameBaseRegister(MachineBasicBlock *MBB,
1448                                                        int FrameIdx,
1449                                                        int64_t Offset) const {
1450   unsigned ADDriOpc = TM.isPPC64() ? PPC::ADDI8 : PPC::ADDI;
1451 
1452   MachineBasicBlock::iterator Ins = MBB->begin();
1453   DebugLoc DL;                  // Defaults to "unknown"
1454   if (Ins != MBB->end())
1455     DL = Ins->getDebugLoc();
1456 
1457   const MachineFunction &MF = *MBB->getParent();
1458   const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
1459   const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
1460   const MCInstrDesc &MCID = TII.get(ADDriOpc);
1461   MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
1462   const TargetRegisterClass *RC = getPointerRegClass(MF);
1463   Register BaseReg = MRI.createVirtualRegister(RC);
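       // Constrain the new virtual register to the class required by the ADDI
       // destination operand.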
1464   MRI.constrainRegClass(BaseReg, TII.getRegClass(MCID, 0, this, MF));
1465 
1466   BuildMI(*MBB, Ins, DL, MCID, BaseReg)
1467     .addFrameIndex(FrameIdx).addImm(Offset);
1468 
1469   return BaseReg;
1470 }
1471 
1472 void PPCRegisterInfo::resolveFrameIndex(MachineInstr &MI, Register BaseReg,
1473                                         int64_t Offset) const {
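       // Locate the frame-index operand; its position varies between
       // instructions.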
1474   unsigned FIOperandNum = 0;
1475   while (!MI.getOperand(FIOperandNum).isFI()) {
1476     ++FIOperandNum;
1477     assert(FIOperandNum < MI.getNumOperands() &&
1478            "Instr doesn't have FrameIndex operand!");
1479   }
1480 
1481   MI.getOperand(FIOperandNum).ChangeToRegister(BaseReg, false);
1482   unsigned OffsetOperandNo = getOffsetONFromFION(MI, FIOperandNum);
1483   Offset += MI.getOperand(OffsetOperandNo).getImm();
1484   MI.getOperand(OffsetOperandNo).ChangeToImmediate(Offset);
1485 
1486   MachineBasicBlock &MBB = *MI.getParent();
1487   MachineFunction &MF = *MBB.getParent();
1488   const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
1489   const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
1490   const MCInstrDesc &MCID = MI.getDesc();
1491   MachineRegisterInfo &MRI = MF.getRegInfo();
1492   MRI.constrainRegClass(BaseReg,
1493                         TII.getRegClass(MCID, FIOperandNum, this, MF));
1494 }
1495 
1496 bool PPCRegisterInfo::isFrameOffsetLegal(const MachineInstr *MI,
1497                                          Register BaseReg,
1498                                          int64_t Offset) const {
1499   unsigned FIOperandNum = 0;
1500   while (!MI->getOperand(FIOperandNum).isFI()) {
1501     ++FIOperandNum;
1502     assert(FIOperandNum < MI->getNumOperands() &&
1503            "Instr doesn't have FrameIndex operand!");
1504   }
1505 
1506   unsigned OffsetOperandNo = getOffsetONFromFION(*MI, FIOperandNum);
1507   Offset += MI->getOperand(OffsetOperandNo).getImm();
1508 
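       // DBG_VALUE, STACKMAP and PATCHPOINT accept arbitrary offsets; everything
       // else needs a 16-bit signed offset that satisfies the opcode's minimum
       // alignment.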
1509   return MI->getOpcode() == PPC::DBG_VALUE || // DBG_VALUE is always Reg+Imm
1510          MI->getOpcode() == TargetOpcode::STACKMAP ||
1511          MI->getOpcode() == TargetOpcode::PATCHPOINT ||
1512          (isInt<16>(Offset) && (Offset % offsetMinAlign(*MI)) == 0);
1513 }
1514