//===-- ThumbRegisterInfo.cpp - Thumb-1 Register Information -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the Thumb-1 implementation of the TargetRegisterInfo
// class.
//
//===----------------------------------------------------------------------===//
13
#include "ThumbRegisterInfo.h"
#include "ARMBaseInstrInfo.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMSubtarget.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Target/TargetMachine.h"
33
34 namespace llvm {
35 extern cl::opt<bool> ReuseFrameIndexVals;
36 }
37
38 using namespace llvm;
39
40 ThumbRegisterInfo::ThumbRegisterInfo() = default;
41
42 const TargetRegisterClass *
getLargestLegalSuperClass(const TargetRegisterClass * RC,const MachineFunction & MF) const43 ThumbRegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC,
44 const MachineFunction &MF) const {
45 if (!MF.getSubtarget<ARMSubtarget>().isThumb1Only())
46 return ARMBaseRegisterInfo::getLargestLegalSuperClass(RC, MF);
47
48 if (ARM::tGPRRegClass.hasSubClassEq(RC))
49 return &ARM::tGPRRegClass;
50 return ARMBaseRegisterInfo::getLargestLegalSuperClass(RC, MF);
51 }
52
53 const TargetRegisterClass *
getPointerRegClass(const MachineFunction & MF,unsigned Kind) const54 ThumbRegisterInfo::getPointerRegClass(const MachineFunction &MF,
55 unsigned Kind) const {
56 if (!MF.getSubtarget<ARMSubtarget>().isThumb1Only())
57 return ARMBaseRegisterInfo::getPointerRegClass(MF, Kind);
58 return &ARM::tGPRRegClass;
59 }
60
emitThumb1LoadConstPool(MachineBasicBlock & MBB,MachineBasicBlock::iterator & MBBI,const DebugLoc & dl,unsigned DestReg,unsigned SubIdx,int Val,ARMCC::CondCodes Pred,unsigned PredReg,unsigned MIFlags)61 static void emitThumb1LoadConstPool(MachineBasicBlock &MBB,
62 MachineBasicBlock::iterator &MBBI,
63 const DebugLoc &dl, unsigned DestReg,
64 unsigned SubIdx, int Val,
65 ARMCC::CondCodes Pred, unsigned PredReg,
66 unsigned MIFlags) {
67 MachineFunction &MF = *MBB.getParent();
68 const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
69 const TargetInstrInfo &TII = *STI.getInstrInfo();
70 MachineConstantPool *ConstantPool = MF.getConstantPool();
71 const Constant *C = ConstantInt::get(
72 Type::getInt32Ty(MBB.getParent()->getFunction().getContext()), Val);
73 unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align(4));
74
75 BuildMI(MBB, MBBI, dl, TII.get(ARM::tLDRpci))
76 .addReg(DestReg, getDefRegState(true), SubIdx)
77 .addConstantPoolIndex(Idx).addImm(Pred).addReg(PredReg)
78 .setMIFlags(MIFlags);
79 }
80
emitThumb2LoadConstPool(MachineBasicBlock & MBB,MachineBasicBlock::iterator & MBBI,const DebugLoc & dl,unsigned DestReg,unsigned SubIdx,int Val,ARMCC::CondCodes Pred,unsigned PredReg,unsigned MIFlags)81 static void emitThumb2LoadConstPool(MachineBasicBlock &MBB,
82 MachineBasicBlock::iterator &MBBI,
83 const DebugLoc &dl, unsigned DestReg,
84 unsigned SubIdx, int Val,
85 ARMCC::CondCodes Pred, unsigned PredReg,
86 unsigned MIFlags) {
87 MachineFunction &MF = *MBB.getParent();
88 const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
89 MachineConstantPool *ConstantPool = MF.getConstantPool();
90 const Constant *C = ConstantInt::get(
91 Type::getInt32Ty(MBB.getParent()->getFunction().getContext()), Val);
92 unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align(4));
93
94 BuildMI(MBB, MBBI, dl, TII.get(ARM::t2LDRpci))
95 .addReg(DestReg, getDefRegState(true), SubIdx)
96 .addConstantPoolIndex(Idx)
97 .add(predOps(ARMCC::AL))
98 .setMIFlags(MIFlags);
99 }
100
101 /// emitLoadConstPool - Emits a load from constpool to materialize the
102 /// specified immediate.
emitLoadConstPool(MachineBasicBlock & MBB,MachineBasicBlock::iterator & MBBI,const DebugLoc & dl,Register DestReg,unsigned SubIdx,int Val,ARMCC::CondCodes Pred,Register PredReg,unsigned MIFlags) const103 void ThumbRegisterInfo::emitLoadConstPool(
104 MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
105 const DebugLoc &dl, Register DestReg, unsigned SubIdx, int Val,
106 ARMCC::CondCodes Pred, Register PredReg, unsigned MIFlags) const {
107 MachineFunction &MF = *MBB.getParent();
108 const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
109 if (STI.isThumb1Only()) {
110 assert((isARMLowRegister(DestReg) || DestReg.isVirtual()) &&
111 "Thumb1 does not have ldr to high register");
112 return emitThumb1LoadConstPool(MBB, MBBI, dl, DestReg, SubIdx, Val, Pred,
113 PredReg, MIFlags);
114 }
115 return emitThumb2LoadConstPool(MBB, MBBI, dl, DestReg, SubIdx, Val, Pred,
116 PredReg, MIFlags);
117 }
118
119 /// emitThumbRegPlusImmInReg - Emits a series of instructions to materialize
120 /// a destreg = basereg + immediate in Thumb code. Materialize the immediate
121 /// in a register using mov / mvn sequences or load the immediate from a
122 /// constpool entry.
emitThumbRegPlusImmInReg(MachineBasicBlock & MBB,MachineBasicBlock::iterator & MBBI,const DebugLoc & dl,Register DestReg,Register BaseReg,int NumBytes,bool CanChangeCC,const TargetInstrInfo & TII,const ARMBaseRegisterInfo & MRI,unsigned MIFlags=MachineInstr::NoFlags)123 static void emitThumbRegPlusImmInReg(
124 MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
125 const DebugLoc &dl, Register DestReg, Register BaseReg, int NumBytes,
126 bool CanChangeCC, const TargetInstrInfo &TII,
127 const ARMBaseRegisterInfo &MRI, unsigned MIFlags = MachineInstr::NoFlags) {
128 MachineFunction &MF = *MBB.getParent();
129 const ARMSubtarget &ST = MF.getSubtarget<ARMSubtarget>();
130 bool isHigh = !isARMLowRegister(DestReg) ||
131 (BaseReg != 0 && !isARMLowRegister(BaseReg));
132 bool isSub = false;
133 // Subtract doesn't have high register version. Load the negative value
134 // if either base or dest register is a high register. Also, if do not
135 // issue sub as part of the sequence if condition register is to be
136 // preserved.
137 if (NumBytes < 0 && !isHigh && CanChangeCC) {
138 isSub = true;
139 NumBytes = -NumBytes;
140 }
141 Register LdReg = DestReg;
142 if (DestReg == ARM::SP)
143 assert(BaseReg == ARM::SP && "Unexpected!");
144 if (!isARMLowRegister(DestReg) && !DestReg.isVirtual())
145 LdReg = MF.getRegInfo().createVirtualRegister(&ARM::tGPRRegClass);
146
147 if (NumBytes <= 255 && NumBytes >= 0 && CanChangeCC) {
148 BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVi8), LdReg)
149 .add(t1CondCodeOp())
150 .addImm(NumBytes)
151 .setMIFlags(MIFlags);
152 } else if (NumBytes < 0 && NumBytes >= -255 && CanChangeCC) {
153 BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVi8), LdReg)
154 .add(t1CondCodeOp())
155 .addImm(NumBytes)
156 .setMIFlags(MIFlags);
157 BuildMI(MBB, MBBI, dl, TII.get(ARM::tRSB), LdReg)
158 .add(t1CondCodeOp())
159 .addReg(LdReg, RegState::Kill)
160 .setMIFlags(MIFlags);
161 } else if (ST.genExecuteOnly()) {
162 BuildMI(MBB, MBBI, dl, TII.get(ARM::t2MOVi32imm), LdReg)
163 .addImm(NumBytes).setMIFlags(MIFlags);
164 } else
165 MRI.emitLoadConstPool(MBB, MBBI, dl, LdReg, 0, NumBytes, ARMCC::AL, 0,
166 MIFlags);
167
168 // Emit add / sub.
169 int Opc = (isSub) ? ARM::tSUBrr
170 : ((isHigh || !CanChangeCC) ? ARM::tADDhirr : ARM::tADDrr);
171 MachineInstrBuilder MIB = BuildMI(MBB, MBBI, dl, TII.get(Opc), DestReg);
172 if (Opc != ARM::tADDhirr)
173 MIB = MIB.add(t1CondCodeOp());
174 if (DestReg == ARM::SP || isSub)
175 MIB.addReg(BaseReg).addReg(LdReg, RegState::Kill);
176 else
177 MIB.addReg(LdReg).addReg(BaseReg, RegState::Kill);
178 MIB.add(predOps(ARMCC::AL));
179 }
180
181 /// emitThumbRegPlusImmediate - Emits a series of instructions to materialize
182 /// a destreg = basereg + immediate in Thumb code. Tries a series of ADDs or
183 /// SUBs first, and uses a constant pool value if the instruction sequence would
184 /// be too long. This is allowed to modify the condition flags.
emitThumbRegPlusImmediate(MachineBasicBlock & MBB,MachineBasicBlock::iterator & MBBI,const DebugLoc & dl,Register DestReg,Register BaseReg,int NumBytes,const TargetInstrInfo & TII,const ARMBaseRegisterInfo & MRI,unsigned MIFlags)185 void llvm::emitThumbRegPlusImmediate(MachineBasicBlock &MBB,
186 MachineBasicBlock::iterator &MBBI,
187 const DebugLoc &dl, Register DestReg,
188 Register BaseReg, int NumBytes,
189 const TargetInstrInfo &TII,
190 const ARMBaseRegisterInfo &MRI,
191 unsigned MIFlags) {
192 bool isSub = NumBytes < 0;
193 unsigned Bytes = (unsigned)NumBytes;
194 if (isSub) Bytes = -NumBytes;
195
196 int CopyOpc = 0;
197 unsigned CopyBits = 0;
198 unsigned CopyScale = 1;
199 bool CopyNeedsCC = false;
200 int ExtraOpc = 0;
201 unsigned ExtraBits = 0;
202 unsigned ExtraScale = 1;
203 bool ExtraNeedsCC = false;
204
205 // Strategy:
206 // We need to select two types of instruction, maximizing the available
207 // immediate range of each. The instructions we use will depend on whether
208 // DestReg and BaseReg are low, high or the stack pointer.
209 // * CopyOpc - DestReg = BaseReg + imm
210 // This will be emitted once if DestReg != BaseReg, and never if
211 // DestReg == BaseReg.
212 // * ExtraOpc - DestReg = DestReg + imm
213 // This will be emitted as many times as necessary to add the
214 // full immediate.
215 // If the immediate ranges of these instructions are not large enough to cover
216 // NumBytes with a reasonable number of instructions, we fall back to using a
217 // value loaded from a constant pool.
218 if (DestReg == ARM::SP) {
219 if (BaseReg == ARM::SP) {
220 // sp -> sp
221 // Already in right reg, no copy needed
222 } else {
223 // low -> sp or high -> sp
224 CopyOpc = ARM::tMOVr;
225 CopyBits = 0;
226 }
227 ExtraOpc = isSub ? ARM::tSUBspi : ARM::tADDspi;
228 ExtraBits = 7;
229 ExtraScale = 4;
230 } else if (isARMLowRegister(DestReg)) {
231 if (BaseReg == ARM::SP) {
232 // sp -> low
233 assert(!isSub && "Thumb1 does not have tSUBrSPi");
234 CopyOpc = ARM::tADDrSPi;
235 CopyBits = 8;
236 CopyScale = 4;
237 } else if (DestReg == BaseReg) {
238 // low -> same low
239 // Already in right reg, no copy needed
240 } else if (isARMLowRegister(BaseReg)) {
241 // low -> different low
242 CopyOpc = isSub ? ARM::tSUBi3 : ARM::tADDi3;
243 CopyBits = 3;
244 CopyNeedsCC = true;
245 } else {
246 // high -> low
247 CopyOpc = ARM::tMOVr;
248 CopyBits = 0;
249 }
250 ExtraOpc = isSub ? ARM::tSUBi8 : ARM::tADDi8;
251 ExtraBits = 8;
252 ExtraNeedsCC = true;
253 } else /* DestReg is high */ {
254 if (DestReg == BaseReg) {
255 // high -> same high
256 // Already in right reg, no copy needed
257 } else {
258 // {low,high,sp} -> high
259 CopyOpc = ARM::tMOVr;
260 CopyBits = 0;
261 }
262 ExtraOpc = 0;
263 }
264
265 // We could handle an unaligned immediate with an unaligned copy instruction
266 // and an aligned extra instruction, but this case is not currently needed.
267 assert(((Bytes & 3) == 0 || ExtraScale == 1) &&
268 "Unaligned offset, but all instructions require alignment");
269
270 unsigned CopyRange = ((1 << CopyBits) - 1) * CopyScale;
271 // If we would emit the copy with an immediate of 0, just use tMOVr.
272 if (CopyOpc && Bytes < CopyScale) {
273 CopyOpc = ARM::tMOVr;
274 CopyScale = 1;
275 CopyNeedsCC = false;
276 CopyRange = 0;
277 }
278 unsigned ExtraRange = ((1 << ExtraBits) - 1) * ExtraScale; // per instruction
279 unsigned RequiredCopyInstrs = CopyOpc ? 1 : 0;
280 unsigned RangeAfterCopy = (CopyRange > Bytes) ? 0 : (Bytes - CopyRange);
281
282 // We could handle this case when the copy instruction does not require an
283 // aligned immediate, but we do not currently do this.
284 assert(RangeAfterCopy % ExtraScale == 0 &&
285 "Extra instruction requires immediate to be aligned");
286
287 unsigned RequiredExtraInstrs;
288 if (ExtraRange)
289 RequiredExtraInstrs = alignTo(RangeAfterCopy, ExtraRange) / ExtraRange;
290 else if (RangeAfterCopy > 0)
291 // We need an extra instruction but none is available
292 RequiredExtraInstrs = 1000000;
293 else
294 RequiredExtraInstrs = 0;
295 unsigned RequiredInstrs = RequiredCopyInstrs + RequiredExtraInstrs;
296 unsigned Threshold = (DestReg == ARM::SP) ? 3 : 2;
297
298 // Use a constant pool, if the sequence of ADDs/SUBs is too expensive.
299 if (RequiredInstrs > Threshold) {
300 emitThumbRegPlusImmInReg(MBB, MBBI, dl,
301 DestReg, BaseReg, NumBytes, true,
302 TII, MRI, MIFlags);
303 return;
304 }
305
306 // Emit zero or one copy instructions
307 if (CopyOpc) {
308 unsigned CopyImm = std::min(Bytes, CopyRange) / CopyScale;
309 Bytes -= CopyImm * CopyScale;
310
311 MachineInstrBuilder MIB = BuildMI(MBB, MBBI, dl, TII.get(CopyOpc), DestReg);
312 if (CopyNeedsCC)
313 MIB = MIB.add(t1CondCodeOp());
314 MIB.addReg(BaseReg, RegState::Kill);
315 if (CopyOpc != ARM::tMOVr) {
316 MIB.addImm(CopyImm);
317 }
318 MIB.setMIFlags(MIFlags).add(predOps(ARMCC::AL));
319
320 BaseReg = DestReg;
321 }
322
323 // Emit zero or more in-place add/sub instructions
324 while (Bytes) {
325 unsigned ExtraImm = std::min(Bytes, ExtraRange) / ExtraScale;
326 Bytes -= ExtraImm * ExtraScale;
327
328 MachineInstrBuilder MIB = BuildMI(MBB, MBBI, dl, TII.get(ExtraOpc), DestReg);
329 if (ExtraNeedsCC)
330 MIB = MIB.add(t1CondCodeOp());
331 MIB.addReg(BaseReg)
332 .addImm(ExtraImm)
333 .add(predOps(ARMCC::AL))
334 .setMIFlags(MIFlags);
335 }
336 }
337
removeOperands(MachineInstr & MI,unsigned i)338 static void removeOperands(MachineInstr &MI, unsigned i) {
339 unsigned Op = i;
340 for (unsigned e = MI.getNumOperands(); i != e; ++i)
341 MI.removeOperand(Op);
342 }
343
344 /// convertToNonSPOpcode - Change the opcode to the non-SP version, because
345 /// we're replacing the frame index with a non-SP register.
convertToNonSPOpcode(unsigned Opcode)346 static unsigned convertToNonSPOpcode(unsigned Opcode) {
347 switch (Opcode) {
348 case ARM::tLDRspi:
349 return ARM::tLDRi;
350
351 case ARM::tSTRspi:
352 return ARM::tSTRi;
353 }
354
355 return Opcode;
356 }
357
rewriteFrameIndex(MachineBasicBlock::iterator II,unsigned FrameRegIdx,Register FrameReg,int & Offset,const ARMBaseInstrInfo & TII) const358 bool ThumbRegisterInfo::rewriteFrameIndex(MachineBasicBlock::iterator II,
359 unsigned FrameRegIdx,
360 Register FrameReg, int &Offset,
361 const ARMBaseInstrInfo &TII) const {
362 MachineInstr &MI = *II;
363 MachineBasicBlock &MBB = *MI.getParent();
364 MachineFunction &MF = *MBB.getParent();
365 assert(MBB.getParent()->getSubtarget<ARMSubtarget>().isThumb1Only() &&
366 "This isn't needed for thumb2!");
367 DebugLoc dl = MI.getDebugLoc();
368 MachineInstrBuilder MIB(*MBB.getParent(), &MI);
369 unsigned Opcode = MI.getOpcode();
370 const MCInstrDesc &Desc = MI.getDesc();
371 unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
372
373 if (Opcode == ARM::tADDframe) {
374 Offset += MI.getOperand(FrameRegIdx+1).getImm();
375 Register DestReg = MI.getOperand(0).getReg();
376
377 emitThumbRegPlusImmediate(MBB, II, dl, DestReg, FrameReg, Offset, TII,
378 *this);
379 MBB.erase(II);
380 return true;
381 } else {
382 if (AddrMode != ARMII::AddrModeT1_s)
383 llvm_unreachable("Unsupported addressing mode!");
384
385 unsigned ImmIdx = FrameRegIdx + 1;
386 int InstrOffs = MI.getOperand(ImmIdx).getImm();
387 unsigned NumBits = (FrameReg == ARM::SP) ? 8 : 5;
388 unsigned Scale = 4;
389
390 Offset += InstrOffs * Scale;
391 assert((Offset & (Scale - 1)) == 0 && "Can't encode this offset!");
392
393 // Common case: small offset, fits into instruction.
394 MachineOperand &ImmOp = MI.getOperand(ImmIdx);
395 int ImmedOffset = Offset / Scale;
396 unsigned Mask = (1 << NumBits) - 1;
397
398 if ((unsigned)Offset <= Mask * Scale) {
399 // Replace the FrameIndex with the frame register (e.g., sp).
400 Register DestReg = FrameReg;
401
402 // In case FrameReg is a high register, move it to a low reg to ensure it
403 // can be used as an operand.
404 if (ARM::hGPRRegClass.contains(FrameReg) && FrameReg != ARM::SP) {
405 DestReg = MF.getRegInfo().createVirtualRegister(&ARM::tGPRRegClass);
406 BuildMI(MBB, II, dl, TII.get(ARM::tMOVr), DestReg)
407 .addReg(FrameReg)
408 .add(predOps(ARMCC::AL));
409 }
410
411 MI.getOperand(FrameRegIdx).ChangeToRegister(DestReg, false);
412 ImmOp.ChangeToImmediate(ImmedOffset);
413
414 // If we're using a register where sp was stored, convert the instruction
415 // to the non-SP version.
416 unsigned NewOpc = convertToNonSPOpcode(Opcode);
417 if (NewOpc != Opcode && FrameReg != ARM::SP)
418 MI.setDesc(TII.get(NewOpc));
419
420 return true;
421 }
422
423 NumBits = 5;
424 Mask = (1 << NumBits) - 1;
425
426 // If this is a thumb spill / restore, we will be using a constpool load to
427 // materialize the offset.
428 if (Opcode == ARM::tLDRspi || Opcode == ARM::tSTRspi) {
429 ImmOp.ChangeToImmediate(0);
430 } else {
431 // Otherwise, it didn't fit. Pull in what we can to simplify the immed.
432 ImmedOffset = ImmedOffset & Mask;
433 ImmOp.ChangeToImmediate(ImmedOffset);
434 Offset &= ~(Mask * Scale);
435 }
436 }
437
438 return Offset == 0;
439 }
440
resolveFrameIndex(MachineInstr & MI,Register BaseReg,int64_t Offset) const441 void ThumbRegisterInfo::resolveFrameIndex(MachineInstr &MI, Register BaseReg,
442 int64_t Offset) const {
443 const MachineFunction &MF = *MI.getParent()->getParent();
444 const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
445 if (!STI.isThumb1Only())
446 return ARMBaseRegisterInfo::resolveFrameIndex(MI, BaseReg, Offset);
447
448 const ARMBaseInstrInfo &TII = *STI.getInstrInfo();
449 int Off = Offset; // ARM doesn't need the general 64-bit offsets
450 unsigned i = 0;
451
452 while (!MI.getOperand(i).isFI()) {
453 ++i;
454 assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
455 }
456 bool Done = rewriteFrameIndex(MI, i, BaseReg, Off, TII);
457 assert (Done && "Unable to resolve frame index!");
458 (void)Done;
459 }
460
eliminateFrameIndex(MachineBasicBlock::iterator II,int SPAdj,unsigned FIOperandNum,RegScavenger * RS) const461 bool ThumbRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
462 int SPAdj, unsigned FIOperandNum,
463 RegScavenger *RS) const {
464 MachineInstr &MI = *II;
465 MachineBasicBlock &MBB = *MI.getParent();
466 MachineFunction &MF = *MBB.getParent();
467 const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
468 if (!STI.isThumb1Only())
469 return ARMBaseRegisterInfo::eliminateFrameIndex(II, SPAdj, FIOperandNum,
470 RS);
471
472 Register VReg;
473 const ARMBaseInstrInfo &TII = *STI.getInstrInfo();
474 DebugLoc dl = MI.getDebugLoc();
475 MachineInstrBuilder MIB(*MBB.getParent(), &MI);
476
477 Register FrameReg;
478 int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
479 const ARMFrameLowering *TFI = getFrameLowering(MF);
480 int Offset = TFI->ResolveFrameIndexReference(MF, FrameIndex, FrameReg, SPAdj);
481
482 // PEI::scavengeFrameVirtualRegs() cannot accurately track SPAdj because the
483 // call frame setup/destroy instructions have already been eliminated. That
484 // means the stack pointer cannot be used to access the emergency spill slot
485 // when !hasReservedCallFrame().
486 #ifndef NDEBUG
487 if (RS && FrameReg == ARM::SP && RS->isScavengingFrameIndex(FrameIndex)){
488 assert(STI.getFrameLowering()->hasReservedCallFrame(MF) &&
489 "Cannot use SP to access the emergency spill slot in "
490 "functions without a reserved call frame");
491 assert(!MF.getFrameInfo().hasVarSizedObjects() &&
492 "Cannot use SP to access the emergency spill slot in "
493 "functions with variable sized frame objects");
494 }
495 #endif // NDEBUG
496
497 // Special handling of dbg_value instructions.
498 if (MI.isDebugValue()) {
499 MI.getOperand(FIOperandNum). ChangeToRegister(FrameReg, false /*isDef*/);
500 MI.getOperand(FIOperandNum+1).ChangeToImmediate(Offset);
501 return false;
502 }
503
504 // Modify MI as necessary to handle as much of 'Offset' as possible
505 assert(MF.getInfo<ARMFunctionInfo>()->isThumbFunction() &&
506 "This eliminateFrameIndex only supports Thumb1!");
507 if (rewriteFrameIndex(MI, FIOperandNum, FrameReg, Offset, TII))
508 return true;
509
510 // If we get here, the immediate doesn't fit into the instruction. We folded
511 // as much as possible above, handle the rest, providing a register that is
512 // SP+LargeImm.
513 assert(Offset && "This code isn't needed if offset already handled!");
514
515 unsigned Opcode = MI.getOpcode();
516
517 // Remove predicate first.
518 int PIdx = MI.findFirstPredOperandIdx();
519 if (PIdx != -1)
520 removeOperands(MI, PIdx);
521
522 if (MI.mayLoad()) {
523 // Use the destination register to materialize sp + offset.
524 Register TmpReg = MI.getOperand(0).getReg();
525 bool UseRR = false;
526 if (Opcode == ARM::tLDRspi) {
527 if (FrameReg == ARM::SP || STI.genExecuteOnly())
528 emitThumbRegPlusImmInReg(MBB, II, dl, TmpReg, FrameReg,
529 Offset, false, TII, *this);
530 else {
531 emitLoadConstPool(MBB, II, dl, TmpReg, 0, Offset);
532 if (!ARM::hGPRRegClass.contains(FrameReg)) {
533 UseRR = true;
534 } else {
535 // If FrameReg is a high register, add the reg values in a separate
536 // instruction as the load won't be able to access it.
537 BuildMI(MBB, II, dl, TII.get(ARM::tADDhirr), TmpReg)
538 .addReg(TmpReg)
539 .addReg(FrameReg)
540 .add(predOps(ARMCC::AL));
541 }
542 }
543 } else {
544 emitThumbRegPlusImmediate(MBB, II, dl, TmpReg, FrameReg, Offset, TII,
545 *this);
546 }
547
548 MI.setDesc(TII.get(UseRR ? ARM::tLDRr : ARM::tLDRi));
549 MI.getOperand(FIOperandNum).ChangeToRegister(TmpReg, false, false, true);
550 if (UseRR) {
551 assert(!ARM::hGPRRegClass.contains(FrameReg) &&
552 "Thumb1 loads can't use high register");
553 // Use [reg, reg] addrmode. Replace the immediate operand w/ the frame
554 // register. The offset is already handled in the vreg value.
555 MI.getOperand(FIOperandNum+1).ChangeToRegister(FrameReg, false, false,
556 false);
557 }
558 } else if (MI.mayStore()) {
559 VReg = MF.getRegInfo().createVirtualRegister(&ARM::tGPRRegClass);
560 bool UseRR = false;
561
562 if (Opcode == ARM::tSTRspi) {
563 if (FrameReg == ARM::SP || STI.genExecuteOnly())
564 emitThumbRegPlusImmInReg(MBB, II, dl, VReg, FrameReg,
565 Offset, false, TII, *this);
566 else {
567 emitLoadConstPool(MBB, II, dl, VReg, 0, Offset);
568 if (!ARM::hGPRRegClass.contains(FrameReg)) {
569 UseRR = true;
570 } else {
571 // If FrameReg is a high register, add the reg values in a separate
572 // instruction as the load won't be able to access it.
573 BuildMI(MBB, II, dl, TII.get(ARM::tADDhirr), VReg)
574 .addReg(VReg)
575 .addReg(FrameReg)
576 .add(predOps(ARMCC::AL));
577 }
578 }
579 } else
580 emitThumbRegPlusImmediate(MBB, II, dl, VReg, FrameReg, Offset, TII,
581 *this);
582 MI.setDesc(TII.get(UseRR ? ARM::tSTRr : ARM::tSTRi));
583 MI.getOperand(FIOperandNum).ChangeToRegister(VReg, false, false, true);
584 if (UseRR) {
585 assert(!ARM::hGPRRegClass.contains(FrameReg) &&
586 "Thumb1 stores can't use high register");
587 // Use [reg, reg] addrmode. Replace the immediate operand w/ the frame
588 // register. The offset is already handled in the vreg value.
589 MI.getOperand(FIOperandNum+1).ChangeToRegister(FrameReg, false, false,
590 false);
591 }
592 } else {
593 llvm_unreachable("Unexpected opcode!");
594 }
595
596 // Add predicate back if it's needed.
597 if (MI.isPredicable())
598 MIB.add(predOps(ARMCC::AL));
599 return false;
600 }
601
602 bool
useFPForScavengingIndex(const MachineFunction & MF) const603 ThumbRegisterInfo::useFPForScavengingIndex(const MachineFunction &MF) const {
604 if (MF.getSubtarget<ARMSubtarget>().isThumb1Only()) {
605 // For Thumb1, the emergency spill slot must be some small positive
606 // offset from the base/stack pointer.
607 return false;
608 }
609 // For Thumb2, put the emergency spill slot next to FP.
610 return true;
611 }
612