xref: /llvm-project/llvm/lib/Target/Sparc/SparcInstrInfo.cpp (revision f7d8336a2fb4fad4a6efe5af9b0a10ddd970f6d3)
1 //===-- SparcInstrInfo.cpp - Sparc Instruction Information ----------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file contains the Sparc implementation of the TargetInstrInfo class.
10 //
11 //===----------------------------------------------------------------------===//
12 
#include "SparcInstrInfo.h"
#include "Sparc.h"
#include "SparcMachineFunctionInfo.h"
#include "SparcSubtarget.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
23 
24 using namespace llvm;
25 
26 #define GET_INSTRINFO_CTOR_DTOR
27 #include "SparcGenInstrInfo.inc"
28 
// Debug-only knobs that artificially restrict the displacement range assumed
// for the V9 conditional branch forms; isBranchOffsetInRange() reads these
// instead of hard-coding the architectural widths.
static cl::opt<unsigned> BPccDisplacementBits(
    "sparc-bpcc-offset-bits", cl::Hidden, cl::init(19),
    cl::desc("Restrict range of BPcc/FBPfcc instructions (DEBUG)"));

static cl::opt<unsigned>
    BPrDisplacementBits("sparc-bpr-offset-bits", cl::Hidden, cl::init(16),
                        cl::desc("Restrict range of BPr instructions (DEBUG)"));
36 
// Pin the vtable to this file (out-of-line virtual method anchor, per the
// LLVM coding standard for classes declared in headers).
void SparcInstrInfo::anchor() {}
39 
// The two opcodes handed to the generated base class identify the call-frame
// setup/destroy pseudos used by target-independent frame lowering.
SparcInstrInfo::SparcInstrInfo(SparcSubtarget &ST)
    : SparcGenInstrInfo(SP::ADJCALLSTACKDOWN, SP::ADJCALLSTACKUP), RI(),
      Subtarget(ST) {}
43 
44 /// isLoadFromStackSlot - If the specified machine instruction is a direct
45 /// load from a stack slot, return the virtual or physical register number of
46 /// the destination along with the FrameIndex of the loaded stack slot.  If
47 /// not, return 0.  This predicate must return 0 if the instruction has
48 /// any side effects other than loading from the stack slot.
49 Register SparcInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
50                                              int &FrameIndex) const {
51   if (MI.getOpcode() == SP::LDri || MI.getOpcode() == SP::LDXri ||
52       MI.getOpcode() == SP::LDFri || MI.getOpcode() == SP::LDDFri ||
53       MI.getOpcode() == SP::LDQFri) {
54     if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
55         MI.getOperand(2).getImm() == 0) {
56       FrameIndex = MI.getOperand(1).getIndex();
57       return MI.getOperand(0).getReg();
58     }
59   }
60   return 0;
61 }
62 
63 /// isStoreToStackSlot - If the specified machine instruction is a direct
64 /// store to a stack slot, return the virtual or physical register number of
65 /// the source reg along with the FrameIndex of the loaded stack slot.  If
66 /// not, return 0.  This predicate must return 0 if the instruction has
67 /// any side effects other than storing to the stack slot.
68 Register SparcInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
69                                             int &FrameIndex) const {
70   if (MI.getOpcode() == SP::STri || MI.getOpcode() == SP::STXri ||
71       MI.getOpcode() == SP::STFri || MI.getOpcode() == SP::STDFri ||
72       MI.getOpcode() == SP::STQFri) {
73     if (MI.getOperand(0).isFI() && MI.getOperand(1).isImm() &&
74         MI.getOperand(1).getImm() == 0) {
75       FrameIndex = MI.getOperand(0).getIndex();
76       return MI.getOperand(2).getReg();
77     }
78   }
79   return 0;
80 }
81 
// Return the condition code that is taken exactly when a CC-branch is not
// taken (e.g. E <-> NE, A <-> N). Used by reverseBranchCondition(). The
// numbered co-processor codes and REG_BEGIN have no meaningful inverse and
// abort compilation if reached.
static SPCC::CondCodes GetOppositeBranchCondition(SPCC::CondCodes CC)
{
  switch(CC) {
  // Integer condition codes (icc/xcc).
  case SPCC::ICC_A:    return SPCC::ICC_N;
  case SPCC::ICC_N:    return SPCC::ICC_A;
  case SPCC::ICC_NE:   return SPCC::ICC_E;
  case SPCC::ICC_E:    return SPCC::ICC_NE;
  case SPCC::ICC_G:    return SPCC::ICC_LE;
  case SPCC::ICC_LE:   return SPCC::ICC_G;
  case SPCC::ICC_GE:   return SPCC::ICC_L;
  case SPCC::ICC_L:    return SPCC::ICC_GE;
  case SPCC::ICC_GU:   return SPCC::ICC_LEU;
  case SPCC::ICC_LEU:  return SPCC::ICC_GU;
  case SPCC::ICC_CC:   return SPCC::ICC_CS;
  case SPCC::ICC_CS:   return SPCC::ICC_CC;
  case SPCC::ICC_POS:  return SPCC::ICC_NEG;
  case SPCC::ICC_NEG:  return SPCC::ICC_POS;
  case SPCC::ICC_VC:   return SPCC::ICC_VS;
  case SPCC::ICC_VS:   return SPCC::ICC_VC;

  // Floating-point condition codes. Note that the inverse of an ordered
  // comparison is the unordered-or-opposite one (G <-> ULE, L <-> UGE, ...).
  case SPCC::FCC_A:    return SPCC::FCC_N;
  case SPCC::FCC_N:    return SPCC::FCC_A;
  case SPCC::FCC_U:    return SPCC::FCC_O;
  case SPCC::FCC_O:    return SPCC::FCC_U;
  case SPCC::FCC_G:    return SPCC::FCC_ULE;
  case SPCC::FCC_LE:   return SPCC::FCC_UG;
  case SPCC::FCC_UG:   return SPCC::FCC_LE;
  case SPCC::FCC_ULE:  return SPCC::FCC_G;
  case SPCC::FCC_L:    return SPCC::FCC_UGE;
  case SPCC::FCC_GE:   return SPCC::FCC_UL;
  case SPCC::FCC_UL:   return SPCC::FCC_GE;
  case SPCC::FCC_UGE:  return SPCC::FCC_L;
  case SPCC::FCC_LG:   return SPCC::FCC_UE;
  case SPCC::FCC_UE:   return SPCC::FCC_LG;
  case SPCC::FCC_NE:   return SPCC::FCC_E;
  case SPCC::FCC_E:    return SPCC::FCC_NE;

  // Co-processor condition codes: only always/never are invertible.
  case SPCC::CPCC_A:   return SPCC::CPCC_N;
  case SPCC::CPCC_N:   return SPCC::CPCC_A;
  case SPCC::CPCC_3:   [[fallthrough]];
  case SPCC::CPCC_2:   [[fallthrough]];
  case SPCC::CPCC_23:  [[fallthrough]];
  case SPCC::CPCC_1:   [[fallthrough]];
  case SPCC::CPCC_13:  [[fallthrough]];
  case SPCC::CPCC_12:  [[fallthrough]];
  case SPCC::CPCC_123: [[fallthrough]];
  case SPCC::CPCC_0:   [[fallthrough]];
  case SPCC::CPCC_03:  [[fallthrough]];
  case SPCC::CPCC_02:  [[fallthrough]];
  case SPCC::CPCC_023: [[fallthrough]];
  case SPCC::CPCC_01:  [[fallthrough]];
  case SPCC::CPCC_013: [[fallthrough]];
  case SPCC::CPCC_012:
    // "Opposite" code is not meaningful, as we don't know
    // what the CoProc condition means here. The cond-code will
    // only be used in inline assembler, so this code should
    // not be reached in a normal compilation pass.
    llvm_unreachable("Meaningless inversion of co-processor cond code");

  // Branch-on-register-contents condition codes (BPr).
  case SPCC::REG_BEGIN:
    llvm_unreachable("Use of reserved cond code");
  case SPCC::REG_Z:
    return SPCC::REG_NZ;
  case SPCC::REG_LEZ:
    return SPCC::REG_GZ;
  case SPCC::REG_LZ:
    return SPCC::REG_GEZ;
  case SPCC::REG_NZ:
    return SPCC::REG_Z;
  case SPCC::REG_GZ:
    return SPCC::REG_LEZ;
  case SPCC::REG_GEZ:
    return SPCC::REG_LZ;
  }
  llvm_unreachable("Invalid cond code");
}
158 
// Is Opc the unconditional direct branch (BA)?
static bool isUncondBranchOpcode(int Opc) { return Opc == SP::BA; }
160 
161 static bool isI32CondBranchOpcode(int Opc) {
162   return Opc == SP::BCOND || Opc == SP::BPICC || Opc == SP::BPICCA ||
163          Opc == SP::BPICCNT || Opc == SP::BPICCANT;
164 }
165 
166 static bool isI64CondBranchOpcode(int Opc) {
167   return Opc == SP::BPXCC || Opc == SP::BPXCCA || Opc == SP::BPXCCNT ||
168          Opc == SP::BPXCCANT;
169 }
170 
171 static bool isRegCondBranchOpcode(int Opc) {
172   return Opc == SP::BPR || Opc == SP::BPRA || Opc == SP::BPRNT ||
173          Opc == SP::BPRANT;
174 }
175 
176 static bool isFCondBranchOpcode(int Opc) {
177   return Opc == SP::FBCOND || Opc == SP::FBCONDA || Opc == SP::FBCOND_V9 ||
178          Opc == SP::FBCONDA_V9;
179 }
180 
181 static bool isCondBranchOpcode(int Opc) {
182   return isI32CondBranchOpcode(Opc) || isI64CondBranchOpcode(Opc) ||
183          isRegCondBranchOpcode(Opc) || isFCondBranchOpcode(Opc);
184 }
185 
186 static bool isIndirectBranchOpcode(int Opc) {
187   return Opc == SP::BINDrr || Opc == SP::BINDri;
188 }
189 
190 static void parseCondBranch(MachineInstr *LastInst, MachineBasicBlock *&Target,
191                             SmallVectorImpl<MachineOperand> &Cond) {
192   unsigned Opc = LastInst->getOpcode();
193   int64_t CC = LastInst->getOperand(1).getImm();
194 
195   // Push the branch opcode into Cond too so later in insertBranch
196   // it can use the information to emit the correct SPARC branch opcode.
197   Cond.push_back(MachineOperand::CreateImm(Opc));
198   Cond.push_back(MachineOperand::CreateImm(CC));
199 
200   // Branch on register contents need another argument to indicate
201   // the register it branches on.
202   if (isRegCondBranchOpcode(Opc)) {
203       Register Reg = LastInst->getOperand(2).getReg();
204       Cond.push_back(MachineOperand::CreateReg(Reg, false));
205   }
206 
207   Target = LastInst->getOperand(0).getMBB();
208 }
209 
// Return the destination basic block of the given direct branch. Every
// direct SPARC branch listed below keeps its target MBB in operand 0; any
// other opcode is a contract violation by the caller.
MachineBasicBlock *
SparcInstrInfo::getBranchDestBlock(const MachineInstr &MI) const {
  switch (MI.getOpcode()) {
  default:
    llvm_unreachable("unexpected opcode!");
  case SP::BA:
  case SP::BCOND:
  case SP::BCONDA:
  case SP::FBCOND:
  case SP::FBCONDA:
  case SP::BPICC:
  case SP::BPICCA:
  case SP::BPICCNT:
  case SP::BPICCANT:
  case SP::BPXCC:
  case SP::BPXCCA:
  case SP::BPXCCNT:
  case SP::BPXCCANT:
  case SP::BPFCC:
  case SP::BPFCCA:
  case SP::BPFCCNT:
  case SP::BPFCCANT:
  case SP::FBCOND_V9:
  case SP::FBCONDA_V9:
  case SP::BPR:
  case SP::BPRA:
  case SP::BPRNT:
  case SP::BPRANT:
    // All of the above encode the destination as their first operand.
    return MI.getOperand(0).getMBB();
  }
}
241 
// Analyze the terminators of MBB per the TargetInstrInfo contract.
// Returns false when the control flow was understood:
//   * unconditional branch:          TBB set, Cond empty
//   * fall-through cond branch:      TBB set, Cond = [opcode, cc, (reg)]
//   * cond branch + uncond branch:   TBB and FBB set, Cond as above
// Returns true when the branching cannot be analyzed (indirect branches,
// three or more terminators, ...). When AllowModify is set, redundant
// trailing unconditional branches may be erased as a side effect.
bool SparcInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
                                   MachineBasicBlock *&TBB,
                                   MachineBasicBlock *&FBB,
                                   SmallVectorImpl<MachineOperand> &Cond,
                                   bool AllowModify) const {
  MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
  // Empty block: trivially falls through.
  if (I == MBB.end())
    return false;

  // A predicated/non-terminator tail means the block falls through.
  if (!isUnpredicatedTerminator(*I))
    return false;

  // Get the last instruction in the block.
  MachineInstr *LastInst = &*I;
  unsigned LastOpc = LastInst->getOpcode();

  // If there is only one terminator instruction, process it.
  if (I == MBB.begin() || !isUnpredicatedTerminator(*--I)) {
    if (isUncondBranchOpcode(LastOpc)) {
      TBB = LastInst->getOperand(0).getMBB();
      return false;
    }
    if (isCondBranchOpcode(LastOpc)) {
      // Block ends with fall-through condbranch.
      parseCondBranch(LastInst, TBB, Cond);
      return false;
    }
    return true; // Can't handle indirect branch.
  }

  // Get the instruction before it if it is a terminator.
  MachineInstr *SecondLastInst = &*I;
  unsigned SecondLastOpc = SecondLastInst->getOpcode();

  // If AllowModify is true and the block ends with two or more unconditional
  // branches, delete all but the first unconditional branch.
  if (AllowModify && isUncondBranchOpcode(LastOpc)) {
    while (isUncondBranchOpcode(SecondLastOpc)) {
      LastInst->eraseFromParent();
      LastInst = SecondLastInst;
      LastOpc = LastInst->getOpcode();
      if (I == MBB.begin() || !isUnpredicatedTerminator(*--I)) {
        // Return now the only terminator is an unconditional branch.
        TBB = LastInst->getOperand(0).getMBB();
        return false;
      } else {
        SecondLastInst = &*I;
        SecondLastOpc = SecondLastInst->getOpcode();
      }
    }
  }

  // If there are three terminators, we don't know what sort of block this is.
  if (SecondLastInst && I != MBB.begin() && isUnpredicatedTerminator(*--I))
    return true;

  // If the block ends with a B and a Bcc, handle it.
  if (isCondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    parseCondBranch(SecondLastInst, TBB, Cond);
    FBB = LastInst->getOperand(0).getMBB();
    return false;
  }

  // If the block ends with two unconditional branches, handle it.  The second
  // one is not executed.
  if (isUncondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    TBB = SecondLastInst->getOperand(0).getMBB();
    return false;
  }

  // ...likewise if it ends with an indirect branch followed by an unconditional
  // branch.
  if (isIndirectBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    I = LastInst;
    if (AllowModify)
      I->eraseFromParent();
    return true;
  }

  // Otherwise, can't handle this.
  return true;
}
324 
325 unsigned SparcInstrInfo::insertBranch(MachineBasicBlock &MBB,
326                                       MachineBasicBlock *TBB,
327                                       MachineBasicBlock *FBB,
328                                       ArrayRef<MachineOperand> Cond,
329                                       const DebugLoc &DL,
330                                       int *BytesAdded) const {
331   assert(TBB && "insertBranch must not be told to insert a fallthrough");
332   assert((Cond.size() <= 3) &&
333          "Sparc branch conditions should have at most three components!");
334 
335   if (Cond.empty()) {
336     assert(!FBB && "Unconditional branch with multiple successors!");
337     BuildMI(&MBB, DL, get(SP::BA)).addMBB(TBB);
338     if (BytesAdded)
339       *BytesAdded = 8;
340     return 1;
341   }
342 
343   // Conditional branch
344   unsigned Opc = Cond[0].getImm();
345   unsigned CC = Cond[1].getImm();
346   if (isRegCondBranchOpcode(Opc)) {
347     Register Reg = Cond[2].getReg();
348     BuildMI(&MBB, DL, get(Opc)).addMBB(TBB).addImm(CC).addReg(Reg);
349   } else {
350     BuildMI(&MBB, DL, get(Opc)).addMBB(TBB).addImm(CC);
351   }
352 
353   if (!FBB) {
354     if (BytesAdded)
355       *BytesAdded = 8;
356     return 1;
357   }
358 
359   BuildMI(&MBB, DL, get(SP::BA)).addMBB(FBB);
360   if (BytesAdded)
361     *BytesAdded = 16;
362   return 2;
363 }
364 
// Remove branch instructions (conditional and unconditional) from the end of
// MBB. Returns the number removed; BytesRemoved, if non-null, receives their
// total size in bytes (delay slots included, per getInstSizeInBytes). Debug
// instructions are skipped; scanning stops at the first non-branch.
unsigned SparcInstrInfo::removeBranch(MachineBasicBlock &MBB,
                                      int *BytesRemoved) const {
  MachineBasicBlock::iterator I = MBB.end();
  unsigned Count = 0;
  int Removed = 0;
  while (I != MBB.begin()) {
    --I;

    if (I->isDebugInstr())
      continue;

    if (!isCondBranchOpcode(I->getOpcode()) &&
        !isUncondBranchOpcode(I->getOpcode()))
      break; // Not a branch

    Removed += getInstSizeInBytes(*I);
    I->eraseFromParent();
    // Erasing invalidated I; restart the scan from the block's end.
    I = MBB.end();
    ++Count;
  }

  if (BytesRemoved)
    *BytesRemoved = Removed;
  return Count;
}
390 
391 bool SparcInstrInfo::reverseBranchCondition(
392     SmallVectorImpl<MachineOperand> &Cond) const {
393   assert(Cond.size() <= 3);
394   SPCC::CondCodes CC = static_cast<SPCC::CondCodes>(Cond[1].getImm());
395   Cond[1].setImm(GetOppositeBranchCondition(CC));
396   return false;
397 }
398 
399 bool SparcInstrInfo::isBranchOffsetInRange(unsigned BranchOpc,
400                                            int64_t Offset) const {
401   assert((Offset & 0b11) == 0 && "Malformed branch offset");
402   switch (BranchOpc) {
403   case SP::BA:
404   case SP::BCOND:
405   case SP::BCONDA:
406   case SP::FBCOND:
407   case SP::FBCONDA:
408     return isIntN(22, Offset >> 2);
409 
410   case SP::BPICC:
411   case SP::BPICCA:
412   case SP::BPICCNT:
413   case SP::BPICCANT:
414   case SP::BPXCC:
415   case SP::BPXCCA:
416   case SP::BPXCCNT:
417   case SP::BPXCCANT:
418   case SP::BPFCC:
419   case SP::BPFCCA:
420   case SP::BPFCCNT:
421   case SP::BPFCCANT:
422   case SP::FBCOND_V9:
423   case SP::FBCONDA_V9:
424     return isIntN(BPccDisplacementBits, Offset >> 2);
425 
426   case SP::BPR:
427   case SP::BPRA:
428   case SP::BPRNT:
429   case SP::BPRANT:
430     return isIntN(BPrDisplacementBits, Offset >> 2);
431   }
432 
433   llvm_unreachable("Unknown branch instruction!");
434 }
435 
// Emit a register-to-register copy from SrcReg to DestReg, dispatching on
// register class:
//   * IntRegs:          or %g0, src, dst (single instruction)
//   * FPRegs:           fmovs
//   * DFPRegs:          fmovd on V9, else two fmovs on the 32-bit subregs
//   * QFPRegs:          fmovq on V9 + hard quad, two fmovd on plain V9,
//                       else four fmovs
//   * IntPair:          two "or %g0" moves on the even/odd subregs
//   * ASR <-> IntRegs:  wr / rd of the ancillary state register
// When no single-instruction copy exists, the tables below describe the
// per-subregister decomposition that the loop at the bottom emits.
void SparcInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator I,
                                 const DebugLoc &DL, MCRegister DestReg,
                                 MCRegister SrcReg, bool KillSrc,
                                 bool RenamableDest, bool RenamableSrc) const {
  // Decomposition parameters: which subreg indices to walk, how many, and
  // which move opcode to use. numSubRegs == 0 means a single-instruction
  // copy was already emitted.
  unsigned numSubRegs = 0;
  unsigned movOpc     = 0;
  const unsigned *subRegIdx = nullptr;
  // ExtraG0: the move opcode is ORrr, which needs %g0 as its first source.
  bool ExtraG0 = false;

  const unsigned DW_SubRegsIdx[]  = { SP::sub_even, SP::sub_odd };
  const unsigned DFP_FP_SubRegsIdx[]  = { SP::sub_even, SP::sub_odd };
  const unsigned QFP_DFP_SubRegsIdx[] = { SP::sub_even64, SP::sub_odd64 };
  const unsigned QFP_FP_SubRegsIdx[]  = { SP::sub_even, SP::sub_odd,
                                          SP::sub_odd64_then_sub_even,
                                          SP::sub_odd64_then_sub_odd };

  if (SP::IntRegsRegClass.contains(DestReg, SrcReg))
    BuildMI(MBB, I, DL, get(SP::ORrr), DestReg).addReg(SP::G0)
      .addReg(SrcReg, getKillRegState(KillSrc));
  else if (SP::IntPairRegClass.contains(DestReg, SrcReg)) {
    subRegIdx  = DW_SubRegsIdx;
    numSubRegs = 2;
    movOpc     = SP::ORrr;
    ExtraG0 = true;
  } else if (SP::FPRegsRegClass.contains(DestReg, SrcReg))
    BuildMI(MBB, I, DL, get(SP::FMOVS), DestReg)
      .addReg(SrcReg, getKillRegState(KillSrc));
  else if (SP::DFPRegsRegClass.contains(DestReg, SrcReg)) {
    if (Subtarget.isV9()) {
      BuildMI(MBB, I, DL, get(SP::FMOVD), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc));
    } else {
      // Use two FMOVS instructions.
      subRegIdx  = DFP_FP_SubRegsIdx;
      numSubRegs = 2;
      movOpc     = SP::FMOVS;
    }
  } else if (SP::QFPRegsRegClass.contains(DestReg, SrcReg)) {
    if (Subtarget.isV9()) {
      if (Subtarget.hasHardQuad()) {
        BuildMI(MBB, I, DL, get(SP::FMOVQ), DestReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
      } else {
        // Use two FMOVD instructions.
        subRegIdx  = QFP_DFP_SubRegsIdx;
        numSubRegs = 2;
        movOpc     = SP::FMOVD;
      }
    } else {
      // Use four FMOVS instructions.
      subRegIdx  = QFP_FP_SubRegsIdx;
      numSubRegs = 4;
      movOpc     = SP::FMOVS;
    }
  } else if (SP::ASRRegsRegClass.contains(DestReg) &&
             SP::IntRegsRegClass.contains(SrcReg)) {
    BuildMI(MBB, I, DL, get(SP::WRASRrr), DestReg)
        .addReg(SP::G0)
        .addReg(SrcReg, getKillRegState(KillSrc));
  } else if (SP::IntRegsRegClass.contains(DestReg) &&
             SP::ASRRegsRegClass.contains(SrcReg)) {
    BuildMI(MBB, I, DL, get(SP::RDASR), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc));
  } else
    llvm_unreachable("Impossible reg-to-reg copy");

  // Single-instruction case already handled above.
  if (numSubRegs == 0 || subRegIdx == nullptr || movOpc == 0)
    return;

  const TargetRegisterInfo *TRI = &getRegisterInfo();
  MachineInstr *MovMI = nullptr;

  for (unsigned i = 0; i != numSubRegs; ++i) {
    Register Dst = TRI->getSubReg(DestReg, subRegIdx[i]);
    Register Src = TRI->getSubReg(SrcReg, subRegIdx[i]);
    assert(Dst && Src && "Bad sub-register");

    MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(movOpc), Dst);
    if (ExtraG0)
      MIB.addReg(SP::G0);
    MIB.addReg(Src);
    MovMI = MIB.getInstr();
  }
  // Add implicit super-register defs and kills to the last MovMI.
  MovMI->addRegisterDefined(DestReg, TRI);
  if (KillSrc)
    MovMI->addRegisterKilled(SrcReg, TRI);
}
525 
526 void SparcInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
527                                          MachineBasicBlock::iterator I,
528                                          Register SrcReg, bool isKill, int FI,
529                                          const TargetRegisterClass *RC,
530                                          const TargetRegisterInfo *TRI,
531                                          Register VReg,
532                                          MachineInstr::MIFlag Flags) const {
533   DebugLoc DL;
534   if (I != MBB.end()) DL = I->getDebugLoc();
535 
536   MachineFunction *MF = MBB.getParent();
537   const MachineFrameInfo &MFI = MF->getFrameInfo();
538   MachineMemOperand *MMO = MF->getMachineMemOperand(
539       MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOStore,
540       MFI.getObjectSize(FI), MFI.getObjectAlign(FI));
541 
542   // On the order of operands here: think "[FrameIdx + 0] = SrcReg".
543   if (RC == &SP::I64RegsRegClass)
544     BuildMI(MBB, I, DL, get(SP::STXri)).addFrameIndex(FI).addImm(0)
545       .addReg(SrcReg, getKillRegState(isKill)).addMemOperand(MMO);
546   else if (RC == &SP::IntRegsRegClass)
547     BuildMI(MBB, I, DL, get(SP::STri)).addFrameIndex(FI).addImm(0)
548       .addReg(SrcReg, getKillRegState(isKill)).addMemOperand(MMO);
549   else if (RC == &SP::IntPairRegClass)
550     BuildMI(MBB, I, DL, get(SP::STDri)).addFrameIndex(FI).addImm(0)
551       .addReg(SrcReg, getKillRegState(isKill)).addMemOperand(MMO);
552   else if (RC == &SP::FPRegsRegClass)
553     BuildMI(MBB, I, DL, get(SP::STFri)).addFrameIndex(FI).addImm(0)
554       .addReg(SrcReg,  getKillRegState(isKill)).addMemOperand(MMO);
555   else if (SP::DFPRegsRegClass.hasSubClassEq(RC))
556     BuildMI(MBB, I, DL, get(SP::STDFri)).addFrameIndex(FI).addImm(0)
557       .addReg(SrcReg,  getKillRegState(isKill)).addMemOperand(MMO);
558   else if (SP::QFPRegsRegClass.hasSubClassEq(RC))
559     // Use STQFri irrespective of its legality. If STQ is not legal, it will be
560     // lowered into two STDs in eliminateFrameIndex.
561     BuildMI(MBB, I, DL, get(SP::STQFri)).addFrameIndex(FI).addImm(0)
562       .addReg(SrcReg,  getKillRegState(isKill)).addMemOperand(MMO);
563   else
564     llvm_unreachable("Can't store this register to stack slot");
565 }
566 
567 void SparcInstrInfo::loadRegFromStackSlot(
568     MachineBasicBlock &MBB, MachineBasicBlock::iterator I, Register DestReg,
569     int FI, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI,
570     Register VReg, MachineInstr::MIFlag Flags) const {
571   DebugLoc DL;
572   if (I != MBB.end()) DL = I->getDebugLoc();
573 
574   MachineFunction *MF = MBB.getParent();
575   const MachineFrameInfo &MFI = MF->getFrameInfo();
576   MachineMemOperand *MMO = MF->getMachineMemOperand(
577       MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOLoad,
578       MFI.getObjectSize(FI), MFI.getObjectAlign(FI));
579 
580   if (RC == &SP::I64RegsRegClass)
581     BuildMI(MBB, I, DL, get(SP::LDXri), DestReg).addFrameIndex(FI).addImm(0)
582       .addMemOperand(MMO);
583   else if (RC == &SP::IntRegsRegClass)
584     BuildMI(MBB, I, DL, get(SP::LDri), DestReg).addFrameIndex(FI).addImm(0)
585       .addMemOperand(MMO);
586   else if (RC == &SP::IntPairRegClass)
587     BuildMI(MBB, I, DL, get(SP::LDDri), DestReg).addFrameIndex(FI).addImm(0)
588       .addMemOperand(MMO);
589   else if (RC == &SP::FPRegsRegClass)
590     BuildMI(MBB, I, DL, get(SP::LDFri), DestReg).addFrameIndex(FI).addImm(0)
591       .addMemOperand(MMO);
592   else if (SP::DFPRegsRegClass.hasSubClassEq(RC))
593     BuildMI(MBB, I, DL, get(SP::LDDFri), DestReg).addFrameIndex(FI).addImm(0)
594       .addMemOperand(MMO);
595   else if (SP::QFPRegsRegClass.hasSubClassEq(RC))
596     // Use LDQFri irrespective of its legality. If LDQ is not legal, it will be
597     // lowered into two LDDs in eliminateFrameIndex.
598     BuildMI(MBB, I, DL, get(SP::LDQFri), DestReg).addFrameIndex(FI).addImm(0)
599       .addMemOperand(MMO);
600   else
601     llvm_unreachable("Can't load this register from stack slot");
602 }
603 
604 Register SparcInstrInfo::getGlobalBaseReg(MachineFunction *MF) const {
605   SparcMachineFunctionInfo *SparcFI = MF->getInfo<SparcMachineFunctionInfo>();
606   Register GlobalBaseReg = SparcFI->getGlobalBaseReg();
607   if (GlobalBaseReg)
608     return GlobalBaseReg;
609 
610   // Insert the set of GlobalBaseReg into the first MBB of the function
611   MachineBasicBlock &FirstMBB = MF->front();
612   MachineBasicBlock::iterator MBBI = FirstMBB.begin();
613   MachineRegisterInfo &RegInfo = MF->getRegInfo();
614 
615   const TargetRegisterClass *PtrRC =
616     Subtarget.is64Bit() ? &SP::I64RegsRegClass : &SP::IntRegsRegClass;
617   GlobalBaseReg = RegInfo.createVirtualRegister(PtrRC);
618 
619   DebugLoc dl;
620 
621   BuildMI(FirstMBB, MBBI, dl, get(SP::GETPCX), GlobalBaseReg);
622   SparcFI->setGlobalBaseReg(GlobalBaseReg);
623   return GlobalBaseReg;
624 }
625 
626 unsigned SparcInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
627   unsigned Opcode = MI.getOpcode();
628 
629   if (MI.isInlineAsm()) {
630     const MachineFunction *MF = MI.getParent()->getParent();
631     const char *AsmStr = MI.getOperand(0).getSymbolName();
632     return getInlineAsmLength(AsmStr, *MF->getTarget().getMCAsmInfo());
633   }
634 
635   // If the instruction has a delay slot, be conservative and also include
636   // it for sizing purposes. This is done so that the BranchRelaxation pass
637   // will not mistakenly mark out-of-range branches as in-range.
638   if (MI.hasDelaySlot())
639     return get(Opcode).getSize() * 2;
640   return get(Opcode).getSize();
641 }
642 
643 bool SparcInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
644   switch (MI.getOpcode()) {
645   case TargetOpcode::LOAD_STACK_GUARD: {
646     assert(Subtarget.isTargetLinux() &&
647            "Only Linux target is expected to contain LOAD_STACK_GUARD");
648     // offsetof(tcbhead_t, stack_guard) from sysdeps/sparc/nptl/tls.h in glibc.
649     const int64_t Offset = Subtarget.is64Bit() ? 0x28 : 0x14;
650     MI.setDesc(get(Subtarget.is64Bit() ? SP::LDXri : SP::LDri));
651     MachineInstrBuilder(*MI.getParent()->getParent(), MI)
652         .addReg(SP::G7)
653         .addImm(Offset);
654     return true;
655   }
656   }
657   return false;
658 }
659