xref: /llvm-project/llvm/lib/Target/SystemZ/SystemZRegisterInfo.cpp (revision dc04d414df9c243bb90d7cfc683a632a2c032c62)
1 //===-- SystemZRegisterInfo.cpp - SystemZ register information ------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "SystemZRegisterInfo.h"
10 #include "SystemZInstrInfo.h"
11 #include "SystemZSubtarget.h"
12 #include "llvm/ADT/SmallSet.h"
13 #include "llvm/CodeGen/LiveIntervals.h"
14 #include "llvm/CodeGen/MachineInstrBuilder.h"
15 #include "llvm/CodeGen/MachineRegisterInfo.h"
16 #include "llvm/CodeGen/TargetFrameLowering.h"
17 #include "llvm/CodeGen/VirtRegMap.h"
18 #include "llvm/IR/DebugInfoMetadata.h"
19 
20 using namespace llvm;
21 
22 #define GET_REGINFO_TARGET_DESC
23 #include "SystemZGenRegisterInfo.inc"
24 
25 // Given that MO is a GRX32 operand, return either GR32 or GRH32 if MO
26 // somehow belongs in it. Otherwise, return GRX32.
27 static const TargetRegisterClass *getRC32(MachineOperand &MO,
28                                           const VirtRegMap *VRM,
29                                           const MachineRegisterInfo *MRI) {
30   const TargetRegisterClass *RC = MRI->getRegClass(MO.getReg());
31 
32   if (SystemZ::GR32BitRegClass.hasSubClassEq(RC) ||
33       MO.getSubReg() == SystemZ::subreg_ll32 ||
34       MO.getSubReg() == SystemZ::subreg_l32)
35     return &SystemZ::GR32BitRegClass;
36   if (SystemZ::GRH32BitRegClass.hasSubClassEq(RC) ||
37       MO.getSubReg() == SystemZ::subreg_lh32 ||
38       MO.getSubReg() == SystemZ::subreg_h32)
39     return &SystemZ::GRH32BitRegClass;
40 
41   if (VRM && VRM->hasPhys(MO.getReg())) {
42     Register PhysReg = VRM->getPhys(MO.getReg());
43     if (SystemZ::GR32BitRegClass.contains(PhysReg))
44       return &SystemZ::GR32BitRegClass;
45     assert (SystemZ::GRH32BitRegClass.contains(PhysReg) &&
46             "Phys reg not in GR32 or GRH32?");
47     return &SystemZ::GRH32BitRegClass;
48   }
49 
50   assert (RC == &SystemZ::GRX32BitRegClass);
51   return RC;
52 }
53 
54 // Pass the registers of RC as hints while making sure that if any of these
55 // registers are copy hints (and therefore already in Hints), hint them
56 // first.
57 static void addHints(ArrayRef<MCPhysReg> Order,
58                      SmallVectorImpl<MCPhysReg> &Hints,
59                      const TargetRegisterClass *RC,
60                      const MachineRegisterInfo *MRI) {
61   SmallSet<unsigned, 4> CopyHints;
62   CopyHints.insert(Hints.begin(), Hints.end());
63   Hints.clear();
64   for (MCPhysReg Reg : Order)
65     if (CopyHints.count(Reg) &&
66         RC->contains(Reg) && !MRI->isReserved(Reg))
67       Hints.push_back(Reg);
68   for (MCPhysReg Reg : Order)
69     if (!CopyHints.count(Reg) &&
70         RC->contains(Reg) && !MRI->isReserved(Reg))
71       Hints.push_back(Reg);
72 }
73 
// Provide allocation-order hints for VirtReg.  On top of the
// target-independent copy hints this adds:
//  - hints that tie VirtReg to the tied/commutable operand of instructions
//    that have a two-operand form (per SystemZ::getTwoOperandOpcode), and
//  - GR32/GRH32 hints for GRX32 registers feeding LOCRMux/SELRMux or
//    CHIMux/CFIMux, so those pseudos can be lowered cheaply.
// Returns true when the hints should be the only candidates given to the
// register allocator; otherwise returns the base implementation's result.
bool SystemZRegisterInfo::getRegAllocationHints(
    Register VirtReg, ArrayRef<MCPhysReg> Order,
    SmallVectorImpl<MCPhysReg> &Hints, const MachineFunction &MF,
    const VirtRegMap *VRM, const LiveRegMatrix *Matrix) const {
  const MachineRegisterInfo *MRI = &MF.getRegInfo();
  const SystemZSubtarget &Subtarget = MF.getSubtarget<SystemZSubtarget>();
  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();

  bool BaseImplRetVal = TargetRegisterInfo::getRegAllocationHints(
      VirtReg, Order, Hints, MF, VRM, Matrix);

  if (VRM != nullptr) {
    // Add any two address hints after any copy hints.
    SmallSet<unsigned, 4> TwoAddrHints;
    for (auto &Use : MRI->reg_nodbg_instructions(VirtReg))
      if (SystemZ::getTwoOperandOpcode(Use.getOpcode()) != -1) {
        // Identify which operand of Use is VirtReg (VRRegMO) and which
        // operand(s) we would like to share a register with (OtherMO, and
        // CommuMO if the instruction is commutable).
        const MachineOperand *VRRegMO = nullptr;
        const MachineOperand *OtherMO = nullptr;
        const MachineOperand *CommuMO = nullptr;
        if (VirtReg == Use.getOperand(0).getReg()) {
          VRRegMO = &Use.getOperand(0);
          OtherMO = &Use.getOperand(1);
          if (Use.isCommutable())
            CommuMO = &Use.getOperand(2);
        } else if (VirtReg == Use.getOperand(1).getReg()) {
          VRRegMO = &Use.getOperand(1);
          OtherMO = &Use.getOperand(0);
        } else if (VirtReg == Use.getOperand(2).getReg() &&
                   Use.isCommutable()) {
          VRRegMO = &Use.getOperand(2);
          OtherMO = &Use.getOperand(0);
        } else
          continue;

        // Hint the physreg of MO (adjusted for any subregs involved),
        // unless it is reserved or already a copy hint.
        auto tryAddHint = [&](const MachineOperand *MO) -> void {
          Register Reg = MO->getReg();
          Register PhysReg =
              Reg.isPhysical() ? Reg : Register(VRM->getPhys(Reg));
          if (PhysReg) {
            if (MO->getSubReg())
              PhysReg = getSubReg(PhysReg, MO->getSubReg());
            if (VRRegMO->getSubReg())
              PhysReg = getMatchingSuperReg(PhysReg, VRRegMO->getSubReg(),
                                            MRI->getRegClass(VirtReg));
            if (!MRI->isReserved(PhysReg) && !is_contained(Hints, PhysReg))
              TwoAddrHints.insert(PhysReg);
          }
        };
        tryAddHint(OtherMO);
        if (CommuMO)
          tryAddHint(CommuMO);
      }
    // Append the collected two-address hints in allocation order, after the
    // copy hints already in Hints.
    for (MCPhysReg OrderReg : Order)
      if (TwoAddrHints.count(OrderReg))
        Hints.push_back(OrderReg);
  }

  if (MRI->getRegClass(VirtReg) == &SystemZ::GRX32BitRegClass) {
    // Walk the web of GRX32 registers connected to VirtReg through
    // LOCRMux/SELRMux operands, looking for a constraint to GR32 or GRH32.
    SmallVector<Register, 8> Worklist;
    SmallSet<Register, 4> DoneRegs;
    Worklist.push_back(VirtReg);
    while (Worklist.size()) {
      Register Reg = Worklist.pop_back_val();
      if (!DoneRegs.insert(Reg).second)
        continue;

      for (auto &Use : MRI->reg_instructions(Reg)) {
        // For LOCRMux, see if the other operand is already a high or low
        // register, and in that case give the corresponding hints for
        // VirtReg. LOCR instructions need both operands in either high or
        // low parts. Same handling for SELRMux.
        if (Use.getOpcode() == SystemZ::LOCRMux ||
            Use.getOpcode() == SystemZ::SELRMux) {
          MachineOperand &TrueMO = Use.getOperand(1);
          MachineOperand &FalseMO = Use.getOperand(2);
          const TargetRegisterClass *RC =
            TRI->getCommonSubClass(getRC32(FalseMO, VRM, MRI),
                                   getRC32(TrueMO, VRM, MRI));
          if (Use.getOpcode() == SystemZ::SELRMux)
            RC = TRI->getCommonSubClass(RC,
                                        getRC32(Use.getOperand(0), VRM, MRI));
          if (RC && RC != &SystemZ::GRX32BitRegClass) {
            addHints(Order, Hints, RC, MRI);
            // Return true to make these hints the only regs available to
            // RA. This may mean extra spilling but since the alternative is
            // a jump sequence expansion of the LOCRMux, it is preferred.
            return true;
          }

          // Add the other operand of the LOCRMux to the worklist.
          Register OtherReg =
              (TrueMO.getReg() == Reg ? FalseMO.getReg() : TrueMO.getReg());
          if (MRI->getRegClass(OtherReg) == &SystemZ::GRX32BitRegClass)
            Worklist.push_back(OtherReg);
        } // end LOCRMux
        else if (Use.getOpcode() == SystemZ::CHIMux ||
                 Use.getOpcode() == SystemZ::CFIMux) {
          // A compare against zero where VirtReg is only defined by LMux
          // loads prefers (but does not require) the low GR32 half.
          if (Use.getOperand(1).getImm() == 0) {
            bool OnlyLMuxes = true;
            for (MachineInstr &DefMI : MRI->def_instructions(VirtReg))
              if (DefMI.getOpcode() != SystemZ::LMux)
                OnlyLMuxes = false;
            if (OnlyLMuxes) {
              addHints(Order, Hints, &SystemZ::GR32BitRegClass, MRI);
              // Return false to make these hints preferred but not obligatory.
              return false;
            }
          }
        } // end CHIMux / CFIMux
      }
    }
  }

  return BaseImplRetVal;
}
189 
190 const MCPhysReg *
191 SystemZXPLINK64Registers::getCalleeSavedRegs(const MachineFunction *MF) const {
192   const SystemZSubtarget &Subtarget = MF->getSubtarget<SystemZSubtarget>();
193   return Subtarget.hasVector() ? CSR_SystemZ_XPLINK64_Vector_SaveList
194                                : CSR_SystemZ_XPLINK64_SaveList;
195 }
196 
197 const MCPhysReg *
198 SystemZELFRegisters::getCalleeSavedRegs(const MachineFunction *MF) const {
199   const SystemZSubtarget &Subtarget = MF->getSubtarget<SystemZSubtarget>();
200   if (MF->getFunction().getCallingConv() == CallingConv::GHC)
201     return CSR_SystemZ_NoRegs_SaveList;
202   if (MF->getFunction().getCallingConv() == CallingConv::AnyReg)
203     return Subtarget.hasVector()? CSR_SystemZ_AllRegs_Vector_SaveList
204                                 : CSR_SystemZ_AllRegs_SaveList;
205   if (MF->getSubtarget().getTargetLowering()->supportSwiftError() &&
206       MF->getFunction().getAttributes().hasAttrSomewhere(
207           Attribute::SwiftError))
208     return CSR_SystemZ_SwiftError_SaveList;
209   return CSR_SystemZ_ELF_SaveList;
210 }
211 
212 const uint32_t *
213 SystemZXPLINK64Registers::getCallPreservedMask(const MachineFunction &MF,
214                                                CallingConv::ID CC) const {
215   const SystemZSubtarget &Subtarget = MF.getSubtarget<SystemZSubtarget>();
216   return Subtarget.hasVector() ? CSR_SystemZ_XPLINK64_Vector_RegMask
217                                : CSR_SystemZ_XPLINK64_RegMask;
218 }
219 
220 const uint32_t *
221 SystemZELFRegisters::getCallPreservedMask(const MachineFunction &MF,
222                                           CallingConv::ID CC) const {
223   const SystemZSubtarget &Subtarget = MF.getSubtarget<SystemZSubtarget>();
224   if (CC == CallingConv::GHC)
225     return CSR_SystemZ_NoRegs_RegMask;
226   if (CC == CallingConv::AnyReg)
227     return Subtarget.hasVector()? CSR_SystemZ_AllRegs_Vector_RegMask
228                                 : CSR_SystemZ_AllRegs_RegMask;
229   if (MF.getSubtarget().getTargetLowering()->supportSwiftError() &&
230       MF.getFunction().getAttributes().hasAttrSomewhere(
231           Attribute::SwiftError))
232     return CSR_SystemZ_SwiftError_RegMask;
233   return CSR_SystemZ_ELF_RegMask;
234 }
235 
// Constructor: RA is forwarded to the TableGen-generated base class
// (conventionally the return-address register number — see
// SystemZGenRegisterInfo to confirm).
SystemZRegisterInfo::SystemZRegisterInfo(unsigned int RA)
    : SystemZGenRegisterInfo(RA) {}
238 
239 const MCPhysReg *
240 SystemZRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
241 
242   const SystemZSubtarget *Subtarget = &MF->getSubtarget<SystemZSubtarget>();
243   SystemZCallingConventionRegisters *Regs = Subtarget->getSpecialRegisters();
244 
245   return Regs->getCalleeSavedRegs(MF);
246 }
247 
248 const uint32_t *
249 SystemZRegisterInfo::getCallPreservedMask(const MachineFunction &MF,
250                                           CallingConv::ID CC) const {
251 
252   const SystemZSubtarget *Subtarget = &MF.getSubtarget<SystemZSubtarget>();
253   SystemZCallingConventionRegisters *Regs = Subtarget->getSpecialRegisters();
254   return Regs->getCallPreservedMask(MF, CC);
255 }
256 
// Return the mask that marks no registers as preserved across a call.
const uint32_t *SystemZRegisterInfo::getNoPreservedMask() const {
  return CSR_SystemZ_NoRegs_RegMask;
}
260 
261 BitVector
262 SystemZRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
263   BitVector Reserved(getNumRegs());
264   const SystemZFrameLowering *TFI = getFrameLowering(MF);
265   const SystemZSubtarget *Subtarget = &MF.getSubtarget<SystemZSubtarget>();
266   SystemZCallingConventionRegisters *Regs = Subtarget->getSpecialRegisters();
267   if (TFI->hasFP(MF))
268     // The frame pointer. Reserve all aliases.
269     for (MCRegAliasIterator AI(Regs->getFramePointerRegister(), this, true);
270          AI.isValid(); ++AI)
271       Reserved.set(*AI);
272 
273   // Reserve all aliases for the stack pointer.
274   for (MCRegAliasIterator AI(Regs->getStackPointerRegister(), this, true);
275        AI.isValid(); ++AI)
276     Reserved.set(*AI);
277 
278   // A0 and A1 hold the thread pointer.
279   Reserved.set(SystemZ::A0);
280   Reserved.set(SystemZ::A1);
281 
282   // FPC is the floating-point control register.
283   Reserved.set(SystemZ::FPC);
284 
285   return Reserved;
286 }
287 
// Rewrite the frame-index operand of MI into a base register + immediate
// offset.  If the combined offset does not fit the instruction's addressing
// mode, an in-range anchor address is materialized in a scratch register
// (or the offset is loaded as an index).  dbg_value instructions are
// rewritten via their debug expressions instead.  Always returns false
// (MI is updated in place, never deleted).
bool
SystemZRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
                                         int SPAdj, unsigned FIOperandNum,
                                         RegScavenger *RS) const {
  assert(SPAdj == 0 && "Outgoing arguments should be part of the frame");

  MachineBasicBlock &MBB = *MI->getParent();
  MachineFunction &MF = *MBB.getParent();
  auto *TII = MF.getSubtarget<SystemZSubtarget>().getInstrInfo();
  const SystemZFrameLowering *TFI = getFrameLowering(MF);
  DebugLoc DL = MI->getDebugLoc();

  // Decompose the frame index into a base and offset.
  int FrameIndex = MI->getOperand(FIOperandNum).getIndex();
  Register BasePtr;
  int64_t Offset =
      (TFI->getFrameIndexReference(MF, FrameIndex, BasePtr).getFixed() +
       MI->getOperand(FIOperandNum + 1).getImm());

  // Special handling of dbg_value instructions.
  if (MI->isDebugValue()) {
    MI->getOperand(FIOperandNum).ChangeToRegister(BasePtr, /*isDef*/ false);
    if (MI->isNonListDebugValue()) {
      // Single-location DBG_VALUE: the offset operand carries the offset.
      MI->getDebugOffset().ChangeToImmediate(Offset);
    } else {
      // DBG_VALUE_LIST: fold the offset into the DIExpression for the
      // operand being rewritten.
      unsigned OpIdx = MI->getDebugOperandIndex(&MI->getOperand(FIOperandNum));
      SmallVector<uint64_t, 3> Ops;
      DIExpression::appendOffset(
          Ops, TFI->getFrameIndexReference(MF, FrameIndex, BasePtr).getFixed());
      MI->getDebugExpressionOp().setMetadata(
          DIExpression::appendOpsToArg(MI->getDebugExpression(), Ops, OpIdx));
    }
    return false;
  }

  // See if the offset is in range, or if an equivalent instruction that
  // accepts the offset exists.
  unsigned Opcode = MI->getOpcode();
  unsigned OpcodeForOffset = TII->getOpcodeForOffset(Opcode, Offset, &*MI);
  if (OpcodeForOffset) {
    if (OpcodeForOffset == SystemZ::LE &&
        MF.getSubtarget<SystemZSubtarget>().hasVector()) {
      // If LE is ok for offset, use LDE instead on z13.
      OpcodeForOffset = SystemZ::LDE32;
    }
    MI->getOperand(FIOperandNum).ChangeToRegister(BasePtr, false);
  }
  else {
    // Create an anchor point that is in range.  Start at 0xffff so that
    // can use LLILH to load the immediate.
    int64_t OldOffset = Offset;
    int64_t Mask = 0xffff;
    do {
      // Shrink the mask until the low part of the offset is accepted by
      // some opcode; the remainder becomes the anchor (HighOffset).
      Offset = OldOffset & Mask;
      OpcodeForOffset = TII->getOpcodeForOffset(Opcode, Offset);
      Mask >>= 1;
      assert(Mask && "One offset must be OK");
    } while (!OpcodeForOffset);

    Register ScratchReg =
        MF.getRegInfo().createVirtualRegister(&SystemZ::ADDR64BitRegClass);
    int64_t HighOffset = OldOffset - Offset;

    if (MI->getDesc().TSFlags & SystemZII::HasIndex
        && MI->getOperand(FIOperandNum + 2).getReg() == 0) {
      // Load the offset into the scratch register and use it as an index.
      // The scratch register then dies here.
      TII->loadImmediate(MBB, MI, ScratchReg, HighOffset);
      MI->getOperand(FIOperandNum).ChangeToRegister(BasePtr, false);
      MI->getOperand(FIOperandNum + 2).ChangeToRegister(ScratchReg,
                                                        false, false, true);
    } else {
      // Load the anchor address into a scratch register.
      unsigned LAOpcode = TII->getOpcodeForOffset(SystemZ::LA, HighOffset);
      if (LAOpcode)
        BuildMI(MBB, MI, DL, TII->get(LAOpcode),ScratchReg)
          .addReg(BasePtr).addImm(HighOffset).addReg(0);
      else {
        // Load the high offset into the scratch register and use it as
        // an index.
        TII->loadImmediate(MBB, MI, ScratchReg, HighOffset);
        BuildMI(MBB, MI, DL, TII->get(SystemZ::LA), ScratchReg)
          .addReg(BasePtr, RegState::Kill).addImm(0).addReg(ScratchReg);
      }

      // Use the scratch register as the base.  It then dies here.
      MI->getOperand(FIOperandNum).ChangeToRegister(ScratchReg,
                                                    false, false, true);
    }
  }
  // Install the (possibly changed) opcode and the final in-range offset.
  MI->setDesc(TII->get(OpcodeForOffset));
  MI->getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
  return false;
}
382 
// Decide whether the register coalescer may merge the operands of COPY
// instruction MI.  Everything is allowed except certain GR128 subreg
// copies, which are only coalesced when the 128-bit liverange stays inside
// one basic block and enough GR128 registers remain unclobbered there —
// otherwise register allocation could run out of register pairs.
bool SystemZRegisterInfo::shouldCoalesce(MachineInstr *MI,
                                         const TargetRegisterClass *SrcRC,
                                         unsigned SubReg,
                                         const TargetRegisterClass *DstRC,
                                         unsigned DstSubReg,
                                         const TargetRegisterClass *NewRC,
                                         LiveIntervals &LIS) const {
  assert (MI->isCopy() && "Only expecting COPY instructions");

  // Coalesce anything which is not a COPY involving a subreg to/from GR128.
  if (!(NewRC->hasSuperClassEq(&SystemZ::GR128BitRegClass) &&
        (getRegSizeInBits(*SrcRC) <= 64 || getRegSizeInBits(*DstRC) <= 64) &&
        !MI->getOperand(1).isUndef()))
    return true;

  // Allow coalescing of a GR128 subreg COPY only if the subreg liverange is
  // local to one MBB with not too many interferring physreg clobbers. Otherwise
  // regalloc may run out of registers.
  // Pick whichever COPY operand is the narrow (subreg) side.
  unsigned SubregOpIdx = getRegSizeInBits(*SrcRC) == 128 ? 0 : 1;
  LiveInterval &LI = LIS.getInterval(MI->getOperand(SubregOpIdx).getReg());

  // Check that the subreg is local to MBB.
  MachineBasicBlock *MBB = MI->getParent();
  MachineInstr *FirstMI = LIS.getInstructionFromIndex(LI.beginIndex());
  MachineInstr *LastMI = LIS.getInstructionFromIndex(LI.endIndex());
  if (!FirstMI || FirstMI->getParent() != MBB ||
      !LastMI || LastMI->getParent() != MBB)
    return false;

  // Check if coalescing seems safe by finding the set of clobbered physreg
  // pairs in the region.
  BitVector PhysClobbered(getNumRegs());
  for (MachineBasicBlock::iterator MII = FirstMI,
                                   MEE = std::next(LastMI->getIterator());
       MII != MEE; ++MII)
    for (const MachineOperand &MO : MII->operands())
      if (MO.isReg() && MO.getReg().isPhysical()) {
        // Record the containing GR128 superreg, if any.
        for (MCPhysReg SI : superregs_inclusive(MO.getReg()))
          if (NewRC->contains(SI)) {
            PhysClobbered.set(SI);
            break;
          }
      }

  // Demand an arbitrary margin of free regs.
  unsigned const DemandedFreeGR128 = 3;
  if (PhysClobbered.count() > (NewRC->getNumRegs() - DemandedFreeGR128))
    return false;

  return true;
}
434 
435 Register
436 SystemZRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
437   const SystemZFrameLowering *TFI = getFrameLowering(MF);
438   const SystemZSubtarget *Subtarget = &MF.getSubtarget<SystemZSubtarget>();
439   SystemZCallingConventionRegisters *Regs = Subtarget->getSpecialRegisters();
440 
441   return TFI->hasFP(MF) ? Regs->getFramePointerRegister()
442                         : Regs->getStackPointerRegister();
443 }
444 
445 const TargetRegisterClass *
446 SystemZRegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
447   if (RC == &SystemZ::CCRRegClass)
448     return &SystemZ::GR32BitRegClass;
449   return RC;
450 }
451 
452