xref: /llvm-project/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp (revision f7d8336a2fb4fad4a6efe5af9b0a10ddd970f6d3)
1 //===-- SystemZInstrInfo.cpp - SystemZ instruction information ------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file contains the SystemZ implementation of the TargetInstrInfo class.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "SystemZInstrInfo.h"
14 #include "MCTargetDesc/SystemZMCTargetDesc.h"
15 #include "SystemZ.h"
16 #include "SystemZInstrBuilder.h"
17 #include "SystemZSubtarget.h"
18 #include "llvm/ADT/Statistic.h"
19 #include "llvm/CodeGen/LiveInterval.h"
20 #include "llvm/CodeGen/LiveIntervals.h"
21 #include "llvm/CodeGen/LiveRegUnits.h"
22 #include "llvm/CodeGen/LiveVariables.h"
23 #include "llvm/CodeGen/MachineBasicBlock.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineFunction.h"
26 #include "llvm/CodeGen/MachineInstr.h"
27 #include "llvm/CodeGen/MachineMemOperand.h"
28 #include "llvm/CodeGen/MachineOperand.h"
29 #include "llvm/CodeGen/MachineRegisterInfo.h"
30 #include "llvm/CodeGen/SlotIndexes.h"
31 #include "llvm/CodeGen/StackMaps.h"
32 #include "llvm/CodeGen/TargetInstrInfo.h"
33 #include "llvm/CodeGen/TargetOpcodes.h"
34 #include "llvm/CodeGen/TargetSubtargetInfo.h"
35 #include "llvm/CodeGen/VirtRegMap.h"
36 #include "llvm/MC/MCInstrDesc.h"
37 #include "llvm/MC/MCRegisterInfo.h"
38 #include "llvm/Support/BranchProbability.h"
39 #include "llvm/Support/ErrorHandling.h"
40 #include "llvm/Support/MathExtras.h"
41 #include "llvm/Target/TargetMachine.h"
42 #include <cassert>
43 #include <cstdint>
44 #include <iterator>
45 
46 using namespace llvm;
47 
48 #define GET_INSTRINFO_CTOR_DTOR
49 #define GET_INSTRMAP_INFO
50 #include "SystemZGenInstrInfo.inc"
51 
52 #define DEBUG_TYPE "systemz-II"
53 
54 // Return a mask with Count low bits set.
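// (Illustrative note: the shift is split in two so that Count == 64 never
// shifts a 64-bit value by 64 bits, which would be undefined behavior; e.g.
// allOnes(3) == 0x7 and allOnes(64) == ~0ULL.)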
55 static uint64_t allOnes(unsigned int Count) {
56   return Count == 0 ? 0 : (uint64_t(1) << (Count - 1) << 1) - 1;
57 }
58 
59 // Pin the vtable to this file.
60 void SystemZInstrInfo::anchor() {}
61 
62 SystemZInstrInfo::SystemZInstrInfo(SystemZSubtarget &sti)
63     : SystemZGenInstrInfo(-1, -1),
64       RI(sti.getSpecialRegisters()->getReturnFunctionAddressRegister()),
65       STI(sti) {}
66 
67 // MI is a 128-bit load or store.  Split it into two 64-bit loads or stores,
68 // each having the opcode given by NewOpcode.
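// For example (illustrative only): a 128-bit load of %r0q from 0(%r2) with
// NewOpcode LG becomes "LG %r0, 0(%r2)" for the high half (subreg_h64) and
// "LG %r1, 8(%r2)" for the low half (subreg_l64).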
69 void SystemZInstrInfo::splitMove(MachineBasicBlock::iterator MI,
70                                  unsigned NewOpcode) const {
71   MachineBasicBlock *MBB = MI->getParent();
72   MachineFunction &MF = *MBB->getParent();
73 
74   // Get two load or store instructions.  Use the original instruction for
75   // one of them and create a clone for the other.
76   MachineInstr *HighPartMI = MF.CloneMachineInstr(&*MI);
77   MachineInstr *LowPartMI = &*MI;
78   MBB->insert(LowPartMI, HighPartMI);
79 
80   // Set up the two 64-bit registers and remember super reg and its flags.
81   MachineOperand &HighRegOp = HighPartMI->getOperand(0);
82   MachineOperand &LowRegOp = LowPartMI->getOperand(0);
83   Register Reg128 = LowRegOp.getReg();
84   unsigned Reg128Killed = getKillRegState(LowRegOp.isKill());
85   unsigned Reg128Undef  = getUndefRegState(LowRegOp.isUndef());
86   HighRegOp.setReg(RI.getSubReg(HighRegOp.getReg(), SystemZ::subreg_h64));
87   LowRegOp.setReg(RI.getSubReg(LowRegOp.getReg(), SystemZ::subreg_l64));
88 
89   // The address in the first (high) instruction is already correct.
90   // Adjust the offset in the second (low) instruction.
91   MachineOperand &HighOffsetOp = HighPartMI->getOperand(2);
92   MachineOperand &LowOffsetOp = LowPartMI->getOperand(2);
93   LowOffsetOp.setImm(LowOffsetOp.getImm() + 8);
94 
95   // Set the opcodes.
96   unsigned HighOpcode = getOpcodeForOffset(NewOpcode, HighOffsetOp.getImm());
97   unsigned LowOpcode = getOpcodeForOffset(NewOpcode, LowOffsetOp.getImm());
98   assert(HighOpcode && LowOpcode && "Both offsets should be in range");
99   HighPartMI->setDesc(get(HighOpcode));
100   LowPartMI->setDesc(get(LowOpcode));
101 
102   MachineInstr *FirstMI = HighPartMI;
103   if (MI->mayStore()) {
104     FirstMI->getOperand(0).setIsKill(false);
105     // Add implicit uses of the super register in case one of the subregs is
106     // undefined. We could track liveness and skip storing an undefined
107     // subreg, but this is hopefully rare (discovered with llvm-stress).
108     // If Reg128 was killed, carry the kill flag on the low part's implicit use.
109     unsigned Reg128UndefImpl = (Reg128Undef | RegState::Implicit);
110     MachineInstrBuilder(MF, HighPartMI).addReg(Reg128, Reg128UndefImpl);
111     MachineInstrBuilder(MF, LowPartMI).addReg(Reg128, (Reg128UndefImpl | Reg128Killed));
112   } else {
113     // If HighPartMI clobbers any of the address registers, it needs to come
114     // after LowPartMI.
115     auto overlapsAddressReg = [&](Register Reg) -> bool {
116       return RI.regsOverlap(Reg, MI->getOperand(1).getReg()) ||
117              RI.regsOverlap(Reg, MI->getOperand(3).getReg());
118     };
119     if (overlapsAddressReg(HighRegOp.getReg())) {
120       assert(!overlapsAddressReg(LowRegOp.getReg()) &&
121              "Both loads clobber address!");
122       MBB->splice(HighPartMI, MBB, LowPartMI);
123       FirstMI = LowPartMI;
124     }
125   }
126 
127   // Clear the kill flags on the address registers in the first instruction.
128   FirstMI->getOperand(1).setIsKill(false);
129   FirstMI->getOperand(3).setIsKill(false);
130 }
131 
132 // Split ADJDYNALLOC instruction MI.
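// The new offset is the original offset operand plus the outgoing-argument
// area, the ABI call frame size and the stack pointer bias (e.g. on ELF a
// 160-byte register save area and a bias of zero; the exact values depend on
// the calling convention in use).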
133 void SystemZInstrInfo::splitAdjDynAlloc(MachineBasicBlock::iterator MI) const {
134   MachineBasicBlock *MBB = MI->getParent();
135   MachineFunction &MF = *MBB->getParent();
136   MachineFrameInfo &MFFrame = MF.getFrameInfo();
137   MachineOperand &OffsetMO = MI->getOperand(2);
138   SystemZCallingConventionRegisters *Regs = STI.getSpecialRegisters();
139 
140   uint64_t Offset = (MFFrame.getMaxCallFrameSize() +
141                      Regs->getCallFrameSize() +
142                      Regs->getStackPointerBias() +
143                      OffsetMO.getImm());
144   unsigned NewOpcode = getOpcodeForOffset(SystemZ::LA, Offset);
145   assert(NewOpcode && "No support for huge argument lists yet");
146   MI->setDesc(get(NewOpcode));
147   OffsetMO.setImm(Offset);
148 }
149 
150 // MI is an RI-style pseudo instruction.  Replace it with LowOpcode
151 // if the first operand is a low GR32 and HighOpcode if the first operand
152 // is a high GR32.  ConvertHigh is true if LowOpcode takes a signed operand
153 // and HighOpcode takes an unsigned 32-bit operand.  In those cases,
154 // MI has the same kind of operand as LowOpcode, so needs to be converted
155 // if HighOpcode is used.
156 void SystemZInstrInfo::expandRIPseudo(MachineInstr &MI, unsigned LowOpcode,
157                                       unsigned HighOpcode,
158                                       bool ConvertHigh) const {
159   Register Reg = MI.getOperand(0).getReg();
160   bool IsHigh = SystemZ::isHighReg(Reg);
161   MI.setDesc(get(IsHigh ? HighOpcode : LowOpcode));
162   if (IsHigh && ConvertHigh)
163     MI.getOperand(1).setImm(uint32_t(MI.getOperand(1).getImm()));
164 }
165 
166 // MI is a three-operand RIE-style pseudo instruction.  Replace it with
167 // LowOpcodeK if the registers are both low GR32s, otherwise use a move
168 // followed by HighOpcode or LowOpcode, depending on whether the target
169 // is a high or low GR32.
170 void SystemZInstrInfo::expandRIEPseudo(MachineInstr &MI, unsigned LowOpcode,
171                                        unsigned LowOpcodeK,
172                                        unsigned HighOpcode) const {
173   Register DestReg = MI.getOperand(0).getReg();
174   Register SrcReg = MI.getOperand(1).getReg();
175   bool DestIsHigh = SystemZ::isHighReg(DestReg);
176   bool SrcIsHigh = SystemZ::isHighReg(SrcReg);
177   if (!DestIsHigh && !SrcIsHigh)
178     MI.setDesc(get(LowOpcodeK));
179   else {
180     if (DestReg != SrcReg) {
181       emitGRX32Move(*MI.getParent(), MI, MI.getDebugLoc(), DestReg, SrcReg,
182                     SystemZ::LR, 32, MI.getOperand(1).isKill(),
183                     MI.getOperand(1).isUndef());
184       MI.getOperand(1).setReg(DestReg);
185     }
186     MI.setDesc(get(DestIsHigh ? HighOpcode : LowOpcode));
187     MI.tieOperands(0, 1);
188   }
189 }
190 
191 // MI is an RXY-style pseudo instruction.  Replace it with LowOpcode
192 // if the first operand is a low GR32 and HighOpcode if the first operand
193 // is a high GR32.
194 void SystemZInstrInfo::expandRXYPseudo(MachineInstr &MI, unsigned LowOpcode,
195                                        unsigned HighOpcode) const {
196   Register Reg = MI.getOperand(0).getReg();
197   unsigned Opcode = getOpcodeForOffset(
198       SystemZ::isHighReg(Reg) ? HighOpcode : LowOpcode,
199       MI.getOperand(2).getImm());
200   MI.setDesc(get(Opcode));
201 }
202 
203 // MI is a load-on-condition pseudo instruction with a single register
204 // (source or destination) operand.  Replace it with LowOpcode if the
205 // register is a low GR32 and HighOpcode if the register is a high GR32.
206 void SystemZInstrInfo::expandLOCPseudo(MachineInstr &MI, unsigned LowOpcode,
207                                        unsigned HighOpcode) const {
208   Register Reg = MI.getOperand(0).getReg();
209   unsigned Opcode = SystemZ::isHighReg(Reg) ? HighOpcode : LowOpcode;
210   MI.setDesc(get(Opcode));
211 }
212 
213 // MI is an RR-style pseudo instruction that zero-extends the low Size bits
214 // of one GRX32 into another.  Replace it with LowOpcode if both operands
215 // are low registers, otherwise use RISB[LH]G.
216 void SystemZInstrInfo::expandZExtPseudo(MachineInstr &MI, unsigned LowOpcode,
217                                         unsigned Size) const {
218   MachineInstrBuilder MIB =
219     emitGRX32Move(*MI.getParent(), MI, MI.getDebugLoc(),
220                MI.getOperand(0).getReg(), MI.getOperand(1).getReg(), LowOpcode,
221                Size, MI.getOperand(1).isKill(), MI.getOperand(1).isUndef());
222 
223   // Keep the remaining operands as-is.
224   for (const MachineOperand &MO : llvm::drop_begin(MI.operands(), 2))
225     MIB.add(MO);
226 
227   MI.eraseFromParent();
228 }
229 
230 void SystemZInstrInfo::expandLoadStackGuard(MachineInstr *MI) const {
231   MachineBasicBlock *MBB = MI->getParent();
232   MachineFunction &MF = *MBB->getParent();
233   const Register Reg64 = MI->getOperand(0).getReg();
234   const Register Reg32 = RI.getSubReg(Reg64, SystemZ::subreg_l32);
235 
236   // EAR can only load the low subregister, so use a shift for %a0 to produce
237   // the 64-bit GR containing %a0 in the high half and %a1 in the low half.
238 
239   // ear <reg>, %a0
240   BuildMI(*MBB, MI, MI->getDebugLoc(), get(SystemZ::EAR), Reg32)
241     .addReg(SystemZ::A0)
242     .addReg(Reg64, RegState::ImplicitDefine);
243 
244   // sllg <reg>, <reg>, 32
245   BuildMI(*MBB, MI, MI->getDebugLoc(), get(SystemZ::SLLG), Reg64)
246     .addReg(Reg64)
247     .addReg(0)
248     .addImm(32);
249 
250   // ear <reg>, %a1
251   BuildMI(*MBB, MI, MI->getDebugLoc(), get(SystemZ::EAR), Reg32)
252     .addReg(SystemZ::A1);
253 
254   // lg <reg>, 40(<reg>)
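  // (Offset 40 is assumed here to be the stack-protector guard field relative
  // to the thread pointer formed from %a0/%a1 on z/Linux.)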
255   MI->setDesc(get(SystemZ::LG));
256   MachineInstrBuilder(MF, MI).addReg(Reg64).addImm(40).addReg(0);
257 }
258 
259 // Emit a zero-extending move from 32-bit GPR SrcReg to 32-bit GPR
260 // DestReg before MBBI in MBB.  Use LowLowOpcode when both DestReg and SrcReg
261 // are low registers, otherwise use RISB[LH]G.  Size is the number of bits
262 // taken from the low end of SrcReg (8 for LLCR, 16 for LLHR and 32 for LR).
263 // KillSrc is true if this move is the last use of SrcReg.
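// For example (illustrative only): with Size == 32 and a high-part source
// feeding a low-part destination, this emits RISBLH Dest, Src, 0, 128 + 31, 32,
// i.e. rotate the source by 32 and insert bits 0-31, zeroing the remaining bits.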
264 MachineInstrBuilder
265 SystemZInstrInfo::emitGRX32Move(MachineBasicBlock &MBB,
266                                 MachineBasicBlock::iterator MBBI,
267                                 const DebugLoc &DL, unsigned DestReg,
268                                 unsigned SrcReg, unsigned LowLowOpcode,
269                                 unsigned Size, bool KillSrc,
270                                 bool UndefSrc) const {
271   unsigned Opcode;
272   bool DestIsHigh = SystemZ::isHighReg(DestReg);
273   bool SrcIsHigh = SystemZ::isHighReg(SrcReg);
274   if (DestIsHigh && SrcIsHigh)
275     Opcode = SystemZ::RISBHH;
276   else if (DestIsHigh && !SrcIsHigh)
277     Opcode = SystemZ::RISBHL;
278   else if (!DestIsHigh && SrcIsHigh)
279     Opcode = SystemZ::RISBLH;
280   else {
281     return BuildMI(MBB, MBBI, DL, get(LowLowOpcode), DestReg)
282       .addReg(SrcReg, getKillRegState(KillSrc) | getUndefRegState(UndefSrc));
283   }
284   unsigned Rotate = (DestIsHigh != SrcIsHigh ? 32 : 0);
285   return BuildMI(MBB, MBBI, DL, get(Opcode), DestReg)
286     .addReg(DestReg, RegState::Undef)
287     .addReg(SrcReg, getKillRegState(KillSrc) | getUndefRegState(UndefSrc))
288     .addImm(32 - Size).addImm(128 + 31).addImm(Rotate);
289 }
290 
291 MachineInstr *SystemZInstrInfo::commuteInstructionImpl(MachineInstr &MI,
292                                                        bool NewMI,
293                                                        unsigned OpIdx1,
294                                                        unsigned OpIdx2) const {
295   auto cloneIfNew = [NewMI](MachineInstr &MI) -> MachineInstr & {
296     if (NewMI)
297       return *MI.getParent()->getParent()->CloneMachineInstr(&MI);
298     return MI;
299   };
300 
301   switch (MI.getOpcode()) {
302   case SystemZ::SELRMux:
303   case SystemZ::SELFHR:
304   case SystemZ::SELR:
305   case SystemZ::SELGR:
306   case SystemZ::LOCRMux:
307   case SystemZ::LOCFHR:
308   case SystemZ::LOCR:
309   case SystemZ::LOCGR: {
310     auto &WorkingMI = cloneIfNew(MI);
311     // Invert condition.
312     unsigned CCValid = WorkingMI.getOperand(3).getImm();
313     unsigned CCMask = WorkingMI.getOperand(4).getImm();
314     WorkingMI.getOperand(4).setImm(CCMask ^ CCValid);
315     return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
316                                                    OpIdx1, OpIdx2);
317   }
318   default:
319     return TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
320   }
321 }
322 
323 // If MI is a simple load or store for a frame object, return the register
324 // it loads or stores and set FrameIndex to the index of the frame object.
325 // Return 0 otherwise.
326 //
327 // Flag is SimpleBDXLoad for loads and SimpleBDXStore for stores.
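// A qualifying move has the operand form (reg, frame index, displacement 0,
// no index register), which is what the checks below test for.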
328 static int isSimpleMove(const MachineInstr &MI, int &FrameIndex,
329                         unsigned Flag) {
330   const MCInstrDesc &MCID = MI.getDesc();
331   if ((MCID.TSFlags & Flag) && MI.getOperand(1).isFI() &&
332       MI.getOperand(2).getImm() == 0 && MI.getOperand(3).getReg() == 0) {
333     FrameIndex = MI.getOperand(1).getIndex();
334     return MI.getOperand(0).getReg();
335   }
336   return 0;
337 }
338 
339 Register SystemZInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
340                                                int &FrameIndex) const {
341   return isSimpleMove(MI, FrameIndex, SystemZII::SimpleBDXLoad);
342 }
343 
344 Register SystemZInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
345                                               int &FrameIndex) const {
346   return isSimpleMove(MI, FrameIndex, SystemZII::SimpleBDXStore);
347 }
348 
349 bool SystemZInstrInfo::isStackSlotCopy(const MachineInstr &MI,
350                                        int &DestFrameIndex,
351                                        int &SrcFrameIndex) const {
352   // Check for MVC 0(Length,FI1),0(FI2)
353   const MachineFrameInfo &MFI = MI.getParent()->getParent()->getFrameInfo();
354   if (MI.getOpcode() != SystemZ::MVC || !MI.getOperand(0).isFI() ||
355       MI.getOperand(1).getImm() != 0 || !MI.getOperand(3).isFI() ||
356       MI.getOperand(4).getImm() != 0)
357     return false;
358 
359   // Check that Length covers the full slots.
360   int64_t Length = MI.getOperand(2).getImm();
361   unsigned FI1 = MI.getOperand(0).getIndex();
362   unsigned FI2 = MI.getOperand(3).getIndex();
363   if (MFI.getObjectSize(FI1) != Length ||
364       MFI.getObjectSize(FI2) != Length)
365     return false;
366 
367   DestFrameIndex = FI1;
368   SrcFrameIndex = FI2;
369   return true;
370 }
371 
372 bool SystemZInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
373                                      MachineBasicBlock *&TBB,
374                                      MachineBasicBlock *&FBB,
375                                      SmallVectorImpl<MachineOperand> &Cond,
376                                      bool AllowModify) const {
377   // Most of the code and comments here are boilerplate.
378 
379   // Start from the bottom of the block and work up, examining the
380   // terminator instructions.
381   MachineBasicBlock::iterator I = MBB.end();
382   while (I != MBB.begin()) {
383     --I;
384     if (I->isDebugInstr())
385       continue;
386 
387     // Working from the bottom, when we see a non-terminator instruction, we're
388     // done.
389     if (!isUnpredicatedTerminator(*I))
390       break;
391 
392     // A terminator that isn't a branch can't easily be handled by this
393     // analysis.
394     if (!I->isBranch())
395       return true;
396 
397     // Can't handle indirect branches.
398     SystemZII::Branch Branch(getBranchInfo(*I));
399     if (!Branch.hasMBBTarget())
400       return true;
401 
402     // Punt on compound branches.
403     if (Branch.Type != SystemZII::BranchNormal)
404       return true;
405 
406     if (Branch.CCMask == SystemZ::CCMASK_ANY) {
407       // Handle unconditional branches.
408       if (!AllowModify) {
409         TBB = Branch.getMBBTarget();
410         continue;
411       }
412 
413       // If the block has any instructions after a JMP, delete them.
414       MBB.erase(std::next(I), MBB.end());
415 
416       Cond.clear();
417       FBB = nullptr;
418 
419       // Delete the JMP if it's equivalent to a fall-through.
420       if (MBB.isLayoutSuccessor(Branch.getMBBTarget())) {
421         TBB = nullptr;
422         I->eraseFromParent();
423         I = MBB.end();
424         continue;
425       }
426 
427       // TBB is used to indicate the unconditional destination.
428       TBB = Branch.getMBBTarget();
429       continue;
430     }
431 
432     // Working from the bottom, handle the first conditional branch.
433     if (Cond.empty()) {
434       // FIXME: add X86-style branch swap
435       FBB = TBB;
436       TBB = Branch.getMBBTarget();
437       Cond.push_back(MachineOperand::CreateImm(Branch.CCValid));
438       Cond.push_back(MachineOperand::CreateImm(Branch.CCMask));
439       continue;
440     }
441 
442     // Handle subsequent conditional branches.
443     assert(Cond.size() == 2 && TBB && "Should have seen a conditional branch");
444 
445     // Only handle the case where all conditional branches branch to the same
446     // destination.
447     if (TBB != Branch.getMBBTarget())
448       return true;
449 
450     // If the conditions are the same, we can leave them alone.
451     unsigned OldCCValid = Cond[0].getImm();
452     unsigned OldCCMask = Cond[1].getImm();
453     if (OldCCValid == Branch.CCValid && OldCCMask == Branch.CCMask)
454       continue;
455 
456     // FIXME: Try combining conditions like X86 does.  Should be easy on Z!
457     return false;
458   }
459 
460   return false;
461 }
462 
463 unsigned SystemZInstrInfo::removeBranch(MachineBasicBlock &MBB,
464                                         int *BytesRemoved) const {
465   assert(!BytesRemoved && "code size not handled");
466 
467   // Most of the code and comments here are boilerplate.
468   MachineBasicBlock::iterator I = MBB.end();
469   unsigned Count = 0;
470 
471   while (I != MBB.begin()) {
472     --I;
473     if (I->isDebugInstr())
474       continue;
475     if (!I->isBranch())
476       break;
477     if (!getBranchInfo(*I).hasMBBTarget())
478       break;
479     // Remove the branch.
480     I->eraseFromParent();
481     I = MBB.end();
482     ++Count;
483   }
484 
485   return Count;
486 }
487 
488 bool SystemZInstrInfo::
489 reverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
490   assert(Cond.size() == 2 && "Invalid condition");
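  // XORing the mask with CCValid selects exactly the other valid CC values,
  // e.g. for an integer comparison (CCValid = 14) the "equal" mask 8 becomes
  // the "not equal" mask 6.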
491   Cond[1].setImm(Cond[1].getImm() ^ Cond[0].getImm());
492   return false;
493 }
494 
495 unsigned SystemZInstrInfo::insertBranch(MachineBasicBlock &MBB,
496                                         MachineBasicBlock *TBB,
497                                         MachineBasicBlock *FBB,
498                                         ArrayRef<MachineOperand> Cond,
499                                         const DebugLoc &DL,
500                                         int *BytesAdded) const {
501   // In this function we output 32-bit branches, which should always
502   // have enough range.  They can be shortened and relaxed by later code
503   // in the pipeline, if desired.
504 
505   // Shouldn't be a fall through.
506   assert(TBB && "insertBranch must not be told to insert a fallthrough");
507   assert((Cond.size() == 2 || Cond.size() == 0) &&
508          "SystemZ branch conditions have one component!");
509   assert(!BytesAdded && "code size not handled");
510 
511   if (Cond.empty()) {
512     // Unconditional branch?
513     assert(!FBB && "Unconditional branch with multiple successors!");
514     BuildMI(&MBB, DL, get(SystemZ::J)).addMBB(TBB);
515     return 1;
516   }
517 
518   // Conditional branch.
519   unsigned Count = 0;
520   unsigned CCValid = Cond[0].getImm();
521   unsigned CCMask = Cond[1].getImm();
522   BuildMI(&MBB, DL, get(SystemZ::BRC))
523     .addImm(CCValid).addImm(CCMask).addMBB(TBB);
524   ++Count;
525 
526   if (FBB) {
527     // Two-way conditional branch. Insert the second branch.
528     BuildMI(&MBB, DL, get(SystemZ::J)).addMBB(FBB);
529     ++Count;
530   }
531   return Count;
532 }
533 
534 bool SystemZInstrInfo::analyzeCompare(const MachineInstr &MI, Register &SrcReg,
535                                       Register &SrcReg2, int64_t &Mask,
536                                       int64_t &Value) const {
537   assert(MI.isCompare() && "Caller should have checked for a comparison");
538 
539   if (MI.getNumExplicitOperands() == 2 && MI.getOperand(0).isReg() &&
540       MI.getOperand(1).isImm()) {
541     SrcReg = MI.getOperand(0).getReg();
542     SrcReg2 = 0;
543     Value = MI.getOperand(1).getImm();
544     Mask = ~0;
545     return true;
546   }
547 
548   return false;
549 }
550 
551 bool SystemZInstrInfo::canInsertSelect(const MachineBasicBlock &MBB,
552                                        ArrayRef<MachineOperand> Pred,
553                                        Register DstReg, Register TrueReg,
554                                        Register FalseReg, int &CondCycles,
555                                        int &TrueCycles,
556                                        int &FalseCycles) const {
557   // Not all subtargets have LOCR instructions.
558   if (!STI.hasLoadStoreOnCond())
559     return false;
560   if (Pred.size() != 2)
561     return false;
562 
563   // Check register classes.
564   const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
565   const TargetRegisterClass *RC =
566     RI.getCommonSubClass(MRI.getRegClass(TrueReg), MRI.getRegClass(FalseReg));
567   if (!RC)
568     return false;
569 
570   // We have LOCR instructions for 32- and 64-bit general-purpose registers.
571   if ((STI.hasLoadStoreOnCond2() &&
572        SystemZ::GRX32BitRegClass.hasSubClassEq(RC)) ||
573       SystemZ::GR32BitRegClass.hasSubClassEq(RC) ||
574       SystemZ::GR64BitRegClass.hasSubClassEq(RC)) {
575     CondCycles = 2;
576     TrueCycles = 2;
577     FalseCycles = 2;
578     return true;
579   }
580 
581   // Can't do anything else.
582   return false;
583 }
584 
585 void SystemZInstrInfo::insertSelect(MachineBasicBlock &MBB,
586                                     MachineBasicBlock::iterator I,
587                                     const DebugLoc &DL, Register DstReg,
588                                     ArrayRef<MachineOperand> Pred,
589                                     Register TrueReg,
590                                     Register FalseReg) const {
591   MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
592   const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
593 
594   assert(Pred.size() == 2 && "Invalid condition");
595   unsigned CCValid = Pred[0].getImm();
596   unsigned CCMask = Pred[1].getImm();
597 
598   unsigned Opc;
599   if (SystemZ::GRX32BitRegClass.hasSubClassEq(RC)) {
600     if (STI.hasMiscellaneousExtensions3())
601       Opc = SystemZ::SELRMux;
602     else if (STI.hasLoadStoreOnCond2())
603       Opc = SystemZ::LOCRMux;
604     else {
605       Opc = SystemZ::LOCR;
606       MRI.constrainRegClass(DstReg, &SystemZ::GR32BitRegClass);
607       Register TReg = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
608       Register FReg = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
609       BuildMI(MBB, I, DL, get(TargetOpcode::COPY), TReg).addReg(TrueReg);
610       BuildMI(MBB, I, DL, get(TargetOpcode::COPY), FReg).addReg(FalseReg);
611       TrueReg = TReg;
612       FalseReg = FReg;
613     }
614   } else if (SystemZ::GR64BitRegClass.hasSubClassEq(RC)) {
615     if (STI.hasMiscellaneousExtensions3())
616       Opc = SystemZ::SELGR;
617     else
618       Opc = SystemZ::LOCGR;
619   } else
620     llvm_unreachable("Invalid register class");
621 
622   BuildMI(MBB, I, DL, get(Opc), DstReg)
623     .addReg(FalseReg).addReg(TrueReg)
624     .addImm(CCValid).addImm(CCMask);
625 }
626 
627 MachineInstr *SystemZInstrInfo::optimizeLoadInstr(MachineInstr &MI,
628                                                   const MachineRegisterInfo *MRI,
629                                                   Register &FoldAsLoadDefReg,
630                                                   MachineInstr *&DefMI) const {
631   // Check whether we can move the DefMI load, and that it only has one use.
632   DefMI = MRI->getVRegDef(FoldAsLoadDefReg);
633   assert(DefMI);
634   bool SawStore = false;
635   if (!DefMI->isSafeToMove(SawStore) || !MRI->hasOneNonDBGUse(FoldAsLoadDefReg))
636     return nullptr;
637 
638   int UseOpIdx =
639       MI.findRegisterUseOperandIdx(FoldAsLoadDefReg, /*TRI=*/nullptr);
640   assert(UseOpIdx != -1 && "Expected FoldAsLoadDefReg to be used by MI.");
641 
642   // Check whether we can fold the load.
643   if (MachineInstr *FoldMI =
644           foldMemoryOperand(MI, {((unsigned)UseOpIdx)}, *DefMI)) {
645     FoldAsLoadDefReg = 0;
646     return FoldMI;
647   }
648 
649   return nullptr;
650 }
651 
652 bool SystemZInstrInfo::foldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
653                                      Register Reg,
654                                      MachineRegisterInfo *MRI) const {
655   unsigned DefOpc = DefMI.getOpcode();
656 
657   if (DefOpc == SystemZ::VGBM) {
658     int64_t ImmVal = DefMI.getOperand(1).getImm();
659     if (ImmVal != 0) // TODO: Handle other values
660       return false;
661 
662     // Fold gr128 = COPY (vr128 VGBM imm) into:
663     //
664     //   %tmp:gr64 = LGHI 0
665     //   gr128 = REG_SEQUENCE %tmp, %tmp
666     assert(DefMI.getOperand(0).getReg() == Reg);
667 
668     if (!UseMI.isCopy())
669       return false;
670 
671     Register CopyDstReg = UseMI.getOperand(0).getReg();
672     if (CopyDstReg.isVirtual() &&
673         MRI->getRegClass(CopyDstReg) == &SystemZ::GR128BitRegClass &&
674         MRI->hasOneNonDBGUse(Reg)) {
675       // TODO: Handle physical registers
676       // TODO: Handle gr64 uses with subregister indexes
677       // TODO: Should this handle multi-use cases?
678       Register TmpReg = MRI->createVirtualRegister(&SystemZ::GR64BitRegClass);
679       MachineBasicBlock &MBB = *UseMI.getParent();
680 
681       loadImmediate(MBB, UseMI.getIterator(), TmpReg, ImmVal);
682 
683       UseMI.setDesc(get(SystemZ::REG_SEQUENCE));
684       UseMI.getOperand(1).setReg(TmpReg);
685       MachineInstrBuilder(*MBB.getParent(), &UseMI)
686           .addImm(SystemZ::subreg_h64)
687           .addReg(TmpReg)
688           .addImm(SystemZ::subreg_l64);
689 
690       if (MRI->use_nodbg_empty(Reg))
691         DefMI.eraseFromParent();
692       return true;
693     }
694 
695     return false;
696   }
697 
698   if (DefOpc != SystemZ::LHIMux && DefOpc != SystemZ::LHI &&
699       DefOpc != SystemZ::LGHI)
700     return false;
701   if (DefMI.getOperand(0).getReg() != Reg)
702     return false;
703   int32_t ImmVal = (int32_t)DefMI.getOperand(1).getImm();
704 
705   unsigned UseOpc = UseMI.getOpcode();
706   unsigned NewUseOpc;
707   unsigned UseIdx;
708   int CommuteIdx = -1;
709   bool TieOps = false;
710   switch (UseOpc) {
711   case SystemZ::SELRMux:
712     TieOps = true;
713     [[fallthrough]];
714   case SystemZ::LOCRMux:
715     if (!STI.hasLoadStoreOnCond2())
716       return false;
717     NewUseOpc = SystemZ::LOCHIMux;
718     if (UseMI.getOperand(2).getReg() == Reg)
719       UseIdx = 2;
720     else if (UseMI.getOperand(1).getReg() == Reg)
721       UseIdx = 2, CommuteIdx = 1;
722     else
723       return false;
724     break;
725   case SystemZ::SELGR:
726     TieOps = true;
727     [[fallthrough]];
728   case SystemZ::LOCGR:
729     if (!STI.hasLoadStoreOnCond2())
730       return false;
731     NewUseOpc = SystemZ::LOCGHI;
732     if (UseMI.getOperand(2).getReg() == Reg)
733       UseIdx = 2;
734     else if (UseMI.getOperand(1).getReg() == Reg)
735       UseIdx = 2, CommuteIdx = 1;
736     else
737       return false;
738     break;
739   default:
740     return false;
741   }
742 
743   if (CommuteIdx != -1)
744     if (!commuteInstruction(UseMI, false, CommuteIdx, UseIdx))
745       return false;
746 
747   bool DeleteDef = MRI->hasOneNonDBGUse(Reg);
748   UseMI.setDesc(get(NewUseOpc));
749   if (TieOps)
750     UseMI.tieOperands(0, 1);
751   UseMI.getOperand(UseIdx).ChangeToImmediate(ImmVal);
752   if (DeleteDef)
753     DefMI.eraseFromParent();
754 
755   return true;
756 }
757 
758 bool SystemZInstrInfo::isPredicable(const MachineInstr &MI) const {
759   unsigned Opcode = MI.getOpcode();
760   if (Opcode == SystemZ::Return ||
761       Opcode == SystemZ::Return_XPLINK ||
762       Opcode == SystemZ::Trap ||
763       Opcode == SystemZ::CallJG ||
764       Opcode == SystemZ::CallBR)
765     return true;
766   return false;
767 }
768 
769 bool SystemZInstrInfo::
770 isProfitableToIfCvt(MachineBasicBlock &MBB,
771                     unsigned NumCycles, unsigned ExtraPredCycles,
772                     BranchProbability Probability) const {
773   // Avoid using conditional returns at the end of a loop (since then
774   // we'd need to emit an unconditional branch to the beginning anyway,
775   // making the loop body longer).  This doesn't apply for low-probability
776   // loops (e.g. compare-and-swap retry), so just decide based on branch
777   // probability instead of looping structure.
778   // However, since Compare and Trap instructions cost the same as a regular
779   // Compare instruction, we should allow if conversion to turn this into a
780   // conditional trap (Compare and Trap) regardless of the branch probability.
781   if (MBB.getLastNonDebugInstr()->getOpcode() != SystemZ::Trap &&
782       MBB.succ_empty() && Probability < BranchProbability(1, 8))
783     return false;
784   // For now only convert single instructions.
785   return NumCycles == 1;
786 }
787 
788 bool SystemZInstrInfo::
789 isProfitableToIfCvt(MachineBasicBlock &TMBB,
790                     unsigned NumCyclesT, unsigned ExtraPredCyclesT,
791                     MachineBasicBlock &FMBB,
792                     unsigned NumCyclesF, unsigned ExtraPredCyclesF,
793                     BranchProbability Probability) const {
794   // For now avoid converting mutually-exclusive cases.
795   return false;
796 }
797 
798 bool SystemZInstrInfo::
799 isProfitableToDupForIfCvt(MachineBasicBlock &MBB, unsigned NumCycles,
800                           BranchProbability Probability) const {
801   // For now only duplicate single instructions.
802   return NumCycles == 1;
803 }
804 
805 bool SystemZInstrInfo::PredicateInstruction(
806     MachineInstr &MI, ArrayRef<MachineOperand> Pred) const {
807   assert(Pred.size() == 2 && "Invalid condition");
808   unsigned CCValid = Pred[0].getImm();
809   unsigned CCMask = Pred[1].getImm();
810   assert(CCMask > 0 && CCMask < 15 && "Invalid predicate");
811   unsigned Opcode = MI.getOpcode();
812   if (Opcode == SystemZ::Trap) {
813     MI.setDesc(get(SystemZ::CondTrap));
814     MachineInstrBuilder(*MI.getParent()->getParent(), MI)
815       .addImm(CCValid).addImm(CCMask)
816       .addReg(SystemZ::CC, RegState::Implicit);
817     return true;
818   }
819   if (Opcode == SystemZ::Return || Opcode == SystemZ::Return_XPLINK) {
820     MI.setDesc(get(Opcode == SystemZ::Return ? SystemZ::CondReturn
821                                              : SystemZ::CondReturn_XPLINK));
822     MachineInstrBuilder(*MI.getParent()->getParent(), MI)
823         .addImm(CCValid)
824         .addImm(CCMask)
825         .addReg(SystemZ::CC, RegState::Implicit);
826     return true;
827   }
828   if (Opcode == SystemZ::CallJG) {
829     MachineOperand FirstOp = MI.getOperand(0);
830     const uint32_t *RegMask = MI.getOperand(1).getRegMask();
831     MI.removeOperand(1);
832     MI.removeOperand(0);
833     MI.setDesc(get(SystemZ::CallBRCL));
834     MachineInstrBuilder(*MI.getParent()->getParent(), MI)
835         .addImm(CCValid)
836         .addImm(CCMask)
837         .add(FirstOp)
838         .addRegMask(RegMask)
839         .addReg(SystemZ::CC, RegState::Implicit);
840     return true;
841   }
842   if (Opcode == SystemZ::CallBR) {
843     MachineOperand Target = MI.getOperand(0);
844     const uint32_t *RegMask = MI.getOperand(1).getRegMask();
845     MI.removeOperand(1);
846     MI.removeOperand(0);
847     MI.setDesc(get(SystemZ::CallBCR));
848     MachineInstrBuilder(*MI.getParent()->getParent(), MI)
849       .addImm(CCValid).addImm(CCMask)
850       .add(Target)
851       .addRegMask(RegMask)
852       .addReg(SystemZ::CC, RegState::Implicit);
853     return true;
854   }
855   return false;
856 }
857 
858 void SystemZInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
859                                    MachineBasicBlock::iterator MBBI,
860                                    const DebugLoc &DL, MCRegister DestReg,
861                                    MCRegister SrcReg, bool KillSrc,
862                                    bool RenamableDest,
863                                    bool RenamableSrc) const {
864   // Split 128-bit GPR moves into two 64-bit moves. Add implicit uses of the
865   // super register in case one of the subregs is undefined.
866   // This handles ADDR128 too.
867   if (SystemZ::GR128BitRegClass.contains(DestReg, SrcReg)) {
868     copyPhysReg(MBB, MBBI, DL, RI.getSubReg(DestReg, SystemZ::subreg_h64),
869                 RI.getSubReg(SrcReg, SystemZ::subreg_h64), KillSrc);
870     MachineInstrBuilder(*MBB.getParent(), std::prev(MBBI))
871       .addReg(SrcReg, RegState::Implicit);
872     copyPhysReg(MBB, MBBI, DL, RI.getSubReg(DestReg, SystemZ::subreg_l64),
873                 RI.getSubReg(SrcReg, SystemZ::subreg_l64), KillSrc);
874     MachineInstrBuilder(*MBB.getParent(), std::prev(MBBI))
875       .addReg(SrcReg, (getKillRegState(KillSrc) | RegState::Implicit));
876     return;
877   }
878 
879   if (SystemZ::GRX32BitRegClass.contains(DestReg, SrcReg)) {
880     emitGRX32Move(MBB, MBBI, DL, DestReg, SrcReg, SystemZ::LR, 32, KillSrc,
881                   false);
882     return;
883   }
884 
885   // Move 128-bit floating-point values between VR128 and FP128.
886   if (SystemZ::VR128BitRegClass.contains(DestReg) &&
887       SystemZ::FP128BitRegClass.contains(SrcReg)) {
888     MCRegister SrcRegHi =
889         RI.getMatchingSuperReg(RI.getSubReg(SrcReg, SystemZ::subreg_h64),
890                                SystemZ::subreg_h64, &SystemZ::VR128BitRegClass);
891     MCRegister SrcRegLo =
892         RI.getMatchingSuperReg(RI.getSubReg(SrcReg, SystemZ::subreg_l64),
893                                SystemZ::subreg_h64, &SystemZ::VR128BitRegClass);
894 
895     BuildMI(MBB, MBBI, DL, get(SystemZ::VMRHG), DestReg)
896       .addReg(SrcRegHi, getKillRegState(KillSrc))
897       .addReg(SrcRegLo, getKillRegState(KillSrc));
898     return;
899   }
900   if (SystemZ::FP128BitRegClass.contains(DestReg) &&
901       SystemZ::VR128BitRegClass.contains(SrcReg)) {
902     MCRegister DestRegHi =
903         RI.getMatchingSuperReg(RI.getSubReg(DestReg, SystemZ::subreg_h64),
904                                SystemZ::subreg_h64, &SystemZ::VR128BitRegClass);
905     MCRegister DestRegLo =
906         RI.getMatchingSuperReg(RI.getSubReg(DestReg, SystemZ::subreg_l64),
907                                SystemZ::subreg_h64, &SystemZ::VR128BitRegClass);
908 
909     if (DestRegHi != SrcReg)
910       copyPhysReg(MBB, MBBI, DL, DestRegHi, SrcReg, false);
911     BuildMI(MBB, MBBI, DL, get(SystemZ::VREPG), DestRegLo)
912       .addReg(SrcReg, getKillRegState(KillSrc)).addImm(1);
913     return;
914   }
915 
916   if (SystemZ::FP128BitRegClass.contains(DestReg) &&
917       SystemZ::GR128BitRegClass.contains(SrcReg)) {
918     MCRegister DestRegHi = RI.getSubReg(DestReg, SystemZ::subreg_h64);
919     MCRegister DestRegLo = RI.getSubReg(DestReg, SystemZ::subreg_l64);
920     MCRegister SrcRegHi = RI.getSubReg(SrcReg, SystemZ::subreg_h64);
921     MCRegister SrcRegLo = RI.getSubReg(SrcReg, SystemZ::subreg_l64);
922 
923     BuildMI(MBB, MBBI, DL, get(SystemZ::LDGR), DestRegHi)
924         .addReg(SrcRegHi)
925         .addReg(DestReg, RegState::ImplicitDefine);
926 
927     BuildMI(MBB, MBBI, DL, get(SystemZ::LDGR), DestRegLo)
928         .addReg(SrcRegLo, getKillRegState(KillSrc));
929     return;
930   }
931 
932   // Move CC value from a GR32.
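  // The mask 3 << (IPM_CC - 16) selects the two bits where IPM left the
  // condition code; TEST UNDER MASK then sets CC to 0, 1, 2 or 3 according to
  // the value of those two bits, restoring the original CC.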
933   if (DestReg == SystemZ::CC) {
934     unsigned Opcode =
935       SystemZ::GR32BitRegClass.contains(SrcReg) ? SystemZ::TMLH : SystemZ::TMHH;
936     BuildMI(MBB, MBBI, DL, get(Opcode))
937       .addReg(SrcReg, getKillRegState(KillSrc))
938       .addImm(3 << (SystemZ::IPM_CC - 16));
939     return;
940   }
941 
942   if (SystemZ::GR128BitRegClass.contains(DestReg) &&
943       SystemZ::VR128BitRegClass.contains(SrcReg)) {
944     MCRegister DestH64 = RI.getSubReg(DestReg, SystemZ::subreg_h64);
945     MCRegister DestL64 = RI.getSubReg(DestReg, SystemZ::subreg_l64);
946 
947     BuildMI(MBB, MBBI, DL, get(SystemZ::VLGVG), DestH64)
948         .addReg(SrcReg)
949         .addReg(SystemZ::NoRegister)
950         .addImm(0)
951         .addDef(DestReg, RegState::Implicit);
952     BuildMI(MBB, MBBI, DL, get(SystemZ::VLGVG), DestL64)
953         .addReg(SrcReg, getKillRegState(KillSrc))
954         .addReg(SystemZ::NoRegister)
955         .addImm(1);
956     return;
957   }
958 
959   if (SystemZ::VR128BitRegClass.contains(DestReg) &&
960       SystemZ::GR128BitRegClass.contains(SrcReg)) {
961     BuildMI(MBB, MBBI, DL, get(SystemZ::VLVGP), DestReg)
962         .addReg(RI.getSubReg(SrcReg, SystemZ::subreg_h64))
963         .addReg(RI.getSubReg(SrcReg, SystemZ::subreg_l64));
964     return;
965   }
966 
967   // Everything else needs only one instruction.
968   unsigned Opcode;
969   if (SystemZ::GR64BitRegClass.contains(DestReg, SrcReg))
970     Opcode = SystemZ::LGR;
971   else if (SystemZ::FP32BitRegClass.contains(DestReg, SrcReg))
972     // For z13 we prefer LDR over LER to avoid partial register dependencies.
973     Opcode = STI.hasVector() ? SystemZ::LDR32 : SystemZ::LER;
974   else if (SystemZ::FP64BitRegClass.contains(DestReg, SrcReg))
975     Opcode = SystemZ::LDR;
976   else if (SystemZ::FP128BitRegClass.contains(DestReg, SrcReg))
977     Opcode = SystemZ::LXR;
978   else if (SystemZ::VR32BitRegClass.contains(DestReg, SrcReg))
979     Opcode = SystemZ::VLR32;
980   else if (SystemZ::VR64BitRegClass.contains(DestReg, SrcReg))
981     Opcode = SystemZ::VLR64;
982   else if (SystemZ::VR128BitRegClass.contains(DestReg, SrcReg))
983     Opcode = SystemZ::VLR;
984   else if (SystemZ::AR32BitRegClass.contains(DestReg, SrcReg))
985     Opcode = SystemZ::CPYA;
986   else
987     llvm_unreachable("Impossible reg-to-reg copy");
988 
989   BuildMI(MBB, MBBI, DL, get(Opcode), DestReg)
990     .addReg(SrcReg, getKillRegState(KillSrc));
991 }
992 
993 void SystemZInstrInfo::storeRegToStackSlot(
994     MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register SrcReg,
995     bool isKill, int FrameIdx, const TargetRegisterClass *RC,
996     const TargetRegisterInfo *TRI, Register VReg,
997     MachineInstr::MIFlag Flags) const {
998   DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
999 
1000   // Callers may expect a single instruction, so keep 128-bit moves
1001   // together for now and lower them after register allocation.
1002   unsigned LoadOpcode, StoreOpcode;
1003   getLoadStoreOpcodes(RC, LoadOpcode, StoreOpcode);
1004   addFrameReference(BuildMI(MBB, MBBI, DL, get(StoreOpcode))
1005                         .addReg(SrcReg, getKillRegState(isKill)),
1006                     FrameIdx);
1007 }
1008 
1009 void SystemZInstrInfo::loadRegFromStackSlot(
1010     MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register DestReg,
1011     int FrameIdx, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI,
1012     Register VReg, MachineInstr::MIFlag Flags) const {
1013   DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
1014 
1015   // Callers may expect a single instruction, so keep 128-bit moves
1016   // together for now and lower them after register allocation.
1017   unsigned LoadOpcode, StoreOpcode;
1018   getLoadStoreOpcodes(RC, LoadOpcode, StoreOpcode);
1019   addFrameReference(BuildMI(MBB, MBBI, DL, get(LoadOpcode), DestReg),
1020                     FrameIdx);
1021 }
1022 
1023 // Return true if MI is a simple load or store with a 12-bit displacement
1024 // and no index.  Flag is SimpleBDXLoad for loads and SimpleBDXStore for stores.
1025 static bool isSimpleBD12Move(const MachineInstr *MI, unsigned Flag) {
1026   const MCInstrDesc &MCID = MI->getDesc();
1027   return ((MCID.TSFlags & Flag) &&
1028           isUInt<12>(MI->getOperand(2).getImm()) &&
1029           MI->getOperand(3).getReg() == 0);
1030 }
1031 
1032 namespace {
1033 
1034 struct LogicOp {
1035   LogicOp() = default;
1036   LogicOp(unsigned regSize, unsigned immLSB, unsigned immSize)
1037     : RegSize(regSize), ImmLSB(immLSB), ImmSize(immSize) {}
1038 
1039   explicit operator bool() const { return RegSize; }
1040 
1041   unsigned RegSize = 0;
1042   unsigned ImmLSB = 0;
1043   unsigned ImmSize = 0;
1044 };
1045 
1046 } // end anonymous namespace
1047 
1048 static LogicOp interpretAndImmediate(unsigned Opcode) {
1049   switch (Opcode) {
1050   case SystemZ::NILMux: return LogicOp(32,  0, 16);
1051   case SystemZ::NIHMux: return LogicOp(32, 16, 16);
1052   case SystemZ::NILL64: return LogicOp(64,  0, 16);
1053   case SystemZ::NILH64: return LogicOp(64, 16, 16);
1054   case SystemZ::NIHL64: return LogicOp(64, 32, 16);
1055   case SystemZ::NIHH64: return LogicOp(64, 48, 16);
1056   case SystemZ::NIFMux: return LogicOp(32,  0, 32);
1057   case SystemZ::NILF64: return LogicOp(64,  0, 32);
1058   case SystemZ::NIHF64: return LogicOp(64, 32, 32);
1059   default:              return LogicOp();
1060   }
1061 }
1062 
1063 static void transferDeadCC(MachineInstr *OldMI, MachineInstr *NewMI) {
1064   if (OldMI->registerDefIsDead(SystemZ::CC, /*TRI=*/nullptr)) {
1065     MachineOperand *CCDef =
1066         NewMI->findRegisterDefOperand(SystemZ::CC, /*TRI=*/nullptr);
1067     if (CCDef != nullptr)
1068       CCDef->setIsDead(true);
1069   }
1070 }
1071 
1072 static void transferMIFlag(MachineInstr *OldMI, MachineInstr *NewMI,
1073                            MachineInstr::MIFlag Flag) {
1074   if (OldMI->getFlag(Flag))
1075     NewMI->setFlag(Flag);
1076 }
1077 
1078 MachineInstr *
1079 SystemZInstrInfo::convertToThreeAddress(MachineInstr &MI, LiveVariables *LV,
1080                                         LiveIntervals *LIS) const {
1081   MachineBasicBlock *MBB = MI.getParent();
1082 
1083   // Try to convert an AND into an RISBG-type instruction.
1084   // TODO: It might be beneficial to select RISBG and shorten to AND instead.
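  // For example (illustrative only): "NILL64 %r1, 0xff00" keeps every bit of
  // the 64-bit register except bits 0-7, a single contiguous bit range, and
  // can therefore be rewritten as one RISBG/RISBGN.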
1085   if (LogicOp And = interpretAndImmediate(MI.getOpcode())) {
1086     uint64_t Imm = MI.getOperand(2).getImm() << And.ImmLSB;
1087     // AND IMMEDIATE leaves the other bits of the register unchanged.
1088     Imm |= allOnes(And.RegSize) & ~(allOnes(And.ImmSize) << And.ImmLSB);
1089     unsigned Start, End;
1090     if (isRxSBGMask(Imm, And.RegSize, Start, End)) {
1091       unsigned NewOpcode;
1092       if (And.RegSize == 64) {
1093         NewOpcode = SystemZ::RISBG;
1094         // Prefer RISBGN if available, since it does not clobber CC.
1095         if (STI.hasMiscellaneousExtensions())
1096           NewOpcode = SystemZ::RISBGN;
1097       } else {
1098         NewOpcode = SystemZ::RISBMux;
1099         Start &= 31;
1100         End &= 31;
1101       }
1102       MachineOperand &Dest = MI.getOperand(0);
1103       MachineOperand &Src = MI.getOperand(1);
1104       MachineInstrBuilder MIB =
1105           BuildMI(*MBB, MI, MI.getDebugLoc(), get(NewOpcode))
1106               .add(Dest)
1107               .addReg(0)
1108               .addReg(Src.getReg(), getKillRegState(Src.isKill()),
1109                       Src.getSubReg())
1110               .addImm(Start)
1111               .addImm(End + 128)
1112               .addImm(0);
1113       if (LV) {
1114         unsigned NumOps = MI.getNumOperands();
1115         for (unsigned I = 1; I < NumOps; ++I) {
1116           MachineOperand &Op = MI.getOperand(I);
1117           if (Op.isReg() && Op.isKill())
1118             LV->replaceKillInstruction(Op.getReg(), MI, *MIB);
1119         }
1120       }
1121       if (LIS)
1122         LIS->ReplaceMachineInstrInMaps(MI, *MIB);
1123       transferDeadCC(&MI, MIB);
1124       return MIB;
1125     }
1126   }
1127   return nullptr;
1128 }
1129 
1130 bool SystemZInstrInfo::isAssociativeAndCommutative(const MachineInstr &Inst,
1131                                                    bool Invert) const {
1132   unsigned Opc = Inst.getOpcode();
1133   if (Invert) {
1134     auto InverseOpcode = getInverseOpcode(Opc);
1135     if (!InverseOpcode)
1136       return false;
1137     Opc = *InverseOpcode;
1138   }
1139 
1140   switch (Opc) {
1141   default:
1142     break;
1143   // Adds and multiplications.
1144   case SystemZ::WFADB:
1145   case SystemZ::WFASB:
1146   case SystemZ::WFAXB:
1147   case SystemZ::VFADB:
1148   case SystemZ::VFASB:
1149   case SystemZ::WFMDB:
1150   case SystemZ::WFMSB:
1151   case SystemZ::WFMXB:
1152   case SystemZ::VFMDB:
1153   case SystemZ::VFMSB:
1154     return (Inst.getFlag(MachineInstr::MIFlag::FmReassoc) &&
1155             Inst.getFlag(MachineInstr::MIFlag::FmNsz));
1156   }
1157 
1158   return false;
1159 }
1160 
1161 std::optional<unsigned>
1162 SystemZInstrInfo::getInverseOpcode(unsigned Opcode) const {
1163   // fadd => fsub
1164   switch (Opcode) {
1165   case SystemZ::WFADB:
1166     return SystemZ::WFSDB;
1167   case SystemZ::WFASB:
1168     return SystemZ::WFSSB;
1169   case SystemZ::WFAXB:
1170     return SystemZ::WFSXB;
1171   case SystemZ::VFADB:
1172     return SystemZ::VFSDB;
1173   case SystemZ::VFASB:
1174     return SystemZ::VFSSB;
1175   // fsub => fadd
1176   case SystemZ::WFSDB:
1177     return SystemZ::WFADB;
1178   case SystemZ::WFSSB:
1179     return SystemZ::WFASB;
1180   case SystemZ::WFSXB:
1181     return SystemZ::WFAXB;
1182   case SystemZ::VFSDB:
1183     return SystemZ::VFADB;
1184   case SystemZ::VFSSB:
1185     return SystemZ::VFASB;
1186   default:
1187     return std::nullopt;
1188   }
1189 }
1190 
1191 MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl(
1192     MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
1193     MachineBasicBlock::iterator InsertPt, int FrameIndex,
1194     LiveIntervals *LIS, VirtRegMap *VRM) const {
1195   const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
1196   MachineRegisterInfo &MRI = MF.getRegInfo();
1197   const MachineFrameInfo &MFI = MF.getFrameInfo();
1198   unsigned Size = MFI.getObjectSize(FrameIndex);
1199   unsigned Opcode = MI.getOpcode();
1200 
1201   // Check CC liveness if new instruction introduces a dead def of CC.
1202   SlotIndex MISlot = SlotIndex();
1203   LiveRange *CCLiveRange = nullptr;
1204   bool CCLiveAtMI = true;
1205   if (LIS) {
1206     MISlot = LIS->getSlotIndexes()->getInstructionIndex(MI).getRegSlot();
1207     auto CCUnits = TRI->regunits(MCRegister::from(SystemZ::CC));
1208     assert(range_size(CCUnits) == 1 && "CC only has one reg unit.");
1209     CCLiveRange = &LIS->getRegUnit(*CCUnits.begin());
1210     CCLiveAtMI = CCLiveRange->liveAt(MISlot);
1211   }
1212 
1213   if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
1214     if (!CCLiveAtMI && (Opcode == SystemZ::LA || Opcode == SystemZ::LAY) &&
1215         isInt<8>(MI.getOperand(2).getImm()) && !MI.getOperand(3).getReg()) {
1216       // LA(Y) %reg, CONST(%reg) -> AGSI %mem, CONST
1217       MachineInstr *BuiltMI = BuildMI(*InsertPt->getParent(), InsertPt,
1218                                       MI.getDebugLoc(), get(SystemZ::AGSI))
1219         .addFrameIndex(FrameIndex)
1220         .addImm(0)
1221         .addImm(MI.getOperand(2).getImm());
1222       BuiltMI->findRegisterDefOperand(SystemZ::CC, /*TRI=*/nullptr)
1223           ->setIsDead(true);
1224       CCLiveRange->createDeadDef(MISlot, LIS->getVNInfoAllocator());
1225       return BuiltMI;
1226     }
1227     return nullptr;
1228   }
1229 
1230   // All other cases require a single operand.
1231   if (Ops.size() != 1)
1232     return nullptr;
1233 
1234   unsigned OpNum = Ops[0];
1235   assert(Size * 8 ==
1236            TRI->getRegSizeInBits(*MF.getRegInfo()
1237                                .getRegClass(MI.getOperand(OpNum).getReg())) &&
1238          "Invalid size combination");
1239 
1240   if ((Opcode == SystemZ::AHI || Opcode == SystemZ::AGHI) && OpNum == 0 &&
1241       isInt<8>(MI.getOperand(2).getImm())) {
1242     // A(G)HI %reg, CONST -> A(G)SI %mem, CONST
1243     Opcode = (Opcode == SystemZ::AHI ? SystemZ::ASI : SystemZ::AGSI);
1244     MachineInstr *BuiltMI =
1245         BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(), get(Opcode))
1246             .addFrameIndex(FrameIndex)
1247             .addImm(0)
1248             .addImm(MI.getOperand(2).getImm());
1249     transferDeadCC(&MI, BuiltMI);
1250     transferMIFlag(&MI, BuiltMI, MachineInstr::NoSWrap);
1251     return BuiltMI;
1252   }
1253 
1254   if ((Opcode == SystemZ::ALFI && OpNum == 0 &&
1255        isInt<8>((int32_t)MI.getOperand(2).getImm())) ||
1256       (Opcode == SystemZ::ALGFI && OpNum == 0 &&
1257        isInt<8>((int64_t)MI.getOperand(2).getImm()))) {
1258     // AL(G)FI %reg, CONST -> AL(G)SI %mem, CONST
1259     Opcode = (Opcode == SystemZ::ALFI ? SystemZ::ALSI : SystemZ::ALGSI);
1260     MachineInstr *BuiltMI =
1261         BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(), get(Opcode))
1262             .addFrameIndex(FrameIndex)
1263             .addImm(0)
1264             .addImm((int8_t)MI.getOperand(2).getImm());
1265     transferDeadCC(&MI, BuiltMI);
1266     return BuiltMI;
1267   }
1268 
1269   if ((Opcode == SystemZ::SLFI && OpNum == 0 &&
1270        isInt<8>((int32_t)-MI.getOperand(2).getImm())) ||
1271       (Opcode == SystemZ::SLGFI && OpNum == 0 &&
1272        isInt<8>((int64_t)-MI.getOperand(2).getImm()))) {
1273     // SL(G)FI %reg, CONST -> AL(G)SI %mem, -CONST
1274     Opcode = (Opcode == SystemZ::SLFI ? SystemZ::ALSI : SystemZ::ALGSI);
1275     MachineInstr *BuiltMI =
1276         BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(), get(Opcode))
1277             .addFrameIndex(FrameIndex)
1278             .addImm(0)
1279             .addImm((int8_t)-MI.getOperand(2).getImm());
1280     transferDeadCC(&MI, BuiltMI);
1281     return BuiltMI;
1282   }
1283 
1284   unsigned MemImmOpc = 0;
1285   switch (Opcode) {
1286   case SystemZ::LHIMux:
1287   case SystemZ::LHI:    MemImmOpc = SystemZ::MVHI;  break;
1288   case SystemZ::LGHI:   MemImmOpc = SystemZ::MVGHI; break;
1289   case SystemZ::CHIMux:
1290   case SystemZ::CHI:    MemImmOpc = SystemZ::CHSI;  break;
1291   case SystemZ::CGHI:   MemImmOpc = SystemZ::CGHSI; break;
1292   case SystemZ::CLFIMux:
1293   case SystemZ::CLFI:
1294     if (isUInt<16>(MI.getOperand(1).getImm()))
1295       MemImmOpc = SystemZ::CLFHSI;
1296     break;
1297   case SystemZ::CLGFI:
1298     if (isUInt<16>(MI.getOperand(1).getImm()))
1299       MemImmOpc = SystemZ::CLGHSI;
1300     break;
1301   default: break;
1302   }
1303   if (MemImmOpc)
1304     return BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(),
1305                    get(MemImmOpc))
1306                .addFrameIndex(FrameIndex)
1307                .addImm(0)
1308                .addImm(MI.getOperand(1).getImm());
1309 
1310   if (Opcode == SystemZ::LGDR || Opcode == SystemZ::LDGR) {
1311     bool Op0IsGPR = (Opcode == SystemZ::LGDR);
1312     bool Op1IsGPR = (Opcode == SystemZ::LDGR);
1313     // If we're spilling the destination of an LDGR or LGDR, store the
1314     // source register instead.
1315     if (OpNum == 0) {
1316       unsigned StoreOpcode = Op1IsGPR ? SystemZ::STG : SystemZ::STD;
1317       return BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(),
1318                      get(StoreOpcode))
1319           .add(MI.getOperand(1))
1320           .addFrameIndex(FrameIndex)
1321           .addImm(0)
1322           .addReg(0);
1323     }
1324     // If we're spilling the source of an LDGR or LGDR, load the
1325     // destination register instead.
1326     if (OpNum == 1) {
1327       unsigned LoadOpcode = Op0IsGPR ? SystemZ::LG : SystemZ::LD;
1328       return BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(),
1329                      get(LoadOpcode))
1330         .add(MI.getOperand(0))
1331         .addFrameIndex(FrameIndex)
1332         .addImm(0)
1333         .addReg(0);
1334     }
1335   }
1336 
1337   // Look for cases where the source of a simple store or the destination
1338   // of a simple load is being spilled.  Try to use MVC instead.
1339   //
1340   // Although MVC is in practice a fast choice in these cases, it is still
1341   // logically a bytewise copy.  This means that we cannot use it if the
1342   // load or store is volatile.  We also wouldn't be able to use MVC if
1343   // the two memories partially overlap, but that case cannot occur here,
1344   // because we know that one of the memories is a full frame index.
1345   //
1346   // For performance reasons, we also want to avoid using MVC if the addresses
1347   // might be equal.  We don't worry about that case here, because spill slot
1348   // coloring happens later, and because we have special code to remove
1349   // MVCs that turn out to be redundant.
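  // For example (illustrative only): if the 32-bit result of "L %r2, 0(%r3)"
  // is spilled, the load can become "MVC <spill-slot>(4), 0(%r3)".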
1350   if (OpNum == 0 && MI.hasOneMemOperand()) {
1351     MachineMemOperand *MMO = *MI.memoperands_begin();
1352     if (MMO->getSize() == Size && !MMO->isVolatile() && !MMO->isAtomic()) {
1353       // Handle conversion of loads.
1354       if (isSimpleBD12Move(&MI, SystemZII::SimpleBDXLoad)) {
1355         return BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(),
1356                        get(SystemZ::MVC))
1357             .addFrameIndex(FrameIndex)
1358             .addImm(0)
1359             .addImm(Size)
1360             .add(MI.getOperand(1))
1361             .addImm(MI.getOperand(2).getImm())
1362             .addMemOperand(MMO);
1363       }
1364       // Handle conversion of stores.
1365       if (isSimpleBD12Move(&MI, SystemZII::SimpleBDXStore)) {
1366         return BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(),
1367                        get(SystemZ::MVC))
1368             .add(MI.getOperand(1))
1369             .addImm(MI.getOperand(2).getImm())
1370             .addImm(Size)
1371             .addFrameIndex(FrameIndex)
1372             .addImm(0)
1373             .addMemOperand(MMO);
1374       }
1375     }
1376   }
1377 
1378   // If the spilled operand is the final one or the instruction is
1379   // commutable, try to change <INSN>R into <INSN>.  Don't introduce a def of
1380   // CC if it is live and MI does not define it.
1381   unsigned NumOps = MI.getNumExplicitOperands();
1382   int MemOpcode = SystemZ::getMemOpcode(Opcode);
1383   if (MemOpcode == -1 ||
1384       (CCLiveAtMI && !MI.definesRegister(SystemZ::CC, /*TRI=*/nullptr) &&
1385        get(MemOpcode).hasImplicitDefOfPhysReg(SystemZ::CC)))
1386     return nullptr;
1387 
1388   // Check if all other vregs have a usable allocation in the case of vector
1389   // to FP conversion.
1390   const MCInstrDesc &MCID = MI.getDesc();
1391   for (unsigned I = 0, E = MCID.getNumOperands(); I != E; ++I) {
1392     const MCOperandInfo &MCOI = MCID.operands()[I];
1393     if (MCOI.OperandType != MCOI::OPERAND_REGISTER || I == OpNum)
1394       continue;
1395     const TargetRegisterClass *RC = TRI->getRegClass(MCOI.RegClass);
1396     if (RC == &SystemZ::VR32BitRegClass || RC == &SystemZ::VR64BitRegClass) {
1397       Register Reg = MI.getOperand(I).getReg();
1398       Register PhysReg = Reg.isVirtual()
1399                              ? (VRM ? Register(VRM->getPhys(Reg)) : Register())
1400                              : Reg;
1401       if (!PhysReg ||
1402           !(SystemZ::FP32BitRegClass.contains(PhysReg) ||
1403             SystemZ::FP64BitRegClass.contains(PhysReg) ||
1404             SystemZ::VF128BitRegClass.contains(PhysReg)))
1405         return nullptr;
1406     }
1407   }
1408   // Fused multiply and add/sub need to have the same dst and accumulator reg.
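       // (The memory forms such as MADB are two-address: the accumulator
       // register is overwritten with the result, so destination and accumulator
       // must end up in the same physical register.)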
1409   bool FusedFPOp = (Opcode == SystemZ::WFMADB || Opcode == SystemZ::WFMASB ||
1410                     Opcode == SystemZ::WFMSDB || Opcode == SystemZ::WFMSSB);
1411   if (FusedFPOp) {
1412     Register DstReg = VRM->getPhys(MI.getOperand(0).getReg());
1413     Register AccReg = VRM->getPhys(MI.getOperand(3).getReg());
1414     if (OpNum == 0 || OpNum == 3 || DstReg != AccReg)
1415       return nullptr;
1416   }
1417 
1418   // Try to swap compare operands if possible.
1419   bool NeedsCommute = false;
1420   if ((MI.getOpcode() == SystemZ::CR || MI.getOpcode() == SystemZ::CGR ||
1421        MI.getOpcode() == SystemZ::CLR || MI.getOpcode() == SystemZ::CLGR ||
1422        MI.getOpcode() == SystemZ::WFCDB || MI.getOpcode() == SystemZ::WFCSB ||
1423        MI.getOpcode() == SystemZ::WFKDB || MI.getOpcode() == SystemZ::WFKSB) &&
1424       OpNum == 0 && prepareCompareSwapOperands(MI))
1425     NeedsCommute = true;
1426 
1427   bool CCOperands = false;
1428   if (MI.getOpcode() == SystemZ::LOCRMux || MI.getOpcode() == SystemZ::LOCGR ||
1429       MI.getOpcode() == SystemZ::SELRMux || MI.getOpcode() == SystemZ::SELGR) {
1430     assert(MI.getNumOperands() == 6 && NumOps == 5 &&
1431            "LOCR/SELR instruction operands corrupt?");
1432     NumOps -= 2;
1433     CCOperands = true;
1434   }
1435 
1436   // See if this is a 3-address instruction that is convertible to 2-address
1437   // and suitable for folding below.  Only try this with virtual registers
1438   // and a provided VRM (during regalloc).
1439   if (NumOps == 3 && SystemZ::getTargetMemOpcode(MemOpcode) != -1) {
1440     if (VRM == nullptr)
1441       return nullptr;
1442     else {
1443       Register DstReg = MI.getOperand(0).getReg();
1444       Register DstPhys =
1445           (DstReg.isVirtual() ? Register(VRM->getPhys(DstReg)) : DstReg);
1446       Register SrcReg = (OpNum == 2 ? MI.getOperand(1).getReg()
1447                                     : ((OpNum == 1 && MI.isCommutable())
1448                                            ? MI.getOperand(2).getReg()
1449                                            : Register()));
1450       if (DstPhys && !SystemZ::GRH32BitRegClass.contains(DstPhys) && SrcReg &&
1451           SrcReg.isVirtual() && DstPhys == VRM->getPhys(SrcReg))
1452         NeedsCommute = (OpNum == 1);
1453       else
1454         return nullptr;
1455     }
1456   }
1457 
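       // The memory operand of the target opcode replaces the final register
       // operand, so fold only if the spilled operand already is the last one,
       // or can be made the last one by commuting (or by the FMA reshuffling
       // below).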
1458   if ((OpNum == NumOps - 1) || NeedsCommute || FusedFPOp) {
1459     const MCInstrDesc &MemDesc = get(MemOpcode);
1460     uint64_t AccessBytes = SystemZII::getAccessSize(MemDesc.TSFlags);
1461     assert(AccessBytes != 0 && "Size of access should be known");
1462     assert(AccessBytes <= Size && "Access outside the frame index");
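         // If the access is narrower than the spill slot, address its low-order
         // bytes, which on big-endian SystemZ sit at the end of the slot.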
1463     uint64_t Offset = Size - AccessBytes;
1464     MachineInstrBuilder MIB = BuildMI(*InsertPt->getParent(), InsertPt,
1465                                       MI.getDebugLoc(), get(MemOpcode));
1466     if (MI.isCompare()) {
1467       assert(NumOps == 2 && "Expected 2 register operands for a compare.");
1468       MIB.add(MI.getOperand(NeedsCommute ? 1 : 0));
1469     }
1470     else if (FusedFPOp) {
1471       MIB.add(MI.getOperand(0));
1472       MIB.add(MI.getOperand(3));
1473       MIB.add(MI.getOperand(OpNum == 1 ? 2 : 1));
1474     }
1475     else {
1476       MIB.add(MI.getOperand(0));
1477       if (NeedsCommute)
1478         MIB.add(MI.getOperand(2));
1479       else
1480         for (unsigned I = 1; I < OpNum; ++I)
1481           MIB.add(MI.getOperand(I));
1482     }
1483     MIB.addFrameIndex(FrameIndex).addImm(Offset);
1484     if (MemDesc.TSFlags & SystemZII::HasIndex)
1485       MIB.addReg(0);
1486     if (CCOperands) {
1487       unsigned CCValid = MI.getOperand(NumOps).getImm();
1488       unsigned CCMask = MI.getOperand(NumOps + 1).getImm();
1489       MIB.addImm(CCValid);
1490       MIB.addImm(NeedsCommute ? CCMask ^ CCValid : CCMask);
1491     }
1492     if (MIB->definesRegister(SystemZ::CC, /*TRI=*/nullptr) &&
1493         (!MI.definesRegister(SystemZ::CC, /*TRI=*/nullptr) ||
1494          MI.registerDefIsDead(SystemZ::CC, /*TRI=*/nullptr))) {
1495       MIB->addRegisterDead(SystemZ::CC, TRI);
1496       if (CCLiveRange)
1497         CCLiveRange->createDeadDef(MISlot, LIS->getVNInfoAllocator());
1498     }
1499     // Constrain the register classes if converted from a vector opcode. The
1500     // allocated registers are already in an FP reg-class per the check above.
1501     for (const MachineOperand &MO : MIB->operands())
1502       if (MO.isReg() && MO.getReg().isVirtual()) {
1503         Register Reg = MO.getReg();
1504         if (MRI.getRegClass(Reg) == &SystemZ::VR32BitRegClass)
1505           MRI.setRegClass(Reg, &SystemZ::FP32BitRegClass);
1506         else if (MRI.getRegClass(Reg) == &SystemZ::VR64BitRegClass)
1507           MRI.setRegClass(Reg, &SystemZ::FP64BitRegClass);
1508         else if (MRI.getRegClass(Reg) == &SystemZ::VR128BitRegClass)
1509           MRI.setRegClass(Reg, &SystemZ::VF128BitRegClass);
1510       }
1511 
1512     transferDeadCC(&MI, MIB);
1513     transferMIFlag(&MI, MIB, MachineInstr::NoSWrap);
1514     transferMIFlag(&MI, MIB, MachineInstr::NoFPExcept);
1515     return MIB;
1516   }
1517 
1518   return nullptr;
1519 }
1520 
1521 MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl(
1522     MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
1523     MachineBasicBlock::iterator InsertPt, MachineInstr &LoadMI,
1524     LiveIntervals *LIS) const {
1525   MachineRegisterInfo *MRI = &MF.getRegInfo();
1526   MachineBasicBlock *MBB = MI.getParent();
1527 
1528   // For reassociable FP operations, any loads have been purposefully left
1529   // unfolded so that MachineCombiner can do its work on reg/reg
1530   // opcodes. After that, as many loads as possible are now folded.
1531   // TODO: This may be beneficial with other opcodes as well, since
1532   // machine-sink can move loads close to their users in a different MBB,
1533   // which the isel matcher did not see.
1534   unsigned LoadOpc = 0;
1535   unsigned RegMemOpcode = 0;
1536   const TargetRegisterClass *FPRC = nullptr;
1537   RegMemOpcode = MI.getOpcode() == SystemZ::WFADB   ? SystemZ::ADB
1538                  : MI.getOpcode() == SystemZ::WFSDB ? SystemZ::SDB
1539                  : MI.getOpcode() == SystemZ::WFMDB ? SystemZ::MDB
1540                                                     : 0;
1541   if (RegMemOpcode) {
1542     LoadOpc = SystemZ::VL64;
1543     FPRC = &SystemZ::FP64BitRegClass;
1544   } else {
1545     RegMemOpcode = MI.getOpcode() == SystemZ::WFASB   ? SystemZ::AEB
1546                    : MI.getOpcode() == SystemZ::WFSSB ? SystemZ::SEB
1547                    : MI.getOpcode() == SystemZ::WFMSB ? SystemZ::MEEB
1548                                                       : 0;
1549     if (RegMemOpcode) {
1550       LoadOpc = SystemZ::VL32;
1551       FPRC = &SystemZ::FP32BitRegClass;
1552     }
1553   }
1554   if (!RegMemOpcode || LoadMI.getOpcode() != LoadOpc)
1555     return nullptr;
1556 
1557   // If RegMemOpcode clobbers CC, first make sure CC is not live at this point.
1558   if (get(RegMemOpcode).hasImplicitDefOfPhysReg(SystemZ::CC)) {
1559     assert(LoadMI.getParent() == MI.getParent() && "Assuming a local fold.");
1560     assert(LoadMI != InsertPt && "Assuming InsertPt not to be first in MBB.");
1561     for (MachineBasicBlock::iterator MII = std::prev(InsertPt);;
1562          --MII) {
1563       if (MII->definesRegister(SystemZ::CC, /*TRI=*/nullptr)) {
1564         if (!MII->registerDefIsDead(SystemZ::CC, /*TRI=*/nullptr))
1565           return nullptr;
1566         break;
1567       }
1568       if (MII == MBB->begin()) {
1569         if (MBB->isLiveIn(SystemZ::CC))
1570           return nullptr;
1571         break;
1572       }
1573     }
1574   }
1575 
1576   Register FoldAsLoadDefReg = LoadMI.getOperand(0).getReg();
1577   if (Ops.size() != 1 || FoldAsLoadDefReg != MI.getOperand(Ops[0]).getReg())
1578     return nullptr;
1579   Register DstReg = MI.getOperand(0).getReg();
1580   MachineOperand LHS = MI.getOperand(1);
1581   MachineOperand RHS = MI.getOperand(2);
1582   MachineOperand &RegMO = RHS.getReg() == FoldAsLoadDefReg ? LHS : RHS;
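       // SDB/SEB subtract their memory operand, so the load can only be folded
       // if it feeds the second (subtrahend) operand; otherwise bail out.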
1583   if ((RegMemOpcode == SystemZ::SDB || RegMemOpcode == SystemZ::SEB) &&
1584       FoldAsLoadDefReg != RHS.getReg())
1585     return nullptr;
1586 
1587   MachineOperand &Base = LoadMI.getOperand(1);
1588   MachineOperand &Disp = LoadMI.getOperand(2);
1589   MachineOperand &Indx = LoadMI.getOperand(3);
1590   MachineInstrBuilder MIB =
1591       BuildMI(*MI.getParent(), InsertPt, MI.getDebugLoc(), get(RegMemOpcode), DstReg)
1592           .add(RegMO)
1593           .add(Base)
1594           .add(Disp)
1595           .add(Indx);
1596   MIB->addRegisterDead(SystemZ::CC, &RI);
1597   MRI->setRegClass(DstReg, FPRC);
1598   MRI->setRegClass(RegMO.getReg(), FPRC);
1599   transferMIFlag(&MI, MIB, MachineInstr::NoFPExcept);
1600 
1601   return MIB;
1602 }
1603 
1604 bool SystemZInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
1605   switch (MI.getOpcode()) {
1606   case SystemZ::L128:
1607     splitMove(MI, SystemZ::LG);
1608     return true;
1609 
1610   case SystemZ::ST128:
1611     splitMove(MI, SystemZ::STG);
1612     return true;
1613 
1614   case SystemZ::LX:
1615     splitMove(MI, SystemZ::LD);
1616     return true;
1617 
1618   case SystemZ::STX:
1619     splitMove(MI, SystemZ::STD);
1620     return true;
1621 
1622   case SystemZ::LBMux:
1623     expandRXYPseudo(MI, SystemZ::LB, SystemZ::LBH);
1624     return true;
1625 
1626   case SystemZ::LHMux:
1627     expandRXYPseudo(MI, SystemZ::LH, SystemZ::LHH);
1628     return true;
1629 
1630   case SystemZ::LLCRMux:
1631     expandZExtPseudo(MI, SystemZ::LLCR, 8);
1632     return true;
1633 
1634   case SystemZ::LLHRMux:
1635     expandZExtPseudo(MI, SystemZ::LLHR, 16);
1636     return true;
1637 
1638   case SystemZ::LLCMux:
1639     expandRXYPseudo(MI, SystemZ::LLC, SystemZ::LLCH);
1640     return true;
1641 
1642   case SystemZ::LLHMux:
1643     expandRXYPseudo(MI, SystemZ::LLH, SystemZ::LLHH);
1644     return true;
1645 
1646   case SystemZ::LMux:
1647     expandRXYPseudo(MI, SystemZ::L, SystemZ::LFH);
1648     return true;
1649 
1650   case SystemZ::LOCMux:
1651     expandLOCPseudo(MI, SystemZ::LOC, SystemZ::LOCFH);
1652     return true;
1653 
1654   case SystemZ::LOCHIMux:
1655     expandLOCPseudo(MI, SystemZ::LOCHI, SystemZ::LOCHHI);
1656     return true;
1657 
1658   case SystemZ::STCMux:
1659     expandRXYPseudo(MI, SystemZ::STC, SystemZ::STCH);
1660     return true;
1661 
1662   case SystemZ::STHMux:
1663     expandRXYPseudo(MI, SystemZ::STH, SystemZ::STHH);
1664     return true;
1665 
1666   case SystemZ::STMux:
1667     expandRXYPseudo(MI, SystemZ::ST, SystemZ::STFH);
1668     return true;
1669 
1670   case SystemZ::STOCMux:
1671     expandLOCPseudo(MI, SystemZ::STOC, SystemZ::STOCFH);
1672     return true;
1673 
1674   case SystemZ::LHIMux:
1675     expandRIPseudo(MI, SystemZ::LHI, SystemZ::IIHF, true);
1676     return true;
1677 
1678   case SystemZ::IIFMux:
1679     expandRIPseudo(MI, SystemZ::IILF, SystemZ::IIHF, false);
1680     return true;
1681 
1682   case SystemZ::IILMux:
1683     expandRIPseudo(MI, SystemZ::IILL, SystemZ::IIHL, false);
1684     return true;
1685 
1686   case SystemZ::IIHMux:
1687     expandRIPseudo(MI, SystemZ::IILH, SystemZ::IIHH, false);
1688     return true;
1689 
1690   case SystemZ::NIFMux:
1691     expandRIPseudo(MI, SystemZ::NILF, SystemZ::NIHF, false);
1692     return true;
1693 
1694   case SystemZ::NILMux:
1695     expandRIPseudo(MI, SystemZ::NILL, SystemZ::NIHL, false);
1696     return true;
1697 
1698   case SystemZ::NIHMux:
1699     expandRIPseudo(MI, SystemZ::NILH, SystemZ::NIHH, false);
1700     return true;
1701 
1702   case SystemZ::OIFMux:
1703     expandRIPseudo(MI, SystemZ::OILF, SystemZ::OIHF, false);
1704     return true;
1705 
1706   case SystemZ::OILMux:
1707     expandRIPseudo(MI, SystemZ::OILL, SystemZ::OIHL, false);
1708     return true;
1709 
1710   case SystemZ::OIHMux:
1711     expandRIPseudo(MI, SystemZ::OILH, SystemZ::OIHH, false);
1712     return true;
1713 
1714   case SystemZ::XIFMux:
1715     expandRIPseudo(MI, SystemZ::XILF, SystemZ::XIHF, false);
1716     return true;
1717 
1718   case SystemZ::TMLMux:
1719     expandRIPseudo(MI, SystemZ::TMLL, SystemZ::TMHL, false);
1720     return true;
1721 
1722   case SystemZ::TMHMux:
1723     expandRIPseudo(MI, SystemZ::TMLH, SystemZ::TMHH, false);
1724     return true;
1725 
1726   case SystemZ::AHIMux:
1727     expandRIPseudo(MI, SystemZ::AHI, SystemZ::AIH, false);
1728     return true;
1729 
1730   case SystemZ::AHIMuxK:
1731     expandRIEPseudo(MI, SystemZ::AHI, SystemZ::AHIK, SystemZ::AIH);
1732     return true;
1733 
1734   case SystemZ::AFIMux:
1735     expandRIPseudo(MI, SystemZ::AFI, SystemZ::AIH, false);
1736     return true;
1737 
1738   case SystemZ::CHIMux:
1739     expandRIPseudo(MI, SystemZ::CHI, SystemZ::CIH, false);
1740     return true;
1741 
1742   case SystemZ::CFIMux:
1743     expandRIPseudo(MI, SystemZ::CFI, SystemZ::CIH, false);
1744     return true;
1745 
1746   case SystemZ::CLFIMux:
1747     expandRIPseudo(MI, SystemZ::CLFI, SystemZ::CLIH, false);
1748     return true;
1749 
1750   case SystemZ::CMux:
1751     expandRXYPseudo(MI, SystemZ::C, SystemZ::CHF);
1752     return true;
1753 
1754   case SystemZ::CLMux:
1755     expandRXYPseudo(MI, SystemZ::CL, SystemZ::CLHF);
1756     return true;
1757 
1758   case SystemZ::RISBMux: {
1759     bool DestIsHigh = SystemZ::isHighReg(MI.getOperand(0).getReg());
1760     bool SrcIsHigh = SystemZ::isHighReg(MI.getOperand(2).getReg());
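         // If source and destination live in the same 32-bit half of the GR64,
         // a rotate-and-insert within that half suffices; otherwise use a
         // cross-half variant and adjust the rotate amount (operand 5) by 32.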
1761     if (SrcIsHigh == DestIsHigh)
1762       MI.setDesc(get(DestIsHigh ? SystemZ::RISBHH : SystemZ::RISBLL));
1763     else {
1764       MI.setDesc(get(DestIsHigh ? SystemZ::RISBHL : SystemZ::RISBLH));
1765       MI.getOperand(5).setImm(MI.getOperand(5).getImm() ^ 32);
1766     }
1767     return true;
1768   }
1769 
1770   case SystemZ::ADJDYNALLOC:
1771     splitAdjDynAlloc(MI);
1772     return true;
1773 
1774   case TargetOpcode::LOAD_STACK_GUARD:
1775     expandLoadStackGuard(&MI);
1776     return true;
1777 
1778   default:
1779     return false;
1780   }
1781 }
1782 
1783 unsigned SystemZInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
1784   if (MI.isInlineAsm()) {
1785     const MachineFunction *MF = MI.getParent()->getParent();
1786     const char *AsmStr = MI.getOperand(0).getSymbolName();
1787     return getInlineAsmLength(AsmStr, *MF->getTarget().getMCAsmInfo());
1788   }
1789   if (MI.getOpcode() == SystemZ::PATCHPOINT)
1790     return PatchPointOpers(&MI).getNumPatchBytes();
1791   if (MI.getOpcode() == SystemZ::STACKMAP)
1792     return MI.getOperand(1).getImm();
1793   if (MI.getOpcode() == SystemZ::FENTRY_CALL)
1794     return 6;
1795   if (MI.getOpcode() == TargetOpcode::PATCHABLE_FUNCTION_ENTER)
1796     return 18;
1797   if (MI.getOpcode() == TargetOpcode::PATCHABLE_RET)
1798     return 18 + (MI.getOperand(0).getImm() == SystemZ::CondReturn ? 4 : 0);
1799 
1800   return MI.getDesc().getSize();
1801 }
1802 
1803 SystemZII::Branch
1804 SystemZInstrInfo::getBranchInfo(const MachineInstr &MI) const {
1805   switch (MI.getOpcode()) {
1806   case SystemZ::BR:
1807   case SystemZ::BI:
1808   case SystemZ::J:
1809   case SystemZ::JG:
1810     return SystemZII::Branch(SystemZII::BranchNormal, SystemZ::CCMASK_ANY,
1811                              SystemZ::CCMASK_ANY, &MI.getOperand(0));
1812 
1813   case SystemZ::BRC:
1814   case SystemZ::BRCL:
1815     return SystemZII::Branch(SystemZII::BranchNormal, MI.getOperand(0).getImm(),
1816                              MI.getOperand(1).getImm(), &MI.getOperand(2));
1817 
1818   case SystemZ::BRCT:
1819   case SystemZ::BRCTH:
1820     return SystemZII::Branch(SystemZII::BranchCT, SystemZ::CCMASK_ICMP,
1821                              SystemZ::CCMASK_CMP_NE, &MI.getOperand(2));
1822 
1823   case SystemZ::BRCTG:
1824     return SystemZII::Branch(SystemZII::BranchCTG, SystemZ::CCMASK_ICMP,
1825                              SystemZ::CCMASK_CMP_NE, &MI.getOperand(2));
1826 
1827   case SystemZ::CIJ:
1828   case SystemZ::CRJ:
1829     return SystemZII::Branch(SystemZII::BranchC, SystemZ::CCMASK_ICMP,
1830                              MI.getOperand(2).getImm(), &MI.getOperand(3));
1831 
1832   case SystemZ::CLIJ:
1833   case SystemZ::CLRJ:
1834     return SystemZII::Branch(SystemZII::BranchCL, SystemZ::CCMASK_ICMP,
1835                              MI.getOperand(2).getImm(), &MI.getOperand(3));
1836 
1837   case SystemZ::CGIJ:
1838   case SystemZ::CGRJ:
1839     return SystemZII::Branch(SystemZII::BranchCG, SystemZ::CCMASK_ICMP,
1840                              MI.getOperand(2).getImm(), &MI.getOperand(3));
1841 
1842   case SystemZ::CLGIJ:
1843   case SystemZ::CLGRJ:
1844     return SystemZII::Branch(SystemZII::BranchCLG, SystemZ::CCMASK_ICMP,
1845                              MI.getOperand(2).getImm(), &MI.getOperand(3));
1846 
1847   case SystemZ::INLINEASM_BR:
1848     // Don't try to analyze asm goto, so pass nullptr as branch target argument.
1849     return SystemZII::Branch(SystemZII::AsmGoto, 0, 0, nullptr);
1850 
1851   default:
1852     llvm_unreachable("Unrecognized branch opcode");
1853   }
1854 }
1855 
1856 void SystemZInstrInfo::getLoadStoreOpcodes(const TargetRegisterClass *RC,
1857                                            unsigned &LoadOpcode,
1858                                            unsigned &StoreOpcode) const {
1859   if (RC == &SystemZ::GR32BitRegClass || RC == &SystemZ::ADDR32BitRegClass) {
1860     LoadOpcode = SystemZ::L;
1861     StoreOpcode = SystemZ::ST;
1862   } else if (RC == &SystemZ::GRH32BitRegClass) {
1863     LoadOpcode = SystemZ::LFH;
1864     StoreOpcode = SystemZ::STFH;
1865   } else if (RC == &SystemZ::GRX32BitRegClass) {
1866     LoadOpcode = SystemZ::LMux;
1867     StoreOpcode = SystemZ::STMux;
1868   } else if (RC == &SystemZ::GR64BitRegClass ||
1869              RC == &SystemZ::ADDR64BitRegClass) {
1870     LoadOpcode = SystemZ::LG;
1871     StoreOpcode = SystemZ::STG;
1872   } else if (RC == &SystemZ::GR128BitRegClass ||
1873              RC == &SystemZ::ADDR128BitRegClass) {
1874     LoadOpcode = SystemZ::L128;
1875     StoreOpcode = SystemZ::ST128;
1876   } else if (RC == &SystemZ::FP32BitRegClass) {
1877     LoadOpcode = SystemZ::LE;
1878     StoreOpcode = SystemZ::STE;
1879   } else if (RC == &SystemZ::FP64BitRegClass) {
1880     LoadOpcode = SystemZ::LD;
1881     StoreOpcode = SystemZ::STD;
1882   } else if (RC == &SystemZ::FP128BitRegClass) {
1883     LoadOpcode = SystemZ::LX;
1884     StoreOpcode = SystemZ::STX;
1885   } else if (RC == &SystemZ::VR32BitRegClass) {
1886     LoadOpcode = SystemZ::VL32;
1887     StoreOpcode = SystemZ::VST32;
1888   } else if (RC == &SystemZ::VR64BitRegClass) {
1889     LoadOpcode = SystemZ::VL64;
1890     StoreOpcode = SystemZ::VST64;
1891   } else if (RC == &SystemZ::VF128BitRegClass ||
1892              RC == &SystemZ::VR128BitRegClass) {
1893     LoadOpcode = SystemZ::VL;
1894     StoreOpcode = SystemZ::VST;
1895   } else
1896     llvm_unreachable("Unsupported regclass to load or store");
1897 }
1898 
1899 unsigned SystemZInstrInfo::getOpcodeForOffset(unsigned Opcode,
1900                                               int64_t Offset,
1901                                               const MachineInstr *MI) const {
1902   const MCInstrDesc &MCID = get(Opcode);
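       // 128-bit pseudos are later split into two accesses, the second at
       // Offset + 8, so both displacements must be representable.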
1903   int64_t Offset2 = (MCID.TSFlags & SystemZII::Is128Bit ? Offset + 8 : Offset);
1904   if (isUInt<12>(Offset) && isUInt<12>(Offset2)) {
1905     // Get the instruction to use for unsigned 12-bit displacements.
1906     int Disp12Opcode = SystemZ::getDisp12Opcode(Opcode);
1907     if (Disp12Opcode >= 0)
1908       return Disp12Opcode;
1909 
1910     // All address-related instructions can use unsigned 12-bit
1911     // displacements.
1912     return Opcode;
1913   }
1914   if (isInt<20>(Offset) && isInt<20>(Offset2)) {
1915     // Get the instruction to use for signed 20-bit displacements.
1916     int Disp20Opcode = SystemZ::getDisp20Opcode(Opcode);
1917     if (Disp20Opcode >= 0)
1918       return Disp20Opcode;
1919 
1920     // Check whether Opcode allows signed 20-bit displacements.
1921     if (MCID.TSFlags & SystemZII::Has20BitOffset)
1922       return Opcode;
1923 
1924     // If a VR32/VR64 reg ended up in an FP register, use the FP opcode.
1925     if (MI && MI->getOperand(0).isReg()) {
1926       Register Reg = MI->getOperand(0).getReg();
1927       if (Reg.isPhysical() && SystemZMC::getFirstReg(Reg) < 16) {
1928         switch (Opcode) {
1929         case SystemZ::VL32:
1930           return SystemZ::LEY;
1931         case SystemZ::VST32:
1932           return SystemZ::STEY;
1933         case SystemZ::VL64:
1934           return SystemZ::LDY;
1935         case SystemZ::VST64:
1936           return SystemZ::STDY;
1937         default: break;
1938         }
1939       }
1940     }
1941   }
1942   return 0;
1943 }
1944 
1945 bool SystemZInstrInfo::hasDisplacementPairInsn(unsigned Opcode) const {
1946   const MCInstrDesc &MCID = get(Opcode);
1947   if (MCID.TSFlags & SystemZII::Has20BitOffset)
1948     return SystemZ::getDisp12Opcode(Opcode) >= 0;
1949   return SystemZ::getDisp20Opcode(Opcode) >= 0;
1950 }
1951 
1952 unsigned SystemZInstrInfo::getLoadAndTest(unsigned Opcode) const {
1953   switch (Opcode) {
1954   case SystemZ::L:      return SystemZ::LT;
1955   case SystemZ::LY:     return SystemZ::LT;
1956   case SystemZ::LG:     return SystemZ::LTG;
1957   case SystemZ::LGF:    return SystemZ::LTGF;
1958   case SystemZ::LR:     return SystemZ::LTR;
1959   case SystemZ::LGFR:   return SystemZ::LTGFR;
1960   case SystemZ::LGR:    return SystemZ::LTGR;
1961   case SystemZ::LCDFR:  return SystemZ::LCDBR;
1962   case SystemZ::LPDFR:  return SystemZ::LPDBR;
1963   case SystemZ::LNDFR:  return SystemZ::LNDBR;
1964   case SystemZ::LCDFR_32:  return SystemZ::LCEBR;
1965   case SystemZ::LPDFR_32:  return SystemZ::LPEBR;
1966   case SystemZ::LNDFR_32:  return SystemZ::LNEBR;
1967   // On zEC12 we prefer to use RISBGN.  But if there is a chance to
1968   // actually use the condition code, we may turn it back into RISBG.
1969   // Note that RISBG is not really a "load-and-test" instruction,
1970   // but sets the same condition code values, so is OK to use here.
1971   case SystemZ::RISBGN: return SystemZ::RISBG;
1972   default:              return 0;
1973   }
1974 }
1975 
1976 bool SystemZInstrInfo::isRxSBGMask(uint64_t Mask, unsigned BitSize,
1977                                    unsigned &Start, unsigned &End) const {
1978   // Reject trivial all-zero masks.
1979   Mask &= allOnes(BitSize);
1980   if (Mask == 0)
1981     return false;
1982 
1983   // Handle the 1+0+ or 0+1+0* cases.  Start then specifies the index of
1984   // the msb and End specifies the index of the lsb.
1985   unsigned LSB, Length;
1986   if (isShiftedMask_64(Mask, LSB, Length)) {
1987     Start = 63 - (LSB + Length - 1);
1988     End = 63 - LSB;
1989     return true;
1990   }
1991 
1992   // Handle the wrap-around 1+0+1+ cases.  Start then specifies the msb
1993   // of the low 1s and End specifies the lsb of the high 1s.
1994   if (isShiftedMask_64(Mask ^ allOnes(BitSize), LSB, Length)) {
1995     assert(LSB > 0 && "Bottom bit must be set");
1996     assert(LSB + Length < BitSize && "Top bit must be set");
1997     Start = 63 - (LSB - 1);
1998     End = 63 - (LSB + Length);
1999     return true;
2000   }
2001 
2002   return false;
2003 }
2004 
2005 unsigned SystemZInstrInfo::getFusedCompare(unsigned Opcode,
2006                                            SystemZII::FusedCompareType Type,
2007                                            const MachineInstr *MI) const {
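       // Immediate compares can only be fused when the value fits in 8 bits,
       // and CL/CLG only have fused forms (for compare-and-trap) when the
       // miscellaneous-extensions facility is available and no index register
       // is used.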
2008   switch (Opcode) {
2009   case SystemZ::CHI:
2010   case SystemZ::CGHI:
2011     if (!(MI && isInt<8>(MI->getOperand(1).getImm())))
2012       return 0;
2013     break;
2014   case SystemZ::CLFI:
2015   case SystemZ::CLGFI:
2016     if (!(MI && isUInt<8>(MI->getOperand(1).getImm())))
2017       return 0;
2018     break;
2019   case SystemZ::CL:
2020   case SystemZ::CLG:
2021     if (!STI.hasMiscellaneousExtensions())
2022       return 0;
2023     if (!(MI && MI->getOperand(3).getReg() == 0))
2024       return 0;
2025     break;
2026   }
2027   switch (Type) {
2028   case SystemZII::CompareAndBranch:
2029     switch (Opcode) {
2030     case SystemZ::CR:
2031       return SystemZ::CRJ;
2032     case SystemZ::CGR:
2033       return SystemZ::CGRJ;
2034     case SystemZ::CHI:
2035       return SystemZ::CIJ;
2036     case SystemZ::CGHI:
2037       return SystemZ::CGIJ;
2038     case SystemZ::CLR:
2039       return SystemZ::CLRJ;
2040     case SystemZ::CLGR:
2041       return SystemZ::CLGRJ;
2042     case SystemZ::CLFI:
2043       return SystemZ::CLIJ;
2044     case SystemZ::CLGFI:
2045       return SystemZ::CLGIJ;
2046     default:
2047       return 0;
2048     }
2049   case SystemZII::CompareAndReturn:
2050     switch (Opcode) {
2051     case SystemZ::CR:
2052       return SystemZ::CRBReturn;
2053     case SystemZ::CGR:
2054       return SystemZ::CGRBReturn;
2055     case SystemZ::CHI:
2056       return SystemZ::CIBReturn;
2057     case SystemZ::CGHI:
2058       return SystemZ::CGIBReturn;
2059     case SystemZ::CLR:
2060       return SystemZ::CLRBReturn;
2061     case SystemZ::CLGR:
2062       return SystemZ::CLGRBReturn;
2063     case SystemZ::CLFI:
2064       return SystemZ::CLIBReturn;
2065     case SystemZ::CLGFI:
2066       return SystemZ::CLGIBReturn;
2067     default:
2068       return 0;
2069     }
2070   case SystemZII::CompareAndSibcall:
2071     switch (Opcode) {
2072     case SystemZ::CR:
2073       return SystemZ::CRBCall;
2074     case SystemZ::CGR:
2075       return SystemZ::CGRBCall;
2076     case SystemZ::CHI:
2077       return SystemZ::CIBCall;
2078     case SystemZ::CGHI:
2079       return SystemZ::CGIBCall;
2080     case SystemZ::CLR:
2081       return SystemZ::CLRBCall;
2082     case SystemZ::CLGR:
2083       return SystemZ::CLGRBCall;
2084     case SystemZ::CLFI:
2085       return SystemZ::CLIBCall;
2086     case SystemZ::CLGFI:
2087       return SystemZ::CLGIBCall;
2088     default:
2089       return 0;
2090     }
2091   case SystemZII::CompareAndTrap:
2092     switch (Opcode) {
2093     case SystemZ::CR:
2094       return SystemZ::CRT;
2095     case SystemZ::CGR:
2096       return SystemZ::CGRT;
2097     case SystemZ::CHI:
2098       return SystemZ::CIT;
2099     case SystemZ::CGHI:
2100       return SystemZ::CGIT;
2101     case SystemZ::CLR:
2102       return SystemZ::CLRT;
2103     case SystemZ::CLGR:
2104       return SystemZ::CLGRT;
2105     case SystemZ::CLFI:
2106       return SystemZ::CLFIT;
2107     case SystemZ::CLGFI:
2108       return SystemZ::CLGIT;
2109     case SystemZ::CL:
2110       return SystemZ::CLT;
2111     case SystemZ::CLG:
2112       return SystemZ::CLGT;
2113     default:
2114       return 0;
2115     }
2116   }
2117   return 0;
2118 }
2119 
2120 bool SystemZInstrInfo::
2121 prepareCompareSwapOperands(MachineBasicBlock::iterator const MBBI) const {
2122   assert(MBBI->isCompare() && MBBI->getOperand(0).isReg() &&
2123          MBBI->getOperand(1).isReg() && !MBBI->mayLoad() &&
2124          "Not a compare reg/reg.");
2125 
2126   MachineBasicBlock *MBB = MBBI->getParent();
2127   bool CCLive = true;
2128   SmallVector<MachineInstr *, 4> CCUsers;
2129   for (MachineInstr &MI : llvm::make_range(std::next(MBBI), MBB->end())) {
2130     if (MI.readsRegister(SystemZ::CC, /*TRI=*/nullptr)) {
2131       unsigned Flags = MI.getDesc().TSFlags;
2132       if ((Flags & SystemZII::CCMaskFirst) || (Flags & SystemZII::CCMaskLast))
2133         CCUsers.push_back(&MI);
2134       else
2135         return false;
2136     }
2137     if (MI.definesRegister(SystemZ::CC, /*TRI=*/nullptr)) {
2138       CCLive = false;
2139       break;
2140     }
2141   }
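       // If nothing in the rest of the block clobbers CC, it may still be live
       // out, in which case there could be CC users we cannot rewrite; only
       // proceed if CC is not live across the block boundary.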
2142   if (CCLive) {
2143     LiveRegUnits LiveRegs(*MBB->getParent()->getSubtarget().getRegisterInfo());
2144     LiveRegs.addLiveOuts(*MBB);
2145     if (!LiveRegs.available(SystemZ::CC))
2146       return false;
2147   }
2148 
2149   // Update all CC users.
2150   for (unsigned Idx = 0; Idx < CCUsers.size(); ++Idx) {
2151     unsigned Flags = CCUsers[Idx]->getDesc().TSFlags;
2152     unsigned FirstOpNum = ((Flags & SystemZII::CCMaskFirst) ?
2153                            0 : CCUsers[Idx]->getNumExplicitOperands() - 2);
2154     MachineOperand &CCMaskMO = CCUsers[Idx]->getOperand(FirstOpNum + 1);
2155     unsigned NewCCMask = SystemZ::reverseCCMask(CCMaskMO.getImm());
2156     CCMaskMO.setImm(NewCCMask);
2157   }
2158 
2159   return true;
2160 }
2161 
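     // Return the given CC mask with the LT and GT bits swapped, as needed after
     // swapping the operands of a comparison; EQ and unordered are unaffected.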
2162 unsigned SystemZ::reverseCCMask(unsigned CCMask) {
2163   return ((CCMask & SystemZ::CCMASK_CMP_EQ) |
2164           ((CCMask & SystemZ::CCMASK_CMP_GT) ? SystemZ::CCMASK_CMP_LT : 0) |
2165           ((CCMask & SystemZ::CCMASK_CMP_LT) ? SystemZ::CCMASK_CMP_GT : 0) |
2166           (CCMask & SystemZ::CCMASK_CMP_UO));
2167 }
2168 
2169 MachineBasicBlock *SystemZ::emitBlockAfter(MachineBasicBlock *MBB) {
2170   MachineFunction &MF = *MBB->getParent();
2171   MachineBasicBlock *NewMBB = MF.CreateMachineBasicBlock(MBB->getBasicBlock());
2172   MF.insert(std::next(MachineFunction::iterator(MBB)), NewMBB);
2173   return NewMBB;
2174 }
2175 
2176 MachineBasicBlock *SystemZ::splitBlockAfter(MachineBasicBlock::iterator MI,
2177                                             MachineBasicBlock *MBB) {
2178   MachineBasicBlock *NewMBB = emitBlockAfter(MBB);
2179   NewMBB->splice(NewMBB->begin(), MBB,
2180                  std::next(MachineBasicBlock::iterator(MI)), MBB->end());
2181   NewMBB->transferSuccessorsAndUpdatePHIs(MBB);
2182   return NewMBB;
2183 }
2184 
2185 MachineBasicBlock *SystemZ::splitBlockBefore(MachineBasicBlock::iterator MI,
2186                                              MachineBasicBlock *MBB) {
2187   MachineBasicBlock *NewMBB = emitBlockAfter(MBB);
2188   NewMBB->splice(NewMBB->begin(), MBB, MI, MBB->end());
2189   NewMBB->transferSuccessorsAndUpdatePHIs(MBB);
2190   return NewMBB;
2191 }
2192 
2193 unsigned SystemZInstrInfo::getLoadAndTrap(unsigned Opcode) const {
2194   if (!STI.hasLoadAndTrap())
2195     return 0;
2196   switch (Opcode) {
2197   case SystemZ::L:
2198   case SystemZ::LY:
2199     return SystemZ::LAT;
2200   case SystemZ::LG:
2201     return SystemZ::LGAT;
2202   case SystemZ::LFH:
2203     return SystemZ::LFHAT;
2204   case SystemZ::LLGF:
2205     return SystemZ::LLGFAT;
2206   case SystemZ::LLGT:
2207     return SystemZ::LLGTAT;
2208   }
2209   return 0;
2210 }
2211 
2212 void SystemZInstrInfo::loadImmediate(MachineBasicBlock &MBB,
2213                                      MachineBasicBlock::iterator MBBI,
2214                                      unsigned Reg, uint64_t Value) const {
2215   DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
2216   unsigned Opcode = 0;
2217   if (isInt<16>(Value))
2218     Opcode = SystemZ::LGHI;
2219   else if (SystemZ::isImmLL(Value))
2220     Opcode = SystemZ::LLILL;
2221   else if (SystemZ::isImmLH(Value)) {
2222     Opcode = SystemZ::LLILH;
2223     Value >>= 16;
2224   }
2225   else if (isInt<32>(Value))
2226     Opcode = SystemZ::LGFI;
2227   if (Opcode) {
2228     BuildMI(MBB, MBBI, DL, get(Opcode), Reg).addImm(Value);
2229     return;
2230   }
2231 
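       // None of the single-instruction forms above apply, so build the value in
       // two halves: IIHF sets the high 32 bits and IILF the low 32 bits. This
       // is only done before register allocation, while still in SSA form.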
2232   MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
2233   assert(MRI.isSSA() && "Huge values only handled before reg-alloc.");
2234   Register Reg0 = MRI.createVirtualRegister(&SystemZ::GR64BitRegClass);
2235   Register Reg1 = MRI.createVirtualRegister(&SystemZ::GR64BitRegClass);
2236   BuildMI(MBB, MBBI, DL, get(SystemZ::IMPLICIT_DEF), Reg0);
2237   BuildMI(MBB, MBBI, DL, get(SystemZ::IIHF64), Reg1)
2238     .addReg(Reg0).addImm(Value >> 32);
2239   BuildMI(MBB, MBBI, DL, get(SystemZ::IILF64), Reg)
2240     .addReg(Reg1).addImm(Value & ((uint64_t(1) << 32) - 1));
2241 }
2242 
2243 bool SystemZInstrInfo::verifyInstruction(const MachineInstr &MI,
2244                                          StringRef &ErrInfo) const {
2245   const MCInstrDesc &MCID = MI.getDesc();
2246   for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) {
2247     if (I >= MCID.getNumOperands())
2248       break;
2249     const MachineOperand &Op = MI.getOperand(I);
2250     const MCOperandInfo &MCOI = MCID.operands()[I];
2251     // Addressing modes have register and immediate operands. Op should be a
2252     // register (or frame index) operand if MCOI.RegClass contains a valid
2253     // register class, or an immediate otherwise.
2254     if (MCOI.OperandType == MCOI::OPERAND_MEMORY &&
2255         ((MCOI.RegClass != -1 && !Op.isReg() && !Op.isFI()) ||
2256          (MCOI.RegClass == -1 && !Op.isImm()))) {
2257       ErrInfo = "Addressing mode operands corrupt!";
2258       return false;
2259     }
2260   }
2261 
2262   return true;
2263 }
2264 
2265 bool SystemZInstrInfo::
2266 areMemAccessesTriviallyDisjoint(const MachineInstr &MIa,
2267                                 const MachineInstr &MIb) const {
2268 
2269   if (!MIa.hasOneMemOperand() || !MIb.hasOneMemOperand())
2270     return false;
2271 
2272   // If the memory operands show that both instructions use the same
2273   // address Value, check for non-overlapping offsets and widths. It is
2274   // unclear whether a register-based analysis would be an improvement.
2275 
2276   MachineMemOperand *MMOa = *MIa.memoperands_begin();
2277   MachineMemOperand *MMOb = *MIb.memoperands_begin();
2278   const Value *VALa = MMOa->getValue();
2279   const Value *VALb = MMOb->getValue();
2280   bool SameVal = (VALa && VALb && (VALa == VALb));
2281   if (!SameVal) {
2282     const PseudoSourceValue *PSVa = MMOa->getPseudoValue();
2283     const PseudoSourceValue *PSVb = MMOb->getPseudoValue();
2284     if (PSVa && PSVb && (PSVa == PSVb))
2285       SameVal = true;
2286   }
2287   if (SameVal) {
2288     int OffsetA = MMOa->getOffset(), OffsetB = MMOb->getOffset();
2289     LocationSize WidthA = MMOa->getSize(), WidthB = MMOb->getSize();
2290     int LowOffset = OffsetA < OffsetB ? OffsetA : OffsetB;
2291     int HighOffset = OffsetA < OffsetB ? OffsetB : OffsetA;
2292     LocationSize LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
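         // The accesses are disjoint if the lower one ends at or before the
         // start of the higher one.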
2293     if (LowWidth.hasValue() &&
2294         LowOffset + (int)LowWidth.getValue() <= HighOffset)
2295       return true;
2296   }
2297 
2298   return false;
2299 }
2300 
2301 bool SystemZInstrInfo::getConstValDefinedInReg(const MachineInstr &MI,
2302                                                const Register Reg,
2303                                                int64_t &ImmVal) const {
2304 
2305   if (MI.getOpcode() == SystemZ::VGBM && Reg == MI.getOperand(0).getReg()) {
2306     ImmVal = MI.getOperand(1).getImm();
2307     // TODO: Handle non-0 values
2308     return ImmVal == 0;
2309   }
2310 
2311   return false;
2312 }
2313