1 //===-- SystemZISelDAGToDAG.cpp - A dag to dag inst selector for SystemZ --===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines an instruction selector for the SystemZ target.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "SystemZTargetMachine.h"
14 #include "SystemZISelLowering.h"
15 #include "llvm/Analysis/AliasAnalysis.h"
16 #include "llvm/CodeGen/SelectionDAGISel.h"
17 #include "llvm/Support/Debug.h"
18 #include "llvm/Support/KnownBits.h"
19 #include "llvm/Support/raw_ostream.h"
20 
21 using namespace llvm;
22 
23 #define DEBUG_TYPE "systemz-isel"
24 #define PASS_NAME "SystemZ DAG->DAG Pattern Instruction Selection"
25 
26 namespace {
27 // Used to build addressing modes.
28 struct SystemZAddressingMode {
29   // The shape of the address.
30   enum AddrForm {
31     // base+displacement
32     FormBD,
33 
34     // base+displacement+index for load and store operands
35     FormBDXNormal,
36 
37     // base+displacement+index for load address operands
38     FormBDXLA,
39 
40     // base+displacement+index+ADJDYNALLOC
41     FormBDXDynAlloc
42   };
43   AddrForm Form;
44 
45   // The type of displacement.  The enum names here correspond directly
46   // to the definitions in SystemZOperand.td.  We could split them into
47   // flags -- single/pair, 128-bit, etc. -- but it hardly seems worth it.
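  // Disp12* forms take a 12-bit unsigned displacement (as in L or ST), while
  // Disp20* forms take a 20-bit signed displacement (as in LY or STY).  The
  // "Pair" variants are used when both a 12-bit and a 20-bit instruction
  // exist, so that isValidDisp() below can choose between them.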
48   enum DispRange {
49     Disp12Only,
50     Disp12Pair,
51     Disp20Only,
52     Disp20Only128,
53     Disp20Pair
54   };
55   DispRange DR;
56 
57   // The parts of the address.  The address is equivalent to:
58   //
59   //     Base + Disp + Index + (IncludesDynAlloc ? ADJDYNALLOC : 0)
60   SDValue Base;
61   int64_t Disp;
62   SDValue Index;
63   bool IncludesDynAlloc;
64 
65   SystemZAddressingMode(AddrForm form, DispRange dr)
66       : Form(form), DR(dr), Disp(0), IncludesDynAlloc(false) {}
67 
68   // True if the address can have an index register.
69   bool hasIndexField() { return Form != FormBD; }
70 
71   // True if the address can (and must) include ADJDYNALLOC.
72   bool isDynAlloc() { return Form == FormBDXDynAlloc; }
73 
74   void dump(const llvm::SelectionDAG *DAG) {
75     errs() << "SystemZAddressingMode " << this << '\n';
76 
77     errs() << " Base ";
78     if (Base.getNode())
79       Base.getNode()->dump(DAG);
80     else
81       errs() << "null\n";
82 
83     if (hasIndexField()) {
84       errs() << " Index ";
85       if (Index.getNode())
86         Index.getNode()->dump(DAG);
87       else
88         errs() << "null\n";
89     }
90 
91     errs() << " Disp " << Disp;
92     if (IncludesDynAlloc)
93       errs() << " + ADJDYNALLOC";
94     errs() << '\n';
95   }
96 };
97 
98 // Return a mask with Count low bits set.
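// For example, allOnes(12) == 0xfff.  The Count == 64 case is handled
// separately because shifting a 64-bit value by 64 bits would be undefined
// behavior.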
99 static uint64_t allOnes(unsigned int Count) {
100   assert(Count <= 64);
101   if (Count > 63)
102     return UINT64_MAX;
103   return (uint64_t(1) << Count) - 1;
104 }
105 
106 // Represents operands 2 to 5 of the ROTATE AND ... SELECTED BITS operation
107 // given by Opcode.  The operands are: Input (R2), Start (I3), End (I4) and
108 // Rotate (I5).  The combined operand value is effectively:
109 //
110 //   (or (rotl Input, Rotate), ~Mask)
111 //
112 // for RNSBG and:
113 //
114 //   (and (rotl Input, Rotate), Mask)
115 //
116 // otherwise.  The output value has BitSize bits, although Input may be
117 // narrower (in which case the upper bits are don't care), or wider (in which
118 // case the result will be truncated as part of the operation).
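// For example, (and (srl X, 2), 0xff) on an i64 value X ends up with
// Input = X, Rotate = 62, Start = 56 and End = 63: bits 56-63 of the
// result come from (rotl X, 62) and all other bits are zero.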
119 struct RxSBGOperands {
120   RxSBGOperands(unsigned Op, SDValue N)
121     : Opcode(Op), BitSize(N.getValueSizeInBits()),
122       Mask(allOnes(BitSize)), Input(N), Start(64 - BitSize), End(63),
123       Rotate(0) {}
124 
125   unsigned Opcode;
126   unsigned BitSize;
127   uint64_t Mask;
128   SDValue Input;
129   unsigned Start;
130   unsigned End;
131   unsigned Rotate;
132 };
133 
134 class SystemZDAGToDAGISel : public SelectionDAGISel {
135   const SystemZSubtarget *Subtarget;
136 
137   // Used by SystemZOperands.td to create integer constants.
138   inline SDValue getImm(const SDNode *Node, uint64_t Imm) const {
139     return CurDAG->getTargetConstant(Imm, SDLoc(Node), Node->getValueType(0));
140   }
141 
142   const SystemZTargetMachine &getTargetMachine() const {
143     return static_cast<const SystemZTargetMachine &>(TM);
144   }
145 
146   const SystemZInstrInfo *getInstrInfo() const {
147     return Subtarget->getInstrInfo();
148   }
149 
150   // Try to fold more of the base or index of AM into AM, where IsBase
151   // selects between the base and index.
152   bool expandAddress(SystemZAddressingMode &AM, bool IsBase) const;
153 
154   // Try to describe N in AM, returning true on success.
155   bool selectAddress(SDValue N, SystemZAddressingMode &AM) const;
156 
157   // Extract individual target operands from matched address AM.
158   void getAddressOperands(const SystemZAddressingMode &AM, EVT VT,
159                           SDValue &Base, SDValue &Disp) const;
160   void getAddressOperands(const SystemZAddressingMode &AM, EVT VT,
161                           SDValue &Base, SDValue &Disp, SDValue &Index) const;
162 
163   // Try to match Addr as a FormBD address with displacement type DR.
164   // Return true on success, storing the base and displacement in
165   // Base and Disp respectively.
166   bool selectBDAddr(SystemZAddressingMode::DispRange DR, SDValue Addr,
167                     SDValue &Base, SDValue &Disp) const;
168 
169   // Try to match Addr as a FormBDX address with displacement type DR.
170   // Return true only if the match succeeds and the result has no index,
171   // storing the base and displacement in Base and Disp respectively.
172   bool selectMVIAddr(SystemZAddressingMode::DispRange DR, SDValue Addr,
173                      SDValue &Base, SDValue &Disp) const;
174 
175   // Try to match Addr as a FormBDX* address of form Form with
176   // displacement type DR.  Return true on success, storing the base,
177   // displacement and index in Base, Disp and Index respectively.
178   bool selectBDXAddr(SystemZAddressingMode::AddrForm Form,
179                      SystemZAddressingMode::DispRange DR, SDValue Addr,
180                      SDValue &Base, SDValue &Disp, SDValue &Index) const;
181 
182   // PC-relative address matching routines used by SystemZOperands.td.
183   bool selectPCRelAddress(SDValue Addr, SDValue &Target) const {
184     if (SystemZISD::isPCREL(Addr.getOpcode())) {
185       Target = Addr.getOperand(0);
186       return true;
187     }
188     return false;
189   }
190 
191   // BD matching routines used by SystemZOperands.td.
192   bool selectBDAddr12Only(SDValue Addr, SDValue &Base, SDValue &Disp) const {
193     return selectBDAddr(SystemZAddressingMode::Disp12Only, Addr, Base, Disp);
194   }
195   bool selectBDAddr12Pair(SDValue Addr, SDValue &Base, SDValue &Disp) const {
196     return selectBDAddr(SystemZAddressingMode::Disp12Pair, Addr, Base, Disp);
197   }
198   bool selectBDAddr20Only(SDValue Addr, SDValue &Base, SDValue &Disp) const {
199     return selectBDAddr(SystemZAddressingMode::Disp20Only, Addr, Base, Disp);
200   }
201   bool selectBDAddr20Pair(SDValue Addr, SDValue &Base, SDValue &Disp) const {
202     return selectBDAddr(SystemZAddressingMode::Disp20Pair, Addr, Base, Disp);
203   }
204 
205   // MVI matching routines used by SystemZOperands.td.
206   bool selectMVIAddr12Pair(SDValue Addr, SDValue &Base, SDValue &Disp) const {
207     return selectMVIAddr(SystemZAddressingMode::Disp12Pair, Addr, Base, Disp);
208   }
209   bool selectMVIAddr20Pair(SDValue Addr, SDValue &Base, SDValue &Disp) const {
210     return selectMVIAddr(SystemZAddressingMode::Disp20Pair, Addr, Base, Disp);
211   }
212 
213   // BDX matching routines used by SystemZOperands.td.
214   bool selectBDXAddr12Only(SDValue Addr, SDValue &Base, SDValue &Disp,
215                            SDValue &Index) const {
216     return selectBDXAddr(SystemZAddressingMode::FormBDXNormal,
217                          SystemZAddressingMode::Disp12Only,
218                          Addr, Base, Disp, Index);
219   }
220   bool selectBDXAddr12Pair(SDValue Addr, SDValue &Base, SDValue &Disp,
221                            SDValue &Index) const {
222     return selectBDXAddr(SystemZAddressingMode::FormBDXNormal,
223                          SystemZAddressingMode::Disp12Pair,
224                          Addr, Base, Disp, Index);
225   }
226   bool selectDynAlloc12Only(SDValue Addr, SDValue &Base, SDValue &Disp,
227                             SDValue &Index) const {
228     return selectBDXAddr(SystemZAddressingMode::FormBDXDynAlloc,
229                          SystemZAddressingMode::Disp12Only,
230                          Addr, Base, Disp, Index);
231   }
232   bool selectBDXAddr20Only(SDValue Addr, SDValue &Base, SDValue &Disp,
233                            SDValue &Index) const {
234     return selectBDXAddr(SystemZAddressingMode::FormBDXNormal,
235                          SystemZAddressingMode::Disp20Only,
236                          Addr, Base, Disp, Index);
237   }
238   bool selectBDXAddr20Only128(SDValue Addr, SDValue &Base, SDValue &Disp,
239                               SDValue &Index) const {
240     return selectBDXAddr(SystemZAddressingMode::FormBDXNormal,
241                          SystemZAddressingMode::Disp20Only128,
242                          Addr, Base, Disp, Index);
243   }
244   bool selectBDXAddr20Pair(SDValue Addr, SDValue &Base, SDValue &Disp,
245                            SDValue &Index) const {
246     return selectBDXAddr(SystemZAddressingMode::FormBDXNormal,
247                          SystemZAddressingMode::Disp20Pair,
248                          Addr, Base, Disp, Index);
249   }
250   bool selectLAAddr12Pair(SDValue Addr, SDValue &Base, SDValue &Disp,
251                           SDValue &Index) const {
252     return selectBDXAddr(SystemZAddressingMode::FormBDXLA,
253                          SystemZAddressingMode::Disp12Pair,
254                          Addr, Base, Disp, Index);
255   }
256   bool selectLAAddr20Pair(SDValue Addr, SDValue &Base, SDValue &Disp,
257                           SDValue &Index) const {
258     return selectBDXAddr(SystemZAddressingMode::FormBDXLA,
259                          SystemZAddressingMode::Disp20Pair,
260                          Addr, Base, Disp, Index);
261   }
262 
263   // Try to match Addr as an address with a base, 12-bit displacement
264   // and index, where the index is element Elem of a vector.
265   // Return true on success, storing the base, displacement and vector
266   // in Base, Disp and Index respectively.
267   bool selectBDVAddr12Only(SDValue Addr, SDValue Elem, SDValue &Base,
268                            SDValue &Disp, SDValue &Index) const;
269 
270   // Check whether (or Op (and X InsertMask)) is effectively an insertion
271   // of X into bits InsertMask of some Y != Op.  Return true if so and
272   // set Op to that Y.
273   bool detectOrAndInsertion(SDValue &Op, uint64_t InsertMask) const;
274 
275   // Try to update RxSBG so that only the bits of RxSBG.Input in Mask are used.
276   // Return true on success.
277   bool refineRxSBGMask(RxSBGOperands &RxSBG, uint64_t Mask) const;
278 
279   // Try to fold some of RxSBG.Input into other fields of RxSBG.
280   // Return true on success.
281   bool expandRxSBG(RxSBGOperands &RxSBG) const;
282 
283   // Return an undefined value of type VT.
284   SDValue getUNDEF(const SDLoc &DL, EVT VT) const;
285 
286   // Convert N to VT, if it isn't already.
287   SDValue convertTo(const SDLoc &DL, EVT VT, SDValue N) const;
288 
289   // Try to implement AND or shift node N using RISBG with the zero flag set.
290   // Return true on success.
291   bool tryRISBGZero(SDNode *N);
292 
293   // Try to use RISBG or Opcode to implement OR or XOR node N.
294   // Return true on success.
295   bool tryRxSBG(SDNode *N, unsigned Opcode);
296 
297   // If Op0 is null, then Node is a constant that can be loaded using:
298   //
299   //   (Opcode UpperVal LowerVal)
300   //
301   // If Op0 is nonnull, then Node can be implemented using:
302   //
303   //   (Opcode (Opcode Op0 UpperVal) LowerVal)
304   void splitLargeImmediate(unsigned Opcode, SDNode *Node, SDValue Op0,
305                            uint64_t UpperVal, uint64_t LowerVal);
306 
307   void loadVectorConstant(const SystemZVectorConstantInfo &VCI,
308                           SDNode *Node);
309 
310   SDNode *loadPoolVectorConstant(APInt Val, EVT VT, SDLoc DL);
311 
312   // Try to use gather instruction Opcode to implement vector insertion N.
313   bool tryGather(SDNode *N, unsigned Opcode);
314 
315   // Try to use scatter instruction Opcode to implement store Store.
316   bool tryScatter(StoreSDNode *Store, unsigned Opcode);
317 
318   // Change a chain of {load; op; store} of the same value into a simple op
319   // through memory of that value, if the uses of the modified value and its
320   // address are suitable.
321   bool tryFoldLoadStoreIntoMemOperand(SDNode *Node);
322 
323   // Return true if Load and Store are loads and stores of the same size
324   // and are guaranteed not to overlap.  Such operations can be implemented
325   // using block (SS-format) instructions.
326   //
327   // Partial overlap would lead to incorrect code, since the block operations
328   // are logically bytewise, even though they have a fast path for the
329   // non-overlapping case.  We also need to avoid full overlap (i.e. two
330   // addresses that might be equal at run time) because although that case
331   // would be handled correctly, it might be implemented by millicode.
332   bool canUseBlockOperation(StoreSDNode *Store, LoadSDNode *Load) const;
333 
334   // N is a (store (load Y), X) pattern.  Return true if it can use an MVC
335   // from Y to X.
336   bool storeLoadCanUseMVC(SDNode *N) const;
337 
338   // N is a (store (op (load A[0]), (load A[1])), X) pattern.  Return true
339   // if A[1 - I] == X and if N can use a block operation like NC from A[I]
340   // to X.
341   bool storeLoadCanUseBlockBinary(SDNode *N, unsigned I) const;
342 
343   // Return true if N (a load or a store) fulfills the alignment
344   // requirements for a PC-relative access.
345   bool storeLoadIsAligned(SDNode *N) const;
346 
347   // Return the load extension type of a load or atomic load.
348   ISD::LoadExtType getLoadExtType(SDNode *N) const;
349 
350   // Try to expand a boolean SELECT_CCMASK using an IPM sequence.
351   SDValue expandSelectBoolean(SDNode *Node);
352 
353   // Return true if the flags of N and the subtarget allow for
354   // reassociation, in which case a reg/reg opcode is needed as input to the
355   // MachineCombiner.
356   bool shouldSelectForReassoc(SDNode *N) const;
357 
358 public:
359   SystemZDAGToDAGISel() = delete;
360 
361   SystemZDAGToDAGISel(SystemZTargetMachine &TM, CodeGenOptLevel OptLevel)
362       : SelectionDAGISel(TM, OptLevel) {}
363 
364   bool runOnMachineFunction(MachineFunction &MF) override {
365     const Function &F = MF.getFunction();
366     if (F.getFnAttribute("fentry-call").getValueAsString() != "true") {
367       if (F.hasFnAttribute("mnop-mcount"))
368         report_fatal_error("mnop-mcount only supported with fentry-call");
369       if (F.hasFnAttribute("mrecord-mcount"))
370         report_fatal_error("mrecord-mcount only supported with fentry-call");
371     }
372 
373     Subtarget = &MF.getSubtarget<SystemZSubtarget>();
374     return SelectionDAGISel::runOnMachineFunction(MF);
375   }
376 
377   // Override SelectionDAGISel.
378   void Select(SDNode *Node) override;
379   bool SelectInlineAsmMemoryOperand(const SDValue &Op,
380                                     InlineAsm::ConstraintCode ConstraintID,
381                                     std::vector<SDValue> &OutOps) override;
382   bool IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const override;
383   void PreprocessISelDAG() override;
384 
385   // Include the pieces autogenerated from the target description.
386   #include "SystemZGenDAGISel.inc"
387 };
388 
389 class SystemZDAGToDAGISelLegacy : public SelectionDAGISelLegacy {
390 public:
391   static char ID;
392   explicit SystemZDAGToDAGISelLegacy(SystemZTargetMachine &TM,
393                                      CodeGenOptLevel OptLevel)
394       : SelectionDAGISelLegacy(
395             ID, std::make_unique<SystemZDAGToDAGISel>(TM, OptLevel)) {}
396 };
397 } // end anonymous namespace
398 
399 char SystemZDAGToDAGISelLegacy::ID = 0;
400 
401 INITIALIZE_PASS(SystemZDAGToDAGISelLegacy, DEBUG_TYPE, PASS_NAME, false, false)
402 
403 FunctionPass *llvm::createSystemZISelDag(SystemZTargetMachine &TM,
404                                          CodeGenOptLevel OptLevel) {
405   return new SystemZDAGToDAGISelLegacy(TM, OptLevel);
406 }
407 
408 // Return true if Val should be selected as a displacement for an address
409 // with range DR.  Here we're interested in the ranges of both the
410 // instruction described by DR and any pairing instruction.
411 static bool selectDisp(SystemZAddressingMode::DispRange DR, int64_t Val) {
412   switch (DR) {
413   case SystemZAddressingMode::Disp12Only:
414     return isUInt<12>(Val);
415 
416   case SystemZAddressingMode::Disp12Pair:
417   case SystemZAddressingMode::Disp20Only:
418   case SystemZAddressingMode::Disp20Pair:
419     return isInt<20>(Val);
420 
421   case SystemZAddressingMode::Disp20Only128:
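    // This form covers 128-bit accesses that may be split into two 8-byte
    // accesses, so the displacement of the second half (Val + 8) must also
    // be in range.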
422     return isInt<20>(Val) && isInt<20>(Val + 8);
423   }
424   llvm_unreachable("Unhandled displacement range");
425 }
426 
427 // Change the base or index in AM to Value, where IsBase selects
428 // between the base and index.
429 static void changeComponent(SystemZAddressingMode &AM, bool IsBase,
430                             SDValue Value) {
431   if (IsBase)
432     AM.Base = Value;
433   else
434     AM.Index = Value;
435 }
436 
437 // The base or index of AM is equivalent to Value + ADJDYNALLOC,
438 // where IsBase selects between the base and index.  Try to fold the
439 // ADJDYNALLOC into AM.
440 static bool expandAdjDynAlloc(SystemZAddressingMode &AM, bool IsBase,
441                               SDValue Value) {
442   if (AM.isDynAlloc() && !AM.IncludesDynAlloc) {
443     changeComponent(AM, IsBase, Value);
444     AM.IncludesDynAlloc = true;
445     return true;
446   }
447   return false;
448 }
449 
450 // The base of AM is equivalent to Base + Index.  Try to use Index as
451 // the index register.
452 static bool expandIndex(SystemZAddressingMode &AM, SDValue Base,
453                         SDValue Index) {
454   if (AM.hasIndexField() && !AM.Index.getNode()) {
455     AM.Base = Base;
456     AM.Index = Index;
457     return true;
458   }
459   return false;
460 }
461 
462 // The base or index of AM is equivalent to Op0 + Op1, where IsBase selects
463 // between the base and index.  Try to fold Op1 into AM's displacement.
464 static bool expandDisp(SystemZAddressingMode &AM, bool IsBase,
465                        SDValue Op0, uint64_t Op1) {
466   // First try adjusting the displacement.
467   int64_t TestDisp = AM.Disp + Op1;
468   if (selectDisp(AM.DR, TestDisp)) {
469     changeComponent(AM, IsBase, Op0);
470     AM.Disp = TestDisp;
471     return true;
472   }
473 
474   // We could consider forcing the displacement into a register and
475   // using it as an index, but it would need to be carefully tuned.
476   return false;
477 }
478 
479 bool SystemZDAGToDAGISel::expandAddress(SystemZAddressingMode &AM,
480                                         bool IsBase) const {
481   SDValue N = IsBase ? AM.Base : AM.Index;
482   unsigned Opcode = N.getOpcode();
483   // Look through no-op truncations.
484   if (Opcode == ISD::TRUNCATE && N.getOperand(0).getValueSizeInBits() <= 64) {
485     N = N.getOperand(0);
486     Opcode = N.getOpcode();
487   }
488   if (Opcode == ISD::ADD || CurDAG->isBaseWithConstantOffset(N)) {
489     SDValue Op0 = N.getOperand(0);
490     SDValue Op1 = N.getOperand(1);
491 
492     unsigned Op0Code = Op0->getOpcode();
493     unsigned Op1Code = Op1->getOpcode();
494 
495     if (Op0Code == SystemZISD::ADJDYNALLOC)
496       return expandAdjDynAlloc(AM, IsBase, Op1);
497     if (Op1Code == SystemZISD::ADJDYNALLOC)
498       return expandAdjDynAlloc(AM, IsBase, Op0);
499 
500     if (Op0Code == ISD::Constant)
501       return expandDisp(AM, IsBase, Op1,
502                         cast<ConstantSDNode>(Op0)->getSExtValue());
503     if (Op1Code == ISD::Constant)
504       return expandDisp(AM, IsBase, Op0,
505                         cast<ConstantSDNode>(Op1)->getSExtValue());
506 
507     if (IsBase && expandIndex(AM, Op0, Op1))
508       return true;
509   }
510   if (Opcode == SystemZISD::PCREL_OFFSET) {
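    // PCREL_OFFSET pairs the full global address (operand 0) with an anchor
    // address (operand 1) that a LARL-style instruction can materialize; the
    // byte difference between the two offsets can be used as a displacement.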
511     SDValue Full = N.getOperand(0);
512     SDValue Base = N.getOperand(1);
513     SDValue Anchor = Base.getOperand(0);
514     uint64_t Offset = (cast<GlobalAddressSDNode>(Full)->getOffset() -
515                        cast<GlobalAddressSDNode>(Anchor)->getOffset());
516     return expandDisp(AM, IsBase, Base, Offset);
517   }
518   return false;
519 }
520 
521 // Return true if an instruction with displacement range DR should be
522 // used for displacement value Val.  selectDisp(DR, Val) must already hold.
523 static bool isValidDisp(SystemZAddressingMode::DispRange DR, int64_t Val) {
524   assert(selectDisp(DR, Val) && "Invalid displacement");
525   switch (DR) {
526   case SystemZAddressingMode::Disp12Only:
527   case SystemZAddressingMode::Disp20Only:
528   case SystemZAddressingMode::Disp20Only128:
529     return true;
530 
531   case SystemZAddressingMode::Disp12Pair:
532     // Use the other instruction if the displacement is too large.
533     return isUInt<12>(Val);
534 
535   case SystemZAddressingMode::Disp20Pair:
536     // Use the other instruction if the displacement is small enough.
537     return !isUInt<12>(Val);
538   }
539   llvm_unreachable("Unhandled displacement range");
540 }
541 
542 // Return true if Base + Disp + Index should be performed by LA(Y).
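// LA takes a 12-bit unsigned displacement and LAY a 20-bit signed one; both
// compute Base + Disp + Index into a register of their own.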
543 static bool shouldUseLA(SDNode *Base, int64_t Disp, SDNode *Index) {
544   // Don't use LA(Y) for constants.
545   if (!Base)
546     return false;
547 
548   // Always use LA(Y) for frame addresses, since we know that the destination
549   // register is almost always (perhaps always) going to be different from
550   // the frame register.
551   if (Base->getOpcode() == ISD::FrameIndex)
552     return true;
553 
554   if (Disp) {
555     // Always use LA(Y) if there is a base, displacement and index.
556     if (Index)
557       return true;
558 
559     // Always use LA if the displacement is small enough.  It should always
560     // be no worse than AGHI (and better if it avoids a move).
561     if (isUInt<12>(Disp))
562       return true;
563 
564     // For similar reasons, always use LAY if the constant is too big for AGHI.
565     // LAY should be no worse than AGFI.
566     if (!isInt<16>(Disp))
567       return true;
568   } else {
569     // Don't use LA for plain registers.
570     if (!Index)
571       return false;
572 
573     // Don't use LA for plain addition if the index operand is only used
574     // once.  It should be a natural two-operand addition in that case.
575     if (Index->hasOneUse())
576       return false;
577 
578     // Prefer addition if the second operand is sign-extended, in the
579     // hope of using AGF.
580     unsigned IndexOpcode = Index->getOpcode();
581     if (IndexOpcode == ISD::SIGN_EXTEND ||
582         IndexOpcode == ISD::SIGN_EXTEND_INREG)
583       return false;
584   }
585 
586   // Don't use LA for two-operand addition if either operand is only
587   // used once.  The addition instructions are better in that case.
588   if (Base->hasOneUse())
589     return false;
590 
591   return true;
592 }
593 
594 // Return true if Addr is suitable for AM, updating AM if so.
595 bool SystemZDAGToDAGISel::selectAddress(SDValue Addr,
596                                         SystemZAddressingMode &AM) const {
597   // Start out assuming that the address will need to be loaded separately,
598   // then try to extend it as much as we can.
599   AM.Base = Addr;
600 
601   // First try treating the address as a constant.
602   if (Addr.getOpcode() == ISD::Constant &&
603       expandDisp(AM, true, SDValue(),
604                  cast<ConstantSDNode>(Addr)->getSExtValue()))
605     ;
606   // Also see if it's a bare ADJDYNALLOC.
607   else if (Addr.getOpcode() == SystemZISD::ADJDYNALLOC &&
608            expandAdjDynAlloc(AM, true, SDValue()))
609     ;
610   else
611     // Otherwise try expanding each component.
612     while (expandAddress(AM, true) ||
613            (AM.Index.getNode() && expandAddress(AM, false)))
614       continue;
615 
616   // Reject cases where it isn't profitable to use LA(Y).
617   if (AM.Form == SystemZAddressingMode::FormBDXLA &&
618       !shouldUseLA(AM.Base.getNode(), AM.Disp, AM.Index.getNode()))
619     return false;
620 
621   // Reject cases where the other instruction in a pair should be used.
622   if (!isValidDisp(AM.DR, AM.Disp))
623     return false;
624 
625   // Make sure that ADJDYNALLOC is included where necessary.
626   if (AM.isDynAlloc() && !AM.IncludesDynAlloc)
627     return false;
628 
629   LLVM_DEBUG(AM.dump(CurDAG));
630   return true;
631 }
632 
633 // Insert a node into the DAG at least before Pos.  This will reposition
634 // the node as needed, and will assign it a node ID that is <= Pos's ID.
635 // Note that this does *not* preserve the uniqueness of node IDs!
636 // The selection DAG must no longer depend on their uniqueness when this
637 // function is used.
638 static void insertDAGNode(SelectionDAG *DAG, SDNode *Pos, SDValue N) {
639   if (N->getNodeId() == -1 ||
640       (SelectionDAGISel::getUninvalidatedNodeId(N.getNode()) >
641        SelectionDAGISel::getUninvalidatedNodeId(Pos))) {
642     DAG->RepositionNode(Pos->getIterator(), N.getNode());
643     // Mark N as invalid for pruning, since after this it may be a successor to
644     // a selected node but otherwise be in the same position as Pos.
645     // Conservatively mark it with the same -abs(Id) to ensure the node id
646     // invariant is preserved.
647     N->setNodeId(Pos->getNodeId());
648     SelectionDAGISel::InvalidateNodeId(N.getNode());
649   }
650 }
651 
652 void SystemZDAGToDAGISel::getAddressOperands(const SystemZAddressingMode &AM,
653                                              EVT VT, SDValue &Base,
654                                              SDValue &Disp) const {
655   Base = AM.Base;
656   if (!Base.getNode())
657     // Register 0 means "no base".  This is mostly useful for shifts.
658     Base = CurDAG->getRegister(0, VT);
659   else if (Base.getOpcode() == ISD::FrameIndex) {
660     // Lower a FrameIndex to a TargetFrameIndex.
661     int64_t FrameIndex = cast<FrameIndexSDNode>(Base)->getIndex();
662     Base = CurDAG->getTargetFrameIndex(FrameIndex, VT);
663   } else if (Base.getValueType() != VT) {
664     // Truncate values from i64 to i32, for shifts.
665     assert(VT == MVT::i32 && Base.getValueType() == MVT::i64 &&
666            "Unexpected truncation");
667     SDLoc DL(Base);
668     SDValue Trunc = CurDAG->getNode(ISD::TRUNCATE, DL, VT, Base);
669     insertDAGNode(CurDAG, Base.getNode(), Trunc);
670     Base = Trunc;
671   }
672 
673   // Lower the displacement to a TargetConstant.
674   Disp = CurDAG->getSignedTargetConstant(AM.Disp, SDLoc(Base), VT);
675 }
676 
677 void SystemZDAGToDAGISel::getAddressOperands(const SystemZAddressingMode &AM,
678                                              EVT VT, SDValue &Base,
679                                              SDValue &Disp,
680                                              SDValue &Index) const {
681   getAddressOperands(AM, VT, Base, Disp);
682 
683   Index = AM.Index;
684   if (!Index.getNode())
685     // Register 0 means "no index".
686     Index = CurDAG->getRegister(0, VT);
687 }
688 
689 bool SystemZDAGToDAGISel::selectBDAddr(SystemZAddressingMode::DispRange DR,
690                                        SDValue Addr, SDValue &Base,
691                                        SDValue &Disp) const {
692   SystemZAddressingMode AM(SystemZAddressingMode::FormBD, DR);
693   if (!selectAddress(Addr, AM))
694     return false;
695 
696   getAddressOperands(AM, Addr.getValueType(), Base, Disp);
697   return true;
698 }
699 
700 bool SystemZDAGToDAGISel::selectMVIAddr(SystemZAddressingMode::DispRange DR,
701                                         SDValue Addr, SDValue &Base,
702                                         SDValue &Disp) const {
703   SystemZAddressingMode AM(SystemZAddressingMode::FormBDXNormal, DR);
704   if (!selectAddress(Addr, AM) || AM.Index.getNode())
705     return false;
706 
707   getAddressOperands(AM, Addr.getValueType(), Base, Disp);
708   return true;
709 }
710 
711 bool SystemZDAGToDAGISel::selectBDXAddr(SystemZAddressingMode::AddrForm Form,
712                                         SystemZAddressingMode::DispRange DR,
713                                         SDValue Addr, SDValue &Base,
714                                         SDValue &Disp, SDValue &Index) const {
715   SystemZAddressingMode AM(Form, DR);
716   if (!selectAddress(Addr, AM))
717     return false;
718 
719   getAddressOperands(AM, Addr.getValueType(), Base, Disp, Index);
720   return true;
721 }
722 
723 bool SystemZDAGToDAGISel::selectBDVAddr12Only(SDValue Addr, SDValue Elem,
724                                               SDValue &Base,
725                                               SDValue &Disp,
726                                               SDValue &Index) const {
727   SDValue Regs[2];
728   if (selectBDXAddr12Only(Addr, Regs[0], Disp, Regs[1]) &&
729       Regs[0].getNode() && Regs[1].getNode()) {
730     for (unsigned int I = 0; I < 2; ++I) {
731       Base = Regs[I];
732       Index = Regs[1 - I];
733       // We can't tell here whether the index vector has the right type
734       // for the access; the caller needs to do that instead.
735       if (Index.getOpcode() == ISD::ZERO_EXTEND)
736         Index = Index.getOperand(0);
737       if (Index.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
738           Index.getOperand(1) == Elem) {
739         Index = Index.getOperand(0);
740         return true;
741       }
742     }
743   }
744   return false;
745 }
746 
747 bool SystemZDAGToDAGISel::detectOrAndInsertion(SDValue &Op,
748                                                uint64_t InsertMask) const {
749   // We're only interested in cases where the insertion is into some operand
750   // of Op, rather than into Op itself.  The only useful case is an AND.
751   if (Op.getOpcode() != ISD::AND)
752     return false;
753 
754   // We need a constant mask.
755   auto *MaskNode = dyn_cast<ConstantSDNode>(Op.getOperand(1).getNode());
756   if (!MaskNode)
757     return false;
758 
759   // It's not an insertion of Op.getOperand(0) if the two masks overlap.
760   uint64_t AndMask = MaskNode->getZExtValue();
761   if (InsertMask & AndMask)
762     return false;
763 
764   // It's only an insertion if all bits are covered or are known to be zero.
765   // The inner check covers all cases but is more expensive.
766   uint64_t Used = allOnes(Op.getValueSizeInBits());
767   if (Used != (AndMask | InsertMask)) {
768     KnownBits Known = CurDAG->computeKnownBits(Op.getOperand(0));
769     if (Used != (AndMask | InsertMask | Known.Zero.getZExtValue()))
770       return false;
771   }
772 
773   Op = Op.getOperand(0);
774   return true;
775 }
776 
777 bool SystemZDAGToDAGISel::refineRxSBGMask(RxSBGOperands &RxSBG,
778                                           uint64_t Mask) const {
779   const SystemZInstrInfo *TII = getInstrInfo();
780   if (RxSBG.Rotate != 0)
781     Mask = (Mask << RxSBG.Rotate) | (Mask >> (64 - RxSBG.Rotate));
782   Mask &= RxSBG.Mask;
783   if (TII->isRxSBGMask(Mask, RxSBG.BitSize, RxSBG.Start, RxSBG.End)) {
784     RxSBG.Mask = Mask;
785     return true;
786   }
787   return false;
788 }
789 
790 // Return true if any bits of (RxSBG.Input & Mask) are significant.
791 static bool maskMatters(RxSBGOperands &RxSBG, uint64_t Mask) {
792   // Rotate the mask in the same way as RxSBG.Input is rotated.
793   if (RxSBG.Rotate != 0)
794     Mask = ((Mask << RxSBG.Rotate) | (Mask >> (64 - RxSBG.Rotate)));
795   return (Mask & RxSBG.Mask) != 0;
796 }
797 
798 bool SystemZDAGToDAGISel::expandRxSBG(RxSBGOperands &RxSBG) const {
799   SDValue N = RxSBG.Input;
800   unsigned Opcode = N.getOpcode();
801   switch (Opcode) {
802   case ISD::TRUNCATE: {
803     if (RxSBG.Opcode == SystemZ::RNSBG)
804       return false;
805     if (N.getOperand(0).getValueSizeInBits() > 64)
806       return false;
807     uint64_t BitSize = N.getValueSizeInBits();
808     uint64_t Mask = allOnes(BitSize);
809     if (!refineRxSBGMask(RxSBG, Mask))
810       return false;
811     RxSBG.Input = N.getOperand(0);
812     return true;
813   }
814   case ISD::AND: {
815     if (RxSBG.Opcode == SystemZ::RNSBG)
816       return false;
817 
818     auto *MaskNode = dyn_cast<ConstantSDNode>(N.getOperand(1).getNode());
819     if (!MaskNode)
820       return false;
821 
822     SDValue Input = N.getOperand(0);
823     uint64_t Mask = MaskNode->getZExtValue();
824     if (!refineRxSBGMask(RxSBG, Mask)) {
825       // If some bits of Input are already known zeros, those bits will have
826       // been removed from the mask.  See if adding them back in makes the
827       // mask suitable.
828       KnownBits Known = CurDAG->computeKnownBits(Input);
829       Mask |= Known.Zero.getZExtValue();
830       if (!refineRxSBGMask(RxSBG, Mask))
831         return false;
832     }
833     RxSBG.Input = Input;
834     return true;
835   }
836 
837   case ISD::OR: {
838     if (RxSBG.Opcode != SystemZ::RNSBG)
839       return false;
840 
841     auto *MaskNode = dyn_cast<ConstantSDNode>(N.getOperand(1).getNode());
842     if (!MaskNode)
843       return false;
844 
845     SDValue Input = N.getOperand(0);
846     uint64_t Mask = ~MaskNode->getZExtValue();
847     if (!refineRxSBGMask(RxSBG, Mask)) {
848       // If some bits of Input are already known ones, those bits will have
849       // been removed from the mask.  See if adding them back in makes the
850       // mask suitable.
851       KnownBits Known = CurDAG->computeKnownBits(Input);
852       Mask &= ~Known.One.getZExtValue();
853       if (!refineRxSBGMask(RxSBG, Mask))
854         return false;
855     }
856     RxSBG.Input = Input;
857     return true;
858   }
859 
860   case ISD::ROTL: {
861     // Any 64-bit rotate left can be merged into the RxSBG.
862     if (RxSBG.BitSize != 64 || N.getValueType() != MVT::i64)
863       return false;
864     auto *CountNode = dyn_cast<ConstantSDNode>(N.getOperand(1).getNode());
865     if (!CountNode)
866       return false;
867 
868     RxSBG.Rotate = (RxSBG.Rotate + CountNode->getZExtValue()) & 63;
869     RxSBG.Input = N.getOperand(0);
870     return true;
871   }
872 
873   case ISD::ANY_EXTEND:
874     // Bits above the extended operand are don't-care.
875     RxSBG.Input = N.getOperand(0);
876     return true;
877 
878   case ISD::ZERO_EXTEND:
879     if (RxSBG.Opcode != SystemZ::RNSBG) {
880       // Restrict the mask to the extended operand.
881       unsigned InnerBitSize = N.getOperand(0).getValueSizeInBits();
882       if (!refineRxSBGMask(RxSBG, allOnes(InnerBitSize)))
883         return false;
884 
885       RxSBG.Input = N.getOperand(0);
886       return true;
887     }
888     [[fallthrough]];
889 
890   case ISD::SIGN_EXTEND: {
891     // Check that the extension bits are don't-care (i.e. are masked out
892     // by the final mask).
893     unsigned BitSize = N.getValueSizeInBits();
894     unsigned InnerBitSize = N.getOperand(0).getValueSizeInBits();
895     if (maskMatters(RxSBG, allOnes(BitSize) - allOnes(InnerBitSize))) {
896       // In the case where only the sign bit is active, increase Rotate by
897       // the extension width.
898       if (RxSBG.Mask == 1 && RxSBG.Rotate == 1)
899         RxSBG.Rotate += (BitSize - InnerBitSize);
900       else
901         return false;
902     }
903 
904     RxSBG.Input = N.getOperand(0);
905     return true;
906   }
907 
908   case ISD::SHL: {
909     auto *CountNode = dyn_cast<ConstantSDNode>(N.getOperand(1).getNode());
910     if (!CountNode)
911       return false;
912 
913     uint64_t Count = CountNode->getZExtValue();
914     unsigned BitSize = N.getValueSizeInBits();
915     if (Count < 1 || Count >= BitSize)
916       return false;
917 
918     if (RxSBG.Opcode == SystemZ::RNSBG) {
919       // Treat (shl X, count) as (rotl X, size-count) as long as the bottom
920       // count bits from RxSBG.Input are ignored.
921       if (maskMatters(RxSBG, allOnes(Count)))
922         return false;
923     } else {
924       // Treat (shl X, count) as (and (rotl X, count), ~0<<count).
925       if (!refineRxSBGMask(RxSBG, allOnes(BitSize - Count) << Count))
926         return false;
927     }
928 
929     RxSBG.Rotate = (RxSBG.Rotate + Count) & 63;
930     RxSBG.Input = N.getOperand(0);
931     return true;
932   }
933 
934   case ISD::SRL:
935   case ISD::SRA: {
936     auto *CountNode = dyn_cast<ConstantSDNode>(N.getOperand(1).getNode());
937     if (!CountNode)
938       return false;
939 
940     uint64_t Count = CountNode->getZExtValue();
941     unsigned BitSize = N.getValueSizeInBits();
942     if (Count < 1 || Count >= BitSize)
943       return false;
944 
945     if (RxSBG.Opcode == SystemZ::RNSBG || Opcode == ISD::SRA) {
946       // Treat (srl|sra X, count) as (rotl X, size-count) as long as the top
947       // count bits from RxSBG.Input are ignored.
948       if (maskMatters(RxSBG, allOnes(Count) << (BitSize - Count)))
949         return false;
950     } else {
951       // Treat (srl X, count) as (and (rotl X, size-count), ~0>>count),
952       // which is similar to SLL above.
953       if (!refineRxSBGMask(RxSBG, allOnes(BitSize - Count)))
954         return false;
955     }
956 
957     RxSBG.Rotate = (RxSBG.Rotate - Count) & 63;
958     RxSBG.Input = N.getOperand(0);
959     return true;
960   }
961   default:
962     return false;
963   }
964 }
965 
966 SDValue SystemZDAGToDAGISel::getUNDEF(const SDLoc &DL, EVT VT) const {
967   SDNode *N = CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, VT);
968   return SDValue(N, 0);
969 }
970 
971 SDValue SystemZDAGToDAGISel::convertTo(const SDLoc &DL, EVT VT,
972                                        SDValue N) const {
973   if (N.getValueType() == MVT::i32 && VT == MVT::i64)
974     return CurDAG->getTargetInsertSubreg(SystemZ::subreg_l32,
975                                          DL, VT, getUNDEF(DL, MVT::i64), N);
976   if (N.getValueType() == MVT::i64 && VT == MVT::i32)
977     return CurDAG->getTargetExtractSubreg(SystemZ::subreg_l32, DL, VT, N);
978   assert(N.getValueType() == VT && "Unexpected value types");
979   return N;
980 }
981 
982 bool SystemZDAGToDAGISel::tryRISBGZero(SDNode *N) {
983   SDLoc DL(N);
984   EVT VT = N->getValueType(0);
985   if (!VT.isInteger() || VT.getSizeInBits() > 64)
986     return false;
987   RxSBGOperands RISBG(SystemZ::RISBG, SDValue(N, 0));
988   unsigned Count = 0;
989   while (expandRxSBG(RISBG))
990     // The widening or narrowing is expected to be free.
991     // Counting widening or narrowing as a saved operation will result in
992     // preferring an R*SBG over a simple shift/logical instruction.
993     if (RISBG.Input.getOpcode() != ISD::ANY_EXTEND &&
994         RISBG.Input.getOpcode() != ISD::TRUNCATE)
995       Count += 1;
996   if (Count == 0 || isa<ConstantSDNode>(RISBG.Input))
997     return false;
998 
999   // Prefer to use normal shift instructions over RISBG, since they can handle
1000   // all cases and are sometimes shorter.
1001   if (Count == 1 && N->getOpcode() != ISD::AND)
1002     return false;
1003 
1004   // Prefer LOAD LOGICAL INDEXED ADDRESS over RISBG in the case where we
1005   // can use its displacement to pull in an addition.
1006   if (Subtarget->hasMiscellaneousExtensions4() &&
1007       RISBG.Rotate >= 1 && RISBG.Rotate <= 4 &&
1008       RISBG.Mask == (((uint64_t)1 << 32) - 1) << RISBG.Rotate &&
1009       RISBG.Input.getOpcode() == ISD::ADD)
1010     if (auto *C = dyn_cast<ConstantSDNode>(RISBG.Input.getOperand(1)))
1011       if (isInt<20>(C->getSExtValue()))
1012         return false;
1013 
1014   // Prefer register extensions like LLC over RISBG.  Also prefer to start
1015   // out with normal ANDs if one instruction would be enough.  We can convert
1016   // these ANDs into an RISBG later if a three-address instruction is useful.
1017   if (RISBG.Rotate == 0) {
1018     bool PreferAnd = false;
1019     // Prefer AND for any 32-bit and-immediate operation.
1020     if (VT == MVT::i32)
1021       PreferAnd = true;
1022     // As well as for any 64-bit operation that can be implemented via LLC(R),
1023     // LLH(R), LLGT(R), or one of the and-immediate instructions.
1024     else if (RISBG.Mask == 0xff ||
1025              RISBG.Mask == 0xffff ||
1026              RISBG.Mask == 0x7fffffff ||
1027              SystemZ::isImmLF(~RISBG.Mask) ||
1028              SystemZ::isImmHF(~RISBG.Mask))
1029       PreferAnd = true;
1030     // And likewise for the LLZRGF instruction, which doesn't have a register
1031     // to register version.
1032     else if (auto *Load = dyn_cast<LoadSDNode>(RISBG.Input)) {
1033       if (Load->getMemoryVT() == MVT::i32 &&
1034           (Load->getExtensionType() == ISD::EXTLOAD ||
1035            Load->getExtensionType() == ISD::ZEXTLOAD) &&
1036           RISBG.Mask == 0xffffff00 &&
1037           Subtarget->hasLoadAndZeroRightmostByte())
1038         PreferAnd = true;
1039     }
1040     if (PreferAnd) {
1041       // Replace the current node with an AND.  Note that the current node
1042       // might already be that same AND, in which case it is already CSE'd
1043       // with it, and we must not call ReplaceNode.
1044       SDValue In = convertTo(DL, VT, RISBG.Input);
1045       SDValue Mask = CurDAG->getConstant(RISBG.Mask, DL, VT);
1046       SDValue New = CurDAG->getNode(ISD::AND, DL, VT, In, Mask);
1047       if (N != New.getNode()) {
1048         insertDAGNode(CurDAG, N, Mask);
1049         insertDAGNode(CurDAG, N, New);
1050         ReplaceNode(N, New.getNode());
1051         N = New.getNode();
1052       }
1053       // Now, select the machine opcode to implement this operation.
1054       if (!N->isMachineOpcode())
1055         SelectCode(N);
1056       return true;
1057     }
1058   }
1059 
1060   unsigned Opcode = SystemZ::RISBG;
1061   // Prefer RISBGN if available, since it does not clobber CC.
1062   if (Subtarget->hasMiscellaneousExtensions())
1063     Opcode = SystemZ::RISBGN;
1064   EVT OpcodeVT = MVT::i64;
1065   if (VT == MVT::i32 && Subtarget->hasHighWord() &&
1066       // We can only use the 32-bit instructions if all source bits are
1067       // in the low 32 bits without wrapping, both after rotation (because
1068       // of the smaller range for Start and End) and before rotation
1069       // (because the input value is truncated).
1070       RISBG.Start >= 32 && RISBG.End >= RISBG.Start &&
1071       ((RISBG.Start + RISBG.Rotate) & 63) >= 32 &&
1072       ((RISBG.End + RISBG.Rotate) & 63) >=
1073       ((RISBG.Start + RISBG.Rotate) & 63)) {
1074     Opcode = SystemZ::RISBMux;
1075     OpcodeVT = MVT::i32;
1076     RISBG.Start &= 31;
1077     RISBG.End &= 31;
1078   }
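  // Setting the top bit (128) of the End operand requests the "zero
  // remaining bits" form of RISBG, so bits outside Start..End are cleared
  // rather than taken from the first (here undefined) operand.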
1079   SDValue Ops[5] = {
1080     getUNDEF(DL, OpcodeVT),
1081     convertTo(DL, OpcodeVT, RISBG.Input),
1082     CurDAG->getTargetConstant(RISBG.Start, DL, MVT::i32),
1083     CurDAG->getTargetConstant(RISBG.End | 128, DL, MVT::i32),
1084     CurDAG->getTargetConstant(RISBG.Rotate, DL, MVT::i32)
1085   };
1086   SDValue New = convertTo(
1087       DL, VT, SDValue(CurDAG->getMachineNode(Opcode, DL, OpcodeVT, Ops), 0));
1088   ReplaceNode(N, New.getNode());
1089   return true;
1090 }
1091 
1092 bool SystemZDAGToDAGISel::tryRxSBG(SDNode *N, unsigned Opcode) {
1093   SDLoc DL(N);
1094   EVT VT = N->getValueType(0);
1095   if (!VT.isInteger() || VT.getSizeInBits() > 64)
1096     return false;
1097   // Try treating each operand of N as the second operand of the RxSBG
1098   // and see which goes deepest.
1099   RxSBGOperands RxSBG[] = {
1100     RxSBGOperands(Opcode, N->getOperand(0)),
1101     RxSBGOperands(Opcode, N->getOperand(1))
1102   };
1103   unsigned Count[] = { 0, 0 };
1104   for (unsigned I = 0; I < 2; ++I)
1105     while (RxSBG[I].Input->hasOneUse() && expandRxSBG(RxSBG[I]))
1106       // In cases of multiple users it seems better to keep the simple
1107       // instructions, as they are one cycle faster, and it also helps in cases
1108       // where both inputs share a common node.
1109       // The widening or narrowing is expected to be free.  Counting widening
1110       // or narrowing as a saved operation will result in preferring an R*SBG
1111       // over a simple shift/logical instruction.
1112       if (RxSBG[I].Input.getOpcode() != ISD::ANY_EXTEND &&
1113           RxSBG[I].Input.getOpcode() != ISD::TRUNCATE)
1114         Count[I] += 1;
1115 
1116   // Do nothing if neither operand is suitable.
1117   if (Count[0] == 0 && Count[1] == 0)
1118     return false;
1119 
1120   // Pick the deepest second operand.
1121   unsigned I = Count[0] > Count[1] ? 0 : 1;
1122   SDValue Op0 = N->getOperand(I ^ 1);
1123 
1124   // Prefer IC for character insertions from memory.
1125   if (Opcode == SystemZ::ROSBG && (RxSBG[I].Mask & 0xff) == 0)
1126     if (auto *Load = dyn_cast<LoadSDNode>(Op0.getNode()))
1127       if (Load->getMemoryVT() == MVT::i8)
1128         return false;
1129 
1130   // See whether we can avoid an AND in the first operand by converting
1131   // ROSBG to RISBG.
1132   if (Opcode == SystemZ::ROSBG && detectOrAndInsertion(Op0, RxSBG[I].Mask)) {
1133     Opcode = SystemZ::RISBG;
1134     // Prefer RISBGN if available, since it does not clobber CC.
1135     if (Subtarget->hasMiscellaneousExtensions())
1136       Opcode = SystemZ::RISBGN;
1137   }
1138 
1139   SDValue Ops[5] = {
1140     convertTo(DL, MVT::i64, Op0),
1141     convertTo(DL, MVT::i64, RxSBG[I].Input),
1142     CurDAG->getTargetConstant(RxSBG[I].Start, DL, MVT::i32),
1143     CurDAG->getTargetConstant(RxSBG[I].End, DL, MVT::i32),
1144     CurDAG->getTargetConstant(RxSBG[I].Rotate, DL, MVT::i32)
1145   };
1146   SDValue New = convertTo(
1147       DL, VT, SDValue(CurDAG->getMachineNode(Opcode, DL, MVT::i64, Ops), 0));
1148   ReplaceNode(N, New.getNode());
1149   return true;
1150 }
1151 
1152 void SystemZDAGToDAGISel::splitLargeImmediate(unsigned Opcode, SDNode *Node,
1153                                               SDValue Op0, uint64_t UpperVal,
1154                                               uint64_t LowerVal) {
1155   EVT VT = Node->getValueType(0);
1156   SDLoc DL(Node);
1157   SDValue Upper = CurDAG->getConstant(UpperVal, DL, VT);
1158   if (Op0.getNode())
1159     Upper = CurDAG->getNode(Opcode, DL, VT, Op0, Upper);
1160 
1161   {
1162     // When we haven't passed in Op0, Upper will be a constant. In order to
1163     // prevent folding back to the large immediate in `Or = getNode(...)` we run
1164     // SelectCode first and end up with an opaque machine node. This means that
1165     // we need to use a handle to keep track of Upper in case it gets CSE'd by
1166     // SelectCode.
1167     //
1168     // Note that in the case where Op0 is passed in we could just call
1169     // SelectCode(Upper) later, along with the SelectCode(Or), and avoid needing
1170     // the handle at all, but it's fine to do it here.
1171     //
1172     // TODO: This is a pretty hacky way to do this. Can we do something that
1173     // doesn't require a two paragraph explanation?
1174     HandleSDNode Handle(Upper);
1175     SelectCode(Upper.getNode());
1176     Upper = Handle.getValue();
1177   }
1178 
1179   SDValue Lower = CurDAG->getConstant(LowerVal, DL, VT);
1180   SDValue Or = CurDAG->getNode(Opcode, DL, VT, Upper, Lower);
1181 
1182   ReplaceNode(Node, Or.getNode());
1183 
1184   SelectCode(Or.getNode());
1185 }
1186 
1187 void SystemZDAGToDAGISel::loadVectorConstant(
1188     const SystemZVectorConstantInfo &VCI, SDNode *Node) {
1189   assert((VCI.Opcode == SystemZISD::BYTE_MASK ||
1190           VCI.Opcode == SystemZISD::REPLICATE ||
1191           VCI.Opcode == SystemZISD::ROTATE_MASK) &&
1192          "Bad opcode!");
1193   assert(VCI.VecVT.getSizeInBits() == 128 && "Expected a vector type");
1194   EVT VT = Node->getValueType(0);
1195   SDLoc DL(Node);
1196   SmallVector<SDValue, 2> Ops;
1197   for (unsigned OpVal : VCI.OpVals)
1198     Ops.push_back(CurDAG->getTargetConstant(OpVal, DL, MVT::i32));
1199   SDValue Op = CurDAG->getNode(VCI.Opcode, DL, VCI.VecVT, Ops);
1200 
1201   if (VCI.VecVT == VT.getSimpleVT())
1202     ReplaceNode(Node, Op.getNode());
1203   else if (VT.getSizeInBits() == 128) {
1204     SDValue BitCast = CurDAG->getNode(ISD::BITCAST, DL, VT, Op);
1205     ReplaceNode(Node, BitCast.getNode());
1206     SelectCode(BitCast.getNode());
1207   } else { // float or double
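    // The floating-point registers overlay the high half of the vector
    // registers, so the scalar can be extracted as a subregister of Op.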
1208     unsigned SubRegIdx =
1209         (VT.getSizeInBits() == 32 ? SystemZ::subreg_h32 : SystemZ::subreg_h64);
1210     ReplaceNode(
1211         Node, CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, Op).getNode());
1212   }
1213   SelectCode(Op.getNode());
1214 }
1215 
1216 SDNode *SystemZDAGToDAGISel::loadPoolVectorConstant(APInt Val, EVT VT, SDLoc DL) {
1217   SDNode *ResNode;
1218   assert (VT.getSizeInBits() == 128);
1219 
1220   SDValue CP = CurDAG->getTargetConstantPool(
1221       ConstantInt::get(Type::getInt128Ty(*CurDAG->getContext()), Val),
1222       TLI->getPointerTy(CurDAG->getDataLayout()));
1223 
1224   EVT PtrVT = CP.getValueType();
1225   SDValue Ops[] = {
1226     SDValue(CurDAG->getMachineNode(SystemZ::LARL, DL, PtrVT, CP), 0),
1227     CurDAG->getTargetConstant(0, DL, PtrVT),
1228     CurDAG->getRegister(0, PtrVT),
1229     CurDAG->getEntryNode()
1230   };
1231   ResNode = CurDAG->getMachineNode(SystemZ::VL, DL, VT, MVT::Other, Ops);
1232 
1233   // Annotate ResNode with memory operand information so that MachineInstr
1234   // queries work properly. This e.g. gives the register allocator the
1235   // information required for rematerialization.
1236   MachineFunction& MF = CurDAG->getMachineFunction();
1237   MachineMemOperand *MemOp =
1238       MF.getMachineMemOperand(MachinePointerInfo::getConstantPool(MF),
1239                               MachineMemOperand::MOLoad, 16, Align(8));
1240 
1241   CurDAG->setNodeMemRefs(cast<MachineSDNode>(ResNode), {MemOp});
1242   return ResNode;
1243 }
1244 
1245 bool SystemZDAGToDAGISel::tryGather(SDNode *N, unsigned Opcode) {
1246   SDValue ElemV = N->getOperand(2);
1247   auto *ElemN = dyn_cast<ConstantSDNode>(ElemV);
1248   if (!ElemN)
1249     return false;
1250 
1251   unsigned Elem = ElemN->getZExtValue();
1252   EVT VT = N->getValueType(0);
1253   if (Elem >= VT.getVectorNumElements())
1254     return false;
1255 
1256   auto *Load = dyn_cast<LoadSDNode>(N->getOperand(1));
1257   if (!Load || !Load->hasNUsesOfValue(1, 0))
1258     return false;
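  // The gather instructions load exactly one element, so reject extending
  // loads, where the memory size differs from the value size.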
1259   if (Load->getMemoryVT().getSizeInBits() !=
1260       Load->getValueType(0).getSizeInBits())
1261     return false;
1262 
1263   SDValue Base, Disp, Index;
1264   if (!selectBDVAddr12Only(Load->getBasePtr(), ElemV, Base, Disp, Index) ||
1265       Index.getValueType() != VT.changeVectorElementTypeToInteger())
1266     return false;
1267 
1268   SDLoc DL(Load);
1269   SDValue Ops[] = {
1270     N->getOperand(0), Base, Disp, Index,
1271     CurDAG->getTargetConstant(Elem, DL, MVT::i32), Load->getChain()
1272   };
1273   SDNode *Res = CurDAG->getMachineNode(Opcode, DL, VT, MVT::Other, Ops);
1274   ReplaceUses(SDValue(Load, 1), SDValue(Res, 1));
1275   ReplaceNode(N, Res);
1276   return true;
1277 }
1278 
1279 bool SystemZDAGToDAGISel::tryScatter(StoreSDNode *Store, unsigned Opcode) {
1280   SDValue Value = Store->getValue();
1281   if (Value.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
1282     return false;
1283   if (Store->getMemoryVT().getSizeInBits() != Value.getValueSizeInBits())
1284     return false;
1285 
1286   SDValue ElemV = Value.getOperand(1);
1287   auto *ElemN = dyn_cast<ConstantSDNode>(ElemV);
1288   if (!ElemN)
1289     return false;
1290 
1291   SDValue Vec = Value.getOperand(0);
1292   EVT VT = Vec.getValueType();
1293   unsigned Elem = ElemN->getZExtValue();
1294   if (Elem >= VT.getVectorNumElements())
1295     return false;
1296 
1297   SDValue Base, Disp, Index;
1298   if (!selectBDVAddr12Only(Store->getBasePtr(), ElemV, Base, Disp, Index) ||
1299       Index.getValueType() != VT.changeVectorElementTypeToInteger())
1300     return false;
1301 
1302   SDLoc DL(Store);
1303   SDValue Ops[] = {
1304     Vec, Base, Disp, Index, CurDAG->getTargetConstant(Elem, DL, MVT::i32),
1305     Store->getChain()
1306   };
1307   ReplaceNode(Store, CurDAG->getMachineNode(Opcode, DL, MVT::Other, Ops));
1308   return true;
1309 }
1310 
1311 // Check whether or not the chain ending in StoreNode is suitable for
1312 // turning the {load; op; store} sequence into a single op on memory.
1313 static bool isFusableLoadOpStorePattern(StoreSDNode *StoreNode,
1314                                         SDValue StoredVal, SelectionDAG *CurDAG,
1315                                         LoadSDNode *&LoadNode,
1316                                         SDValue &InputChain) {
1317   // Is the stored value result 0 of the operation?
1318   if (StoredVal.getResNo() != 0)
1319     return false;
1320 
1321   // Are there other uses of the loaded value than the operation?
1322   if (!StoredVal.getNode()->hasNUsesOfValue(1, 0))
1323     return false;
1324 
1325   // Is the store non-extending and non-indexed?
1326   if (!ISD::isNormalStore(StoreNode) || StoreNode->isNonTemporal())
1327     return false;
1328 
1329   SDValue Load = StoredVal->getOperand(0);
1330   // Is the stored value a non-extending and non-indexed load?
1331   if (!ISD::isNormalLoad(Load.getNode()))
1332     return false;
1333 
1334   // Return LoadNode by reference.
1335   LoadNode = cast<LoadSDNode>(Load);
1336 
1337   // Is store the only read of the loaded value?
1338   if (!Load.hasOneUse())
1339     return false;
1340 
1341   // Is the address of the store the same as the load?
1342   if (LoadNode->getBasePtr() != StoreNode->getBasePtr() ||
1343       LoadNode->getOffset() != StoreNode->getOffset())
1344     return false;
1345 
1346   // Check if the chain is produced by the load or is a TokenFactor with
1347   // the load output chain as an operand. Return InputChain by reference.
1348   SDValue Chain = StoreNode->getChain();
1349 
1350   bool ChainCheck = false;
1351   if (Chain == Load.getValue(1)) {
1352     ChainCheck = true;
1353     InputChain = LoadNode->getChain();
1354   } else if (Chain.getOpcode() == ISD::TokenFactor) {
1355     SmallVector<SDValue, 4> ChainOps;
1356     SmallVector<const SDNode *, 4> LoopWorklist;
1357     SmallPtrSet<const SDNode *, 16> Visited;
1358     const unsigned int Max = 1024;
1359     for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i) {
1360       SDValue Op = Chain.getOperand(i);
1361       if (Op == Load.getValue(1)) {
1362         ChainCheck = true;
1363         // Drop Load, but keep its chain. No cycle check necessary.
1364         ChainOps.push_back(Load.getOperand(0));
1365         continue;
1366       }
1367       LoopWorklist.push_back(Op.getNode());
1368       ChainOps.push_back(Op);
1369     }
1370 
1371     if (ChainCheck) {
1372       // Add the other operand of StoredVal to worklist.
1373       for (SDValue Op : StoredVal->ops())
1374         if (Op.getNode() != LoadNode)
1375           LoopWorklist.push_back(Op.getNode());
1376 
1377       // Check if Load is reachable from any of the nodes in the worklist.
1378       if (SDNode::hasPredecessorHelper(Load.getNode(), Visited, LoopWorklist, Max,
1379                                        true))
1380         return false;
1381 
1382       // Make a new TokenFactor with all the other input chains except
1383       // for the load.
1384       InputChain = CurDAG->getNode(ISD::TokenFactor, SDLoc(Chain),
1385                                    MVT::Other, ChainOps);
1386     }
1387   }
1388   if (!ChainCheck)
1389     return false;
1390 
1391   return true;
1392 }
1393 
1394 // Change a chain of {load; op; store} of the same value into a simple op
1395 // through memory of that value, if the uses of the modified value and its
1396 // address are suitable.
1397 //
1398 // The tablegen memory operand pattern is currently not able to match
1399 // the case where the CC result of the original operation is used.
1400 //
1401 // See the equivalent routine in X86ISelDAGToDAG for further comments.
1402 bool SystemZDAGToDAGISel::tryFoldLoadStoreIntoMemOperand(SDNode *Node) {
1403   StoreSDNode *StoreNode = cast<StoreSDNode>(Node);
1404   SDValue StoredVal = StoreNode->getOperand(1);
1405   unsigned Opc = StoredVal->getOpcode();
1406   SDLoc DL(StoreNode);
1407 
1408   // Before we try to select anything, make sure this is a memory operand
1409   // size and opcode we can handle. Note that this must match the code below
1410   // that actually lowers the opcodes.
1411   EVT MemVT = StoreNode->getMemoryVT();
1412   unsigned NewOpc = 0;
1413   bool NegateOperand = false;
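       // SSUBO and USUBO are handled by negating the immediate and then using
       // the matching add-to-memory instruction (ASI/AGSI or ALSI/ALGSI).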
1414   switch (Opc) {
1415   default:
1416     return false;
1417   case SystemZISD::SSUBO:
1418     NegateOperand = true;
1419     [[fallthrough]];
1420   case SystemZISD::SADDO:
1421     if (MemVT == MVT::i32)
1422       NewOpc = SystemZ::ASI;
1423     else if (MemVT == MVT::i64)
1424       NewOpc = SystemZ::AGSI;
1425     else
1426       return false;
1427     break;
1428   case SystemZISD::USUBO:
1429     NegateOperand = true;
1430     [[fallthrough]];
1431   case SystemZISD::UADDO:
1432     if (MemVT == MVT::i32)
1433       NewOpc = SystemZ::ALSI;
1434     else if (MemVT == MVT::i64)
1435       NewOpc = SystemZ::ALGSI;
1436     else
1437       return false;
1438     break;
1439   }
1440 
1441   LoadSDNode *LoadNode = nullptr;
1442   SDValue InputChain;
1443   if (!isFusableLoadOpStorePattern(StoreNode, StoredVal, CurDAG, LoadNode,
1444                                    InputChain))
1445     return false;
1446 
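       // ASI, AGSI, ALSI and ALGSI take a signed 8-bit immediate, so the
       // (possibly negated) constant operand must fit into 8 bits.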
1447   SDValue Operand = StoredVal.getOperand(1);
1448   auto *OperandC = dyn_cast<ConstantSDNode>(Operand);
1449   if (!OperandC)
1450     return false;
1451   auto OperandV = OperandC->getAPIntValue();
1452   if (NegateOperand)
1453     OperandV = -OperandV;
1454   if (OperandV.getSignificantBits() > 8)
1455     return false;
1456   Operand = CurDAG->getTargetConstant(OperandV, DL, MemVT);
1457 
1458   SDValue Base, Disp;
1459   if (!selectBDAddr20Only(StoreNode->getBasePtr(), Base, Disp))
1460     return false;
1461 
1462   SDValue Ops[] = { Base, Disp, Operand, InputChain };
1463   MachineSDNode *Result =
1464     CurDAG->getMachineNode(NewOpc, DL, MVT::i32, MVT::Other, Ops);
1465   CurDAG->setNodeMemRefs(
1466       Result, {StoreNode->getMemOperand(), LoadNode->getMemOperand()});
1467 
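       // The new node produces the CC value as result 0 (i32) and the chain as
       // result 1: users of the store's chain move to the new chain, and users
       // of the original operation's CC result move to the new CC value.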
1468   ReplaceUses(SDValue(StoreNode, 0), SDValue(Result, 1));
1469   ReplaceUses(SDValue(StoredVal.getNode(), 1), SDValue(Result, 0));
1470   CurDAG->RemoveDeadNode(Node);
1471   return true;
1472 }
1473 
1474 bool SystemZDAGToDAGISel::canUseBlockOperation(StoreSDNode *Store,
1475                                                LoadSDNode *Load) const {
1476   // Check that the two memory operands have the same size.
1477   if (Load->getMemoryVT() != Store->getMemoryVT())
1478     return false;
1479 
1480   // Volatility stops an access from being decomposed.
1481   if (Load->isVolatile() || Store->isVolatile())
1482     return false;
1483 
1484   // There's no chance of overlap if the load is invariant.
1485   if (Load->isInvariant() && Load->isDereferenceable())
1486     return true;
1487 
1488   // Otherwise we need to check whether there's an alias.
1489   const Value *V1 = Load->getMemOperand()->getValue();
1490   const Value *V2 = Store->getMemOperand()->getValue();
1491   if (!V1 || !V2)
1492     return false;
1493 
1494   // Reject the case where load and store access exactly the same bytes.
1495   uint64_t Size = Load->getMemoryVT().getStoreSize();
1496   int64_t End1 = Load->getSrcValueOffset() + Size;
1497   int64_t End2 = Store->getSrcValueOffset() + Size;
1498   if (V1 == V2 && End1 == End2)
1499     return false;
1500 
1501   return BatchAA->isNoAlias(MemoryLocation(V1, End1, Load->getAAInfo()),
1502                             MemoryLocation(V2, End2, Store->getAAInfo()));
1503 }
1504 
1505 bool SystemZDAGToDAGISel::storeLoadCanUseMVC(SDNode *N) const {
1506   auto *Store = cast<StoreSDNode>(N);
1507   auto *Load = cast<LoadSDNode>(Store->getValue());
1508 
1509   // Prefer not to use MVC if either address can use ... RELATIVE LONG
1510   // instructions.
1511   uint64_t Size = Load->getMemoryVT().getStoreSize();
1512   if (Size > 1 && Size <= 8) {
1513     // Prefer LHRL, LRL and LGRL.
1514     if (SystemZISD::isPCREL(Load->getBasePtr().getOpcode()))
1515       return false;
1516     // Prefer STHRL, STRL and STGRL.
1517     if (SystemZISD::isPCREL(Store->getBasePtr().getOpcode()))
1518       return false;
1519   }
1520 
1521   return canUseBlockOperation(Store, Load);
1522 }
1523 
1524 bool SystemZDAGToDAGISel::storeLoadCanUseBlockBinary(SDNode *N,
1525                                                      unsigned I) const {
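       // Require that the load given by operand I does not overlap the store,
       // and that the other load (operand 1 - I) is non-volatile and has the
       // same size.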
1526   auto *StoreA = cast<StoreSDNode>(N);
1527   auto *LoadA = cast<LoadSDNode>(StoreA->getValue().getOperand(1 - I));
1528   auto *LoadB = cast<LoadSDNode>(StoreA->getValue().getOperand(I));
1529   return !LoadA->isVolatile() && LoadA->getMemoryVT() == LoadB->getMemoryVT() &&
1530          canUseBlockOperation(StoreA, LoadB);
1531 }
1532 
1533 bool SystemZDAGToDAGISel::storeLoadIsAligned(SDNode *N) const {
1534 
1535   auto *MemAccess = cast<MemSDNode>(N);
1536   auto *LdSt = dyn_cast<LSBaseSDNode>(MemAccess);
1537   TypeSize StoreSize = MemAccess->getMemoryVT().getStoreSize();
1538   SDValue BasePtr = MemAccess->getBasePtr();
1539   MachineMemOperand *MMO = MemAccess->getMemOperand();
1540   assert(MMO && "Expected a memory operand.");
1541 
1542   // The memory access must have a proper alignment and no index register.
1543   // Only load and store nodes have the offset operand (atomic loads do not).
1544   if (MemAccess->getAlign().value() < StoreSize ||
1545       (LdSt && !LdSt->getOffset().isUndef()))
1546     return false;
1547 
1548   // The MMO must not have an unaligned offset.
1549   if (MMO->getOffset() % StoreSize != 0)
1550     return false;
1551 
1552   // An access to the GOT or the constant pool is sufficiently aligned.
1553   if (const PseudoSourceValue *PSV = MMO->getPseudoValue())
1554     if ((PSV->isGOT() || PSV->isConstantPool()))
1555       return true;
1556 
1557   // Check the alignment of a Global Address.
1558   if (BasePtr.getNumOperands())
1559     if (GlobalAddressSDNode *GA =
1560         dyn_cast<GlobalAddressSDNode>(BasePtr.getOperand(0))) {
1561       // The immediate offset must be aligned.
1562       if (GA->getOffset() % StoreSize != 0)
1563         return false;
1564 
1565       // The alignment of the symbol itself must be at least the store size.
1566       const GlobalValue *GV = GA->getGlobal();
1567       const DataLayout &DL = GV->getDataLayout();
1568       if (GV->getPointerAlignment(DL).value() < StoreSize)
1569         return false;
1570     }
1571 
1572   return true;
1573 }
1574 
1575 ISD::LoadExtType SystemZDAGToDAGISel::getLoadExtType(SDNode *N) const {
1576   ISD::LoadExtType ETy;
1577   if (auto *L = dyn_cast<LoadSDNode>(N))
1578     ETy = L->getExtensionType();
1579   else if (auto *AL = dyn_cast<AtomicSDNode>(N))
1580     ETy = AL->getExtensionType();
1581   else
1582     llvm_unreachable("Unknown load node type.");
1583   return ETy;
1584 }
1585 
1586 void SystemZDAGToDAGISel::Select(SDNode *Node) {
1587   // If we have a custom node, we already have selected!
1588   if (Node->isMachineOpcode()) {
1589     LLVM_DEBUG(errs() << "== "; Node->dump(CurDAG); errs() << "\n");
1590     Node->setNodeId(-1);
1591     return;
1592   }
1593 
1594   unsigned Opcode = Node->getOpcode();
1595   switch (Opcode) {
1596   case ISD::OR:
1597     if (Node->getOperand(1).getOpcode() != ISD::Constant)
1598       if (tryRxSBG(Node, SystemZ::ROSBG))
1599         return;
1600     goto or_xor;
1601 
1602   case ISD::XOR:
1603     if (Node->getOperand(1).getOpcode() != ISD::Constant)
1604       if (tryRxSBG(Node, SystemZ::RXSBG))
1605         return;
1606     // Fall through.
1607   or_xor:
1608     // If this is a 64-bit operation in which both 32-bit halves are nonzero,
1609     // split the operation into two.  If both operands here happen to be
1610     // constant, leave this to common code to optimize.
1611     if (Node->getValueType(0) == MVT::i64 &&
1612         Node->getOperand(0).getOpcode() != ISD::Constant)
1613       if (auto *Op1 = dyn_cast<ConstantSDNode>(Node->getOperand(1))) {
1614         uint64_t Val = Op1->getZExtValue();
1615         // Don't split the operation if we can match one of the combined
1616         // logical operations provided by miscellaneous-extensions-3.
1617         if (Subtarget->hasMiscellaneousExtensions3()) {
1618           unsigned ChildOpcode = Node->getOperand(0).getOpcode();
1619           // Check whether this expression matches NAND/NOR/NXOR.
1620           if (Val == (uint64_t)-1 && Opcode == ISD::XOR)
1621             if (ChildOpcode == ISD::AND || ChildOpcode == ISD::OR ||
1622                 ChildOpcode == ISD::XOR)
1623               break;
1624           // Check whether this expression matches OR-with-complement
1625           // (or matches an alternate pattern for NXOR).
1626           if (ChildOpcode == ISD::XOR) {
1627             auto Op0 = Node->getOperand(0);
1628             if (auto *Op0Op1 = dyn_cast<ConstantSDNode>(Op0->getOperand(1)))
1629               if (Op0Op1->getZExtValue() == (uint64_t)-1)
1630                 break;
1631           }
1632         }
1633         // Don't split an XOR with -1 as LCGR/AGHI is more compact.
1634         if (Opcode == ISD::XOR && Op1->isAllOnes())
1635           break;
1636         if (!SystemZ::isImmLF(Val) && !SystemZ::isImmHF(Val)) {
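               // Split the constant into its high and low 32-bit halves;
               // Val - uint32_t(Val) is Val with the low 32 bits cleared.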
1637           splitLargeImmediate(Opcode, Node, Node->getOperand(0),
1638                               Val - uint32_t(Val), uint32_t(Val));
1639           return;
1640         }
1641       }
1642     break;
1643 
1644   case ISD::AND:
1645     if (Node->getOperand(1).getOpcode() != ISD::Constant)
1646       if (tryRxSBG(Node, SystemZ::RNSBG))
1647         return;
1648     [[fallthrough]];
1649   case ISD::ROTL:
1650   case ISD::SHL:
1651   case ISD::SRL:
1652   case ISD::ZERO_EXTEND:
1653     if (tryRISBGZero(Node))
1654       return;
1655     break;
1656 
1657   case ISD::BSWAP:
1658     if (Node->getValueType(0) == MVT::i128) {
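           // Byte-swap a 128-bit value by permuting its bytes with VPERM,
           // using a permutation mask loaded from the constant pool.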
1659       SDLoc DL(Node);
1660       SDValue Src = Node->getOperand(0);
1661       Src = CurDAG->getNode(ISD::BITCAST, DL, MVT::v16i8, Src);
1662 
1663       uint64_t Bytes[2] = { 0x0706050403020100ULL, 0x0f0e0d0c0b0a0908ULL };
1664       SDNode *Mask = loadPoolVectorConstant(APInt(128, Bytes), MVT::v16i8, DL);
1665       SDValue Ops[] = { Src, Src, SDValue(Mask, 0) };
1666       SDValue Res = SDValue(CurDAG->getMachineNode(SystemZ::VPERM, DL,
1667                                                    MVT::v16i8, Ops), 0);
1668 
1669       Res = CurDAG->getNode(ISD::BITCAST, DL, MVT::i128, Res);
1670       SDNode *ResNode = Res.getNode();
1671       ReplaceNode(Node, ResNode);
1672       SelectCode(Src.getNode());
1673       SelectCode(ResNode);
1674       return;
1675     }
1676     break;
1677 
1678   case ISD::Constant:
1679     // If this is a 64-bit constant that is out of the range of LLILF,
1680     // LLIHF and LGFI, split it into two 32-bit pieces.
1681     if (Node->getValueType(0) == MVT::i64) {
1682       uint64_t Val = Node->getAsZExtVal();
1683       if (!SystemZ::isImmLF(Val) && !SystemZ::isImmHF(Val) && !isInt<32>(Val)) {
1684         splitLargeImmediate(ISD::OR, Node, SDValue(), Val - uint32_t(Val),
1685                             uint32_t(Val));
1686         return;
1687       }
1688     }
1689     if (Node->getValueType(0) == MVT::i128) {
1690       const APInt &Val = Node->getAsAPIntVal();
1691       SystemZVectorConstantInfo VCI(Val);
1692       if (VCI.isVectorConstantLegal(*Subtarget)) {
1693         loadVectorConstant(VCI, Node);
1694         return;
1695       }
1696       // If we can't materialize the constant we need to use a literal pool.
1697       SDNode *ResNode = loadPoolVectorConstant(Val, MVT::i128, SDLoc(Node));
1698       ReplaceNode(Node, ResNode);
1699       return;
1700     }
1701     break;
1702 
1703   case SystemZISD::SELECT_CCMASK: {
1704     SDValue Op0 = Node->getOperand(0);
1705     SDValue Op1 = Node->getOperand(1);
1706     // Prefer to put any load first, so that it can be matched as a
1707     // conditional load.  Likewise for constants in range for LOCHI.
1708     if ((Op1.getOpcode() == ISD::LOAD && Op0.getOpcode() != ISD::LOAD) ||
1709         (Subtarget->hasLoadStoreOnCond2() &&
1710          Node->getValueType(0).isInteger() &&
1711          Node->getValueType(0).getSizeInBits() <= 64 &&
1712          Op1.getOpcode() == ISD::Constant &&
1713          isInt<16>(cast<ConstantSDNode>(Op1)->getSExtValue()) &&
1714          !(Op0.getOpcode() == ISD::Constant &&
1715            isInt<16>(cast<ConstantSDNode>(Op0)->getSExtValue())))) {
1716       SDValue CCValid = Node->getOperand(2);
1717       SDValue CCMask = Node->getOperand(3);
1718       uint64_t ConstCCValid = CCValid.getNode()->getAsZExtVal();
1719       uint64_t ConstCCMask = CCMask.getNode()->getAsZExtVal();
1720       // Invert the condition.
1721       CCMask = CurDAG->getTargetConstant(ConstCCValid ^ ConstCCMask,
1722                                          SDLoc(Node), CCMask.getValueType());
1723       SDValue Op4 = Node->getOperand(4);
1724       SDNode *UpdatedNode =
1725         CurDAG->UpdateNodeOperands(Node, Op1, Op0, CCValid, CCMask, Op4);
1726       if (UpdatedNode != Node) {
1727         // In case this node already exists then replace Node with it.
1728         ReplaceNode(Node, UpdatedNode);
1729         Node = UpdatedNode;
1730       }
1731     }
1732     break;
1733   }
1734 
1735   case ISD::INSERT_VECTOR_ELT: {
1736     EVT VT = Node->getValueType(0);
1737     unsigned ElemBitSize = VT.getScalarSizeInBits();
1738     if (ElemBitSize == 32) {
1739       if (tryGather(Node, SystemZ::VGEF))
1740         return;
1741     } else if (ElemBitSize == 64) {
1742       if (tryGather(Node, SystemZ::VGEG))
1743         return;
1744     }
1745     break;
1746   }
1747 
1748   case ISD::BUILD_VECTOR: {
1749     auto *BVN = cast<BuildVectorSDNode>(Node);
1750     SystemZVectorConstantInfo VCI(BVN);
1751     if (VCI.isVectorConstantLegal(*Subtarget)) {
1752       loadVectorConstant(VCI, Node);
1753       return;
1754     }
1755     break;
1756   }
1757 
1758   case ISD::ConstantFP: {
1759     APFloat Imm = cast<ConstantFPSDNode>(Node)->getValueAPF();
1760     if (Imm.isZero() || Imm.isNegZero())
1761       break;
1762     SystemZVectorConstantInfo VCI(Imm);
1763     bool Success = VCI.isVectorConstantLegal(*Subtarget); (void)Success;
1764     assert(Success && "Expected legal FP immediate");
1765     loadVectorConstant(VCI, Node);
1766     return;
1767   }
1768 
1769   case ISD::STORE: {
1770     if (tryFoldLoadStoreIntoMemOperand(Node))
1771       return;
1772     auto *Store = cast<StoreSDNode>(Node);
1773     unsigned ElemBitSize = Store->getValue().getValueSizeInBits();
1774     if (ElemBitSize == 32) {
1775       if (tryScatter(Store, SystemZ::VSCEF))
1776         return;
1777     } else if (ElemBitSize == 64) {
1778       if (tryScatter(Store, SystemZ::VSCEG))
1779         return;
1780     }
1781     break;
1782   }
1783 
1784   case ISD::ATOMIC_STORE: {
1785     auto *AtomOp = cast<AtomicSDNode>(Node);
1786     // Replace the atomic_store with a regular store and select it. This is
1787     // OK since we know all store instructions of 8 bytes or less are atomic,
1788     // and the 16-byte case is already handled during lowering.
1789     StoreSDNode *St = cast<StoreSDNode>(CurDAG->getTruncStore(
1790          AtomOp->getChain(), SDLoc(AtomOp), AtomOp->getVal(),
1791          AtomOp->getBasePtr(), AtomOp->getMemoryVT(), AtomOp->getMemOperand()));
1792     assert(St->getMemOperand()->isAtomic() && "Broken MMO.");
1793     SDNode *Chain = St;
1794     // We have to enforce sequential consistency by performing a
1795     // serialization operation after the store.
1796     if (AtomOp->getSuccessOrdering() == AtomicOrdering::SequentiallyConsistent)
1797       Chain = CurDAG->getMachineNode(SystemZ::Serialize, SDLoc(AtomOp),
1798                                      MVT::Other, SDValue(Chain, 0));
1799     ReplaceNode(Node, Chain);
1800     SelectCode(St);
1801     return;
1802   }
1803   }
1804 
1805   SelectCode(Node);
1806 }
1807 
1808 bool SystemZDAGToDAGISel::SelectInlineAsmMemoryOperand(
1809     const SDValue &Op, InlineAsm::ConstraintCode ConstraintID,
1810     std::vector<SDValue> &OutOps) {
1811   SystemZAddressingMode::AddrForm Form;
1812   SystemZAddressingMode::DispRange DispRange;
1813   SDValue Base, Disp, Index;
1814 
1815   switch(ConstraintID) {
1816   default:
1817     llvm_unreachable("Unexpected asm memory constraint");
1818   case InlineAsm::ConstraintCode::i:
1819   case InlineAsm::ConstraintCode::Q:
1820   case InlineAsm::ConstraintCode::ZQ:
1821     // Accept an address with a short displacement, but no index.
1822     Form = SystemZAddressingMode::FormBD;
1823     DispRange = SystemZAddressingMode::Disp12Only;
1824     break;
1825   case InlineAsm::ConstraintCode::R:
1826   case InlineAsm::ConstraintCode::ZR:
1827     // Accept an address with a short displacement and an index.
1828     Form = SystemZAddressingMode::FormBDXNormal;
1829     DispRange = SystemZAddressingMode::Disp12Only;
1830     break;
1831   case InlineAsm::ConstraintCode::S:
1832   case InlineAsm::ConstraintCode::ZS:
1833     // Accept an address with a long displacement, but no index.
1834     Form = SystemZAddressingMode::FormBD;
1835     DispRange = SystemZAddressingMode::Disp20Only;
1836     break;
1837   case InlineAsm::ConstraintCode::T:
1838   case InlineAsm::ConstraintCode::m:
1839   case InlineAsm::ConstraintCode::o:
1840   case InlineAsm::ConstraintCode::p:
1841   case InlineAsm::ConstraintCode::ZT:
1842     // Accept an address with a long displacement and an index.
1843     // m works the same as T, as this is the most general case.
1844     // We don't really have any special handling of "offsettable"
1845     // memory addresses, so just treat o the same as m.
1846     Form = SystemZAddressingMode::FormBDXNormal;
1847     DispRange = SystemZAddressingMode::Disp20Only;
1848     break;
1849   }
1850 
1851   if (selectBDXAddr(Form, DispRange, Op, Base, Disp, Index)) {
1852     const TargetRegisterClass *TRC =
1853       Subtarget->getRegisterInfo()->getPointerRegClass(*MF);
1854     SDLoc DL(Base);
1855     SDValue RC = CurDAG->getTargetConstant(TRC->getID(), DL, MVT::i32);
1856 
1857     // Make sure that the base address doesn't go into %r0.
1858     // If it's a TargetFrameIndex or a fixed register, we shouldn't do anything.
1859     if (Base.getOpcode() != ISD::TargetFrameIndex &&
1860         Base.getOpcode() != ISD::Register) {
1861       Base =
1862         SDValue(CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
1863                                        DL, Base.getValueType(),
1864                                        Base, RC), 0);
1865     }
1866 
1867     // Make sure that the index register isn't assigned to %r0 either.
1868     if (Index.getOpcode() != ISD::Register) {
1869       Index =
1870         SDValue(CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
1871                                        DL, Index.getValueType(),
1872                                        Index, RC), 0);
1873     }
1874 
1875     OutOps.push_back(Base);
1876     OutOps.push_back(Disp);
1877     OutOps.push_back(Index);
1878     return false;
1879   }
1880 
1881   return true;
1882 }
1883 
1884 // IsProfitableToFold - Returns true if it is profitable to fold the
1885 // specified operand node N of U during instruction selection starting at Root.
1886 bool
1887 SystemZDAGToDAGISel::IsProfitableToFold(SDValue N, SDNode *U,
1888                                         SDNode *Root) const {
1889   // We want to avoid folding a LOAD into an ICMP node if as a result
1890   // we would be forced to spill the condition code into a GPR.
1891   if (N.getOpcode() == ISD::LOAD && U->getOpcode() == SystemZISD::ICMP) {
1892     if (!N.hasOneUse() || !U->hasOneUse())
1893       return false;
1894 
1895     // The user of the CC value will usually be a CopyToReg into the
1896     // physical CC register, which in turn is glued and chained to the
1897     // actual instruction that uses the CC value.  Bail out if we have
1898     // anything other than that.
1899     SDNode *CCUser = *U->user_begin();
1900     SDNode *CCRegUser = nullptr;
1901     if (CCUser->getOpcode() == ISD::CopyToReg ||
1902         cast<RegisterSDNode>(CCUser->getOperand(1))->getReg() == SystemZ::CC) {
1903       for (auto *U : CCUser->users()) {
1904         if (CCRegUser == nullptr)
1905           CCRegUser = U;
1906         else if (CCRegUser != U)
1907           return false;
1908       }
1909     }
1910     if (CCRegUser == nullptr)
1911       return false;
1912 
1913     // If the actual instruction is a branch, the only thing that remains to be
1914     // checked is whether the CCUser chain is a predecessor of the load.
1915     if (CCRegUser->isMachineOpcode() &&
1916         CCRegUser->getMachineOpcode() == SystemZ::BRC)
1917       return !N->isPredecessorOf(CCUser->getOperand(0).getNode());
1918 
1919     // Otherwise, the instruction may have multiple operands, and we need to
1920     // verify that none of them are a predecessor of the load.  This is exactly
1921     // the same check that would be done by common code if the CC setter were
1922     // glued to the CC user, so simply invoke that check here.
1923     if (!IsLegalToFold(N, U, CCRegUser, OptLevel, false))
1924       return false;
1925   }
1926 
1927   return true;
1928 }
1929 
1930 namespace {
1931 // Represents a sequence for extracting a 0/1 value from an IPM result:
1932 // (((X ^ XORValue) + AddValue) >> Bit)
1933 struct IPMConversion {
1934   IPMConversion(unsigned xorValue, int64_t addValue, unsigned bit)
1935     : XORValue(xorValue), AddValue(addValue), Bit(bit) {}
1936 
1937   int64_t XORValue;
1938   int64_t AddValue;
1939   unsigned Bit;
1940 };
1941 } // end anonymous namespace
1942 
1943 // Return a sequence for getting a 1 from an IPM result when CC has a
1944 // value in CCMask and a 0 when CC has a value in CCValid & ~CCMask.
1945 // The handling of CC values outside CCValid doesn't matter.
1946 static IPMConversion getIPMConversion(unsigned CCValid, unsigned CCMask) {
1947   // Deal with cases where the result can be taken directly from a bit
1948   // of the IPM result.
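       // (CC values 1 and 3 are exactly the ones with the low CC bit set, and
       // bit SystemZ::IPM_CC of the IPM result is that low CC bit; likewise
       // bit SystemZ::IPM_CC + 1 is the high CC bit, which is set for CC
       // values 2 and 3.)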
1949   if (CCMask == (CCValid & (SystemZ::CCMASK_1 | SystemZ::CCMASK_3)))
1950     return IPMConversion(0, 0, SystemZ::IPM_CC);
1951   if (CCMask == (CCValid & (SystemZ::CCMASK_2 | SystemZ::CCMASK_3)))
1952     return IPMConversion(0, 0, SystemZ::IPM_CC + 1);
1953 
1954   // Deal with cases where we can add a value to force the sign bit
1955   // to contain the right value.  Putting the bit in 31 means we can
1956   // use SRL rather than RISBG(L), and also makes it easier to get a
1957   // 0/-1 value, so it has priority over the other tests below.
1958   //
1959   // These sequences rely on the fact that the upper two bits of the
1960   // IPM result are zero.
1961   uint64_t TopBit = uint64_t(1) << 31;
1962   if (CCMask == (CCValid & SystemZ::CCMASK_0))
1963     return IPMConversion(0, -(1 << SystemZ::IPM_CC), 31);
1964   if (CCMask == (CCValid & (SystemZ::CCMASK_0 | SystemZ::CCMASK_1)))
1965     return IPMConversion(0, -(2 << SystemZ::IPM_CC), 31);
1966   if (CCMask == (CCValid & (SystemZ::CCMASK_0
1967                             | SystemZ::CCMASK_1
1968                             | SystemZ::CCMASK_2)))
1969     return IPMConversion(0, -(3 << SystemZ::IPM_CC), 31);
1970   if (CCMask == (CCValid & SystemZ::CCMASK_3))
1971     return IPMConversion(0, TopBit - (3 << SystemZ::IPM_CC), 31);
1972   if (CCMask == (CCValid & (SystemZ::CCMASK_1
1973                             | SystemZ::CCMASK_2
1974                             | SystemZ::CCMASK_3)))
1975     return IPMConversion(0, TopBit - (1 << SystemZ::IPM_CC), 31);
1976 
1977   // Next try inverting the value and testing a bit.  0/1 could be
1978   // handled this way too, but we dealt with that case above.
1979   if (CCMask == (CCValid & (SystemZ::CCMASK_0 | SystemZ::CCMASK_2)))
1980     return IPMConversion(-1, 0, SystemZ::IPM_CC);
1981 
1982   // Handle cases where adding a value forces a non-sign bit to contain
1983   // the right value.
1984   if (CCMask == (CCValid & (SystemZ::CCMASK_1 | SystemZ::CCMASK_2)))
1985     return IPMConversion(0, 1 << SystemZ::IPM_CC, SystemZ::IPM_CC + 1);
1986   if (CCMask == (CCValid & (SystemZ::CCMASK_0 | SystemZ::CCMASK_3)))
1987     return IPMConversion(0, -(1 << SystemZ::IPM_CC), SystemZ::IPM_CC + 1);
1988 
1989   // The remaining cases are 1, 2, 0/1/3 and 0/2/3.  All of these
1990   // can be done by inverting the low CC bit and applying one of the
1991   // sign-based extractions above.
1992   if (CCMask == (CCValid & SystemZ::CCMASK_1))
1993     return IPMConversion(1 << SystemZ::IPM_CC, -(1 << SystemZ::IPM_CC), 31);
1994   if (CCMask == (CCValid & SystemZ::CCMASK_2))
1995     return IPMConversion(1 << SystemZ::IPM_CC,
1996                          TopBit - (3 << SystemZ::IPM_CC), 31);
1997   if (CCMask == (CCValid & (SystemZ::CCMASK_0
1998                             | SystemZ::CCMASK_1
1999                             | SystemZ::CCMASK_3)))
2000     return IPMConversion(1 << SystemZ::IPM_CC, -(3 << SystemZ::IPM_CC), 31);
2001   if (CCMask == (CCValid & (SystemZ::CCMASK_0
2002                             | SystemZ::CCMASK_2
2003                             | SystemZ::CCMASK_3)))
2004     return IPMConversion(1 << SystemZ::IPM_CC,
2005                          TopBit - (1 << SystemZ::IPM_CC), 31);
2006 
2007   llvm_unreachable("Unexpected CC combination");
2008 }
2009 
2010 SDValue SystemZDAGToDAGISel::expandSelectBoolean(SDNode *Node) {
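       // We can only expand a select of the constants 1/-1 versus 0 (with
       // constant CCValid/CCMask operands) into an IPM-based sequence; return
       // an empty SDValue otherwise.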
2011   auto *TrueOp = dyn_cast<ConstantSDNode>(Node->getOperand(0));
2012   auto *FalseOp = dyn_cast<ConstantSDNode>(Node->getOperand(1));
2013   if (!TrueOp || !FalseOp)
2014     return SDValue();
2015   if (FalseOp->getZExtValue() != 0)
2016     return SDValue();
2017   if (TrueOp->getSExtValue() != 1 && TrueOp->getSExtValue() != -1)
2018     return SDValue();
2019 
2020   auto *CCValidOp = dyn_cast<ConstantSDNode>(Node->getOperand(2));
2021   auto *CCMaskOp = dyn_cast<ConstantSDNode>(Node->getOperand(3));
2022   if (!CCValidOp || !CCMaskOp)
2023     return SDValue();
2024   int CCValid = CCValidOp->getZExtValue();
2025   int CCMask = CCMaskOp->getZExtValue();
2026 
2027   SDLoc DL(Node);
2028   SDValue CCReg = Node->getOperand(4);
2029   IPMConversion IPM = getIPMConversion(CCValid, CCMask);
2030   SDValue Result = CurDAG->getNode(SystemZISD::IPM, DL, MVT::i32, CCReg);
2031 
2032   if (IPM.XORValue)
2033     Result = CurDAG->getNode(ISD::XOR, DL, MVT::i32, Result,
2034                              CurDAG->getConstant(IPM.XORValue, DL, MVT::i32));
2035 
2036   if (IPM.AddValue)
2037     Result =
2038         CurDAG->getNode(ISD::ADD, DL, MVT::i32, Result,
2039                         CurDAG->getSignedConstant(IPM.AddValue, DL, MVT::i32));
2040 
2041   EVT VT = Node->getValueType(0);
2042   if (VT == MVT::i32 && IPM.Bit == 31) {
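         // A logical shift produces the 0/1 result, an arithmetic shift the
         // 0/-1 result.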
2043     unsigned ShiftOp = TrueOp->getSExtValue() == 1 ? ISD::SRL : ISD::SRA;
2044     Result = CurDAG->getNode(ShiftOp, DL, MVT::i32, Result,
2045                              CurDAG->getConstant(IPM.Bit, DL, MVT::i32));
2046   } else {
2047     if (VT != MVT::i32)
2048       Result = CurDAG->getNode(ISD::ANY_EXTEND, DL, VT, Result);
2049 
2050     if (TrueOp->getSExtValue() == 1) {
2051       // The SRL/AND sequence should get optimized to an RISBG.
2052       Result = CurDAG->getNode(ISD::SRL, DL, VT, Result,
2053                                CurDAG->getConstant(IPM.Bit, DL, MVT::i32));
2054       Result = CurDAG->getNode(ISD::AND, DL, VT, Result,
2055                                CurDAG->getConstant(1, DL, VT));
2056     } else {
2057       // Sign-extend from IPM.Bit using a pair of shifts.
2058       int ShlAmt = VT.getSizeInBits() - 1 - IPM.Bit;
2059       int SraAmt = VT.getSizeInBits() - 1;
2060       Result = CurDAG->getNode(ISD::SHL, DL, VT, Result,
2061                                CurDAG->getConstant(ShlAmt, DL, MVT::i32));
2062       Result = CurDAG->getNode(ISD::SRA, DL, VT, Result,
2063                                CurDAG->getConstant(SraAmt, DL, MVT::i32));
2064     }
2065   }
2066 
2067   return Result;
2068 }
2069 
2070 bool SystemZDAGToDAGISel::shouldSelectForReassoc(SDNode *N) const {
2071   EVT VT = N->getValueType(0);
2072   assert(VT.isFloatingPoint() && "Expected FP SDNode");
2073   return N->getFlags().hasAllowReassociation() &&
2074          N->getFlags().hasNoSignedZeros() && Subtarget->hasVector() &&
2075          (VT != MVT::f32 || Subtarget->hasVectorEnhancements1()) &&
2076          !N->isStrictFPOpcode();
2077 }
2078 
2079 void SystemZDAGToDAGISel::PreprocessISelDAG() {
2080   // If we have conditional immediate loads, we always prefer
2081   // using those over an IPM sequence.
2082   if (Subtarget->hasLoadStoreOnCond2())
2083     return;
2084 
2085   bool MadeChange = false;
2086 
2087   for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
2088                                        E = CurDAG->allnodes_end();
2089        I != E;) {
2090     SDNode *N = &*I++;
2091     if (N->use_empty())
2092       continue;
2093 
2094     SDValue Res;
2095     switch (N->getOpcode()) {
2096     default: break;
2097     case SystemZISD::SELECT_CCMASK:
2098       Res = expandSelectBoolean(N);
2099       break;
2100     }
2101 
2102     if (Res) {
2103       LLVM_DEBUG(dbgs() << "SystemZ DAG preprocessing replacing:\nOld:    ");
2104       LLVM_DEBUG(N->dump(CurDAG));
2105       LLVM_DEBUG(dbgs() << "\nNew: ");
2106       LLVM_DEBUG(Res.getNode()->dump(CurDAG));
2107       LLVM_DEBUG(dbgs() << "\n");
2108 
2109       CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Res);
2110       MadeChange = true;
2111     }
2112   }
2113 
2114   if (MadeChange)
2115     CurDAG->RemoveDeadNodes();
2116 }
2117