//===-- SystemZISelLowering.h - SystemZ DAG lowering interface --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that SystemZ uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_SYSTEMZ_SYSTEMZISELLOWERING_H
#define LLVM_LIB_TARGET_SYSTEMZ_SYSTEMZISELLOWERING_H

#include "SystemZ.h"
#include "SystemZInstrInfo.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLowering.h"

namespace llvm {
namespace SystemZISD {
enum NodeType : unsigned {
  FIRST_NUMBER = ISD::BUILTIN_OP_END,

  // Return with a flag operand.  Operand 0 is the chain operand.
  RET_FLAG,

  // Calls a function.  Operand 0 is the chain operand and operand 1
  // is the target address.  The arguments start at operand 2.
  // There is an optional glue operand at the end.
  CALL,
  SIBCALL,

  // TLS calls.  Like regular calls, except operand 1 is the TLS symbol.
  // (The call target is implicitly __tls_get_offset.)
  TLS_GDCALL,
  TLS_LDCALL,

  // Wraps a TargetGlobalAddress that should be loaded using PC-relative
  // accesses (LARL).  Operand 0 is the address.
  PCREL_WRAPPER,

  // Used in cases where an offset is applied to a TargetGlobalAddress.
  // Operand 0 is the full TargetGlobalAddress and operand 1 is a
  // PCREL_WRAPPER for an anchor point.  This is used so that we can
  // cheaply refer to either the full address or the anchor point
  // as a register base.
  PCREL_OFFSET,

  // Integer comparisons.  There are three operands: the two values
  // to compare, and an integer of type SystemZICMP.
  ICMP,

  // Floating-point comparisons.  The two operands are the values to compare.
  FCMP,

  // Test under mask.  The first operand is ANDed with the second operand
  // and the condition codes are set on the result.  The third operand is
  // a boolean that is true if the condition codes need to distinguish
  // between CCMASK_TM_MIXED_MSB_0 and CCMASK_TM_MIXED_MSB_1 (which the
  // register forms do but the memory forms don't).
  TM,

  // Branches if a condition is true.  Operand 0 is the chain operand;
  // operand 1 is the 4-bit condition-code mask, with bit N in
  // big-endian order meaning "branch if CC=N"; operand 2 is the
  // target block and operand 3 is the flag operand.
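  //
  // For example (an illustrative reading of the encoding, using the
  // CCMASK_* values from SystemZ.h): a mask of 8 (0b1000) means
  // "branch if CC=0", and a mask of 10 (0b1010) means "branch if
  // CC=0 or CC=2".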
  BR_CCMASK,

  // Selects between operand 0 and operand 1.  Operand 2 is the
  // mask of condition-code values for which operand 0 should be
  // chosen over operand 1; it has the same form as BR_CCMASK.
  // Operand 3 is the flag operand.
  SELECT_CCMASK,

  // Evaluates to the gap between the stack pointer and the
  // base of the dynamically-allocatable area.
  ADJDYNALLOC,

  // For allocating stack space when using the stack clash protector.
  // Allocation is performed by block, and each block is probed.
  PROBED_ALLOCA,

  // Count the number of bits set in each byte of operand 0.
  POPCNT,

  // Wrappers around the ISD opcodes of the same name.  The output is GR128.
  // Input operands may be GR64 or GR32, depending on the instruction.
  SMUL_LOHI,
  UMUL_LOHI,
  SDIVREM,
  UDIVREM,

  // Add/subtract with overflow/carry.  These have the same operands as
  // the corresponding standard operations, except with the carry flag
  // replaced by a condition code value.
  SADDO, SSUBO, UADDO, USUBO, ADDCARRY, SUBCARRY,

  // Set the condition code from a boolean value in operand 0.
  // Operand 1 is a mask of all condition-code values that may result from
  // this operation, and operand 2 is a mask of condition-code values that
  // may result if the boolean is true.
  // Note that this operation is always optimized away; we will never
  // generate any code for it.
  GET_CCMASK,

  // Use a series of MVCs to copy bytes from one memory location to another.
  // The operands are:
  // - the target address
  // - the source address
  // - the constant length
  //
  // This isn't a memory opcode because we'd need to attach two
  // MachineMemOperands rather than one.
  MVC,

  // Like MVC, but implemented as a loop that handles X*256 bytes
  // followed by straight-line code to handle the rest (if any).
  // The value of X is passed as an additional operand.
  MVC_LOOP,

  // Similar to MVC and MVC_LOOP, but for logic operations (AND, OR, XOR).
  NC,
  NC_LOOP,
  OC,
  OC_LOOP,
  XC,
  XC_LOOP,

  // Use CLC to compare two blocks of memory, with the same comments
  // as for MVC and MVC_LOOP.
  CLC,
  CLC_LOOP,

  // Use an MVST-based sequence to implement stpcpy().
  STPCPY,

  // Use a CLST-based sequence to implement strcmp().  The two input operands
  // are the addresses of the strings to compare.
  STRCMP,

  // Use an SRST-based sequence to search a block of memory.  The first
  // operand is the end address, the second is the start, and the third
  // is the character to search for.  CC is set to 1 on success and 2
  // on failure.
  SEARCH_STRING,

  // Store the CC value in bits 29 and 28 of an integer.
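  // For example, a CC of 2 contributes 0x20000000, so (Result >> 28) & 3
  // recovers the CC value (a worked example derived from the bit
  // positions above).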
  IPM,

  // Compiler barrier only; generate a no-op.
  MEMBARRIER,

  // Transaction begin.  The first operand is the chain, the second
  // the TDB pointer, and the third the immediate control field.
  // Returns CC value and chain.
  TBEGIN,
  TBEGIN_NOFLOAT,

  // Transaction end.  Just the chain operand.  Returns CC value and chain.
  TEND,

  // Create a vector constant by filling byte N of the result with bit
  // 15-N of the single operand.
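  //
  // For example (a worked example of the rule above): an operand of
  // 0x8001 has bits 15 and 0 set, so byte 0 and byte 15 of the result
  // become 0xff and all other bytes become 0x00.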
  BYTE_MASK,

  // Create a vector constant by replicating an element-sized RISBG-style mask.
  // The first operand specifies the starting set bit and the second operand
  // specifies the ending set bit.  Both operands count from the MSB of the
  // element.
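  //
  // For example (a worked example, assuming 32-bit elements): a start
  // bit of 4 and an end bit of 7 set bits 4 through 7 from the MSB,
  // giving 0x0f000000 in each element.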
  ROTATE_MASK,

  // Replicate a GPR scalar value into all elements of a vector.
  REPLICATE,

  // Create a vector from two i64 GPRs.
  JOIN_DWORDS,

  // Replicate one element of a vector into all elements.  The first operand
  // is the vector and the second is the index of the element to replicate.
  SPLAT,

  // Interleave elements from the high half of operand 0 and the high half
  // of operand 1.
  MERGE_HIGH,

  // Likewise for the low halves.
  MERGE_LOW,

  // Concatenate the vectors in the first two operands, shift them left
  // by the third operand, and take the first half of the result.
  SHL_DOUBLE,

  // Take one element of the first v2i64 operand and one element of the
  // second v2i64 operand and concatenate them to form a v2i64 result.
  // The third operand is a 4-bit value of the form 0A0B, where A and B
  // are the element selectors for the first and second operands
  // respectively.
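  //
  // For example (a worked example of the encoding): a selector of 0b0100
  // (A=1, B=0) produces <op0[1], op1[0]>, and 0b0001 (A=0, B=1)
  // produces <op0[0], op1[1]>.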
  PERMUTE_DWORDS,

  // Perform a general vector permute on vector operands 0 and 1.
  // Each byte of operand 2 controls the corresponding byte of the result,
  // in the same way as a byte-level VECTOR_SHUFFLE mask.
  PERMUTE,

  // Pack vector operands 0 and 1 into a single vector with half-sized elements.
  PACK,

  // Likewise, but saturate the result and set CC.  PACKS_CC does signed
  // saturation and PACKLS_CC does unsigned saturation.
  PACKS_CC,
  PACKLS_CC,

  // Unpack the first half of vector operand 0 into double-sized elements.
  // UNPACK_HIGH sign-extends and UNPACKL_HIGH zero-extends.
  UNPACK_HIGH,
  UNPACKL_HIGH,

  // Likewise for the second half.
  UNPACK_LOW,
  UNPACKL_LOW,

  // Shift each element of vector operand 0 by the number of bits specified
  // by scalar operand 1.
  VSHL_BY_SCALAR,
  VSRL_BY_SCALAR,
  VSRA_BY_SCALAR,

  // For each element of the output type, sum across all sub-elements of
  // operand 0 belonging to the corresponding element, and add in the
  // rightmost sub-element of the corresponding element of operand 1.
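  //
  // For example (a worked example, assuming a v16i8 operand 0 and a
  // v4i32 result): result word W is the sum of the four bytes of
  // operand 0 that lie in word W, plus the least significant byte of
  // word W of operand 1.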
  VSUM,

  // Compare integer vector operands 0 and 1 to produce the usual 0/-1
  // vector result.  VICMPE is for equality, VICMPH for "signed greater than"
  // and VICMPHL for "unsigned greater than".
  VICMPE,
  VICMPH,
  VICMPHL,

  // Likewise, but also set the condition codes on the result.
  VICMPES,
  VICMPHS,
  VICMPHLS,

  // Compare floating-point vector operands 0 and 1 to produce the usual 0/-1
  // vector result.  VFCMPE is for "ordered and equal", VFCMPH for "ordered and
  // greater than" and VFCMPHE for "ordered and greater than or equal to".
  VFCMPE,
  VFCMPH,
  VFCMPHE,

  // Likewise, but also set the condition codes on the result.
  VFCMPES,
  VFCMPHS,
  VFCMPHES,

  // Test floating-point data class for vectors.
  VFTCI,

  // Extend the even f32 elements of vector operand 0 to produce a vector
  // of f64 elements.
  VEXTEND,

  // Round the f64 elements of vector operand 0 to f32s and store them in the
  // even elements of the result.
  VROUND,

  // AND the two vector operands together and set CC based on the result.
  VTM,

  // String operations that set CC as a side-effect.
  VFAE_CC,
  VFAEZ_CC,
  VFEE_CC,
  VFEEZ_CC,
  VFENE_CC,
  VFENEZ_CC,
  VISTR_CC,
  VSTRC_CC,
  VSTRCZ_CC,
  VSTRS_CC,
  VSTRSZ_CC,

  // Test Data Class.
  //
  // Operand 0: the value to test
  // Operand 1: the bit mask
  TDC,

  // Strict variants of scalar floating-point comparisons.
  // Quiet and signaling versions.
  STRICT_FCMP = ISD::FIRST_TARGET_STRICTFP_OPCODE,
  STRICT_FCMPS,

  // Strict variants of vector floating-point comparisons.
  // Quiet and signaling versions.
  STRICT_VFCMPE,
  STRICT_VFCMPH,
  STRICT_VFCMPHE,
  STRICT_VFCMPES,
  STRICT_VFCMPHS,
  STRICT_VFCMPHES,

  // Strict variants of VEXTEND and VROUND.
  STRICT_VEXTEND,
  STRICT_VROUND,

  // Wrappers around the inner loop of an 8- or 16-bit ATOMIC_SWAP or
  // ATOMIC_LOAD_<op>.
  //
  // Operand 0: the address of the containing 32-bit-aligned field
  // Operand 1: the second operand of <op>, in the high bits of an i32
  //            for everything except ATOMIC_SWAPW
  // Operand 2: how many bits to rotate the i32 left to bring the first
  //            operand into the high bits
  // Operand 3: the negative of operand 2, for rotating the other way
  // Operand 4: the width of the field in bits (8 or 16)
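  //
  // For example (an illustrative sketch of this encoding): an 8-bit
  // <op> on big-endian byte offset B within the containing aligned
  // word would use a rotate amount of 8*B for operand 2, -(8*B) for
  // operand 3 and 8 for operand 4.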
  ATOMIC_SWAPW = ISD::FIRST_TARGET_MEMORY_OPCODE,
  ATOMIC_LOADW_ADD,
  ATOMIC_LOADW_SUB,
  ATOMIC_LOADW_AND,
  ATOMIC_LOADW_OR,
  ATOMIC_LOADW_XOR,
  ATOMIC_LOADW_NAND,
  ATOMIC_LOADW_MIN,
  ATOMIC_LOADW_MAX,
  ATOMIC_LOADW_UMIN,
  ATOMIC_LOADW_UMAX,

  // A wrapper around the inner loop of an ATOMIC_CMP_SWAP.
  //
  // Operand 0: the address of the containing 32-bit-aligned field
  // Operand 1: the compare value, in the low bits of an i32
  // Operand 2: the swap value, in the low bits of an i32
  // Operand 3: how many bits to rotate the i32 left to bring the first
  //            operand into the high bits
  // Operand 4: the negative of operand 3, for rotating the other way
  // Operand 5: the width of the field in bits (8 or 16)
  ATOMIC_CMP_SWAPW,

  // Atomic compare-and-swap returning CC value.
  // Val, CC, OUTCHAIN = ATOMIC_CMP_SWAP(INCHAIN, ptr, cmp, swap)
  ATOMIC_CMP_SWAP,

  // 128-bit atomic load.
  // Val, OUTCHAIN = ATOMIC_LOAD_128(INCHAIN, ptr)
  ATOMIC_LOAD_128,

  // 128-bit atomic store.
  // OUTCHAIN = ATOMIC_STORE_128(INCHAIN, val, ptr)
  ATOMIC_STORE_128,

  // 128-bit atomic compare-and-swap.
  // Val, CC, OUTCHAIN = ATOMIC_CMP_SWAP_128(INCHAIN, ptr, cmp, swap)
  ATOMIC_CMP_SWAP_128,

  // Byte swapping load/store.  Same operands as regular load/store.
  LRV, STRV,

  // Element swapping load/store.  Same operands as regular load/store.
  VLER, VSTER,

  // Prefetch from the second operand using the 4-bit control code in
  // the first operand.  The code is 1 for a load prefetch and 2 for
  // a store prefetch.
  PREFETCH
};

// Return true if OPCODE is some kind of PC-relative address.
inline bool isPCREL(unsigned Opcode) {
  return Opcode == PCREL_WRAPPER || Opcode == PCREL_OFFSET;
}
} // end namespace SystemZISD

namespace SystemZICMP {
// Describes whether an integer comparison needs to be signed or unsigned,
// or whether either type is OK.
enum {
  Any,
  UnsignedOnly,
  SignedOnly
};
} // end namespace SystemZICMP

class SystemZSubtarget;
class SystemZTargetMachine;

class SystemZTargetLowering : public TargetLowering {
public:
  explicit SystemZTargetLowering(const TargetMachine &TM,
                                 const SystemZSubtarget &STI);

  bool useSoftFloat() const override;

  // Override TargetLowering.
  MVT getScalarShiftAmountTy(const DataLayout &, EVT) const override {
    return MVT::i32;
  }
  MVT getVectorIdxTy(const DataLayout &DL) const override {
    // Only the lower 12 bits of an element index are used, so we don't
    // want to clobber the upper 32 bits of a GPR unnecessarily.
    return MVT::i32;
  }
  TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(MVT VT)
    const override {
    // Widen subvectors to the full width rather than promoting integer
    // elements.  This is better because:
    //
    // (a) it means that we can handle the ABI for passing and returning
    //     sub-128 vectors without having to handle them as legal types.
    //
    // (b) we don't have instructions to extend on load and truncate on store,
    //     so promoting the integers is less efficient.
    //
    // (c) there are no multiplication instructions for the widest integer
    //     type (v2i64).
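    //
    // For example, under this policy a v4i8 would be widened to v16i8
    // rather than promoted to v4i32 (an illustrative case).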
    if (VT.getScalarSizeInBits() % 8 == 0)
      return TypeWidenVector;
    return TargetLoweringBase::getPreferredVectorAction(VT);
  }
  bool isCheapToSpeculateCtlz() const override { return true; }
  EVT getSetCCResultType(const DataLayout &DL, LLVMContext &,
                         EVT) const override;
  bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
                                  EVT VT) const override;
  bool isFPImmLegal(const APFloat &Imm, EVT VT,
                    bool ForCodeSize) const override;
  bool hasInlineStackProbe(MachineFunction &MF) const override;
  bool isLegalICmpImmediate(int64_t Imm) const override;
  bool isLegalAddImmediate(int64_t Imm) const override;
  bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty,
                             unsigned AS,
                             Instruction *I = nullptr) const override;
  bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AS, Align Alignment,
                                      MachineMemOperand::Flags Flags,
                                      bool *Fast) const override;
  bool isTruncateFree(Type *, Type *) const override;
  bool isTruncateFree(EVT, EVT) const override;

  bool shouldFormOverflowOp(unsigned Opcode, EVT VT,
                            bool MathUsed) const override {
    // Form add and sub with overflow intrinsics regardless of any extra
    // users of the math result.
    return VT == MVT::i32 || VT == MVT::i64;
  }

  const char *getTargetNodeName(unsigned Opcode) const override;
  std::pair<unsigned, const TargetRegisterClass *>
  getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                               StringRef Constraint, MVT VT) const override;
  TargetLowering::ConstraintType
  getConstraintType(StringRef Constraint) const override;
  TargetLowering::ConstraintWeight
    getSingleConstraintMatchWeight(AsmOperandInfo &info,
                                   const char *constraint) const override;
  void LowerAsmOperandForConstraint(SDValue Op,
                                    std::string &Constraint,
                                    std::vector<SDValue> &Ops,
                                    SelectionDAG &DAG) const override;

  unsigned getInlineAsmMemConstraint(StringRef ConstraintCode) const override {
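    // These single-letter memory constraints select SystemZ address
    // formats (as documented for the s390 target in GCC; summarized
    // here for reference):
    //   Q: base + 12-bit displacement, no index register
    //   R: base + 12-bit displacement, with index register
    //   S: base + 20-bit displacement, no index register
    //   T: base + 20-bit displacement, with index register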
    if (ConstraintCode.size() == 1) {
      switch(ConstraintCode[0]) {
      default:
        break;
      case 'o':
        return InlineAsm::Constraint_o;
      case 'Q':
        return InlineAsm::Constraint_Q;
      case 'R':
        return InlineAsm::Constraint_R;
      case 'S':
        return InlineAsm::Constraint_S;
      case 'T':
        return InlineAsm::Constraint_T;
      }
    }
    return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
  }

  Register getRegisterByName(const char *RegName, LLT VT,
                             const MachineFunction &MF) const override;

  /// If a physical register, this returns the register that receives the
  /// exception address on entry to an EH pad.
  Register
  getExceptionPointerRegister(const Constant *PersonalityFn) const override {
    return SystemZ::R6D;
  }

  /// If a physical register, this returns the register that receives the
  /// exception typeid on entry to a landing pad.
  Register
  getExceptionSelectorRegister(const Constant *PersonalityFn) const override {
    return SystemZ::R7D;
  }

  /// Override to support customized stack guard loading.
  bool useLoadStackGuardNode() const override {
    return true;
  }
  void insertSSPDeclarations(Module &M) const override {
  }

  MachineBasicBlock *
  EmitInstrWithCustomInserter(MachineInstr &MI,
                              MachineBasicBlock *BB) const override;
  SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
  void LowerOperationWrapper(SDNode *N, SmallVectorImpl<SDValue> &Results,
                             SelectionDAG &DAG) const override;
  void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue>&Results,
                          SelectionDAG &DAG) const override;
  const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const override;
  bool allowTruncateForTailCall(Type *, Type *) const override;
  bool mayBeEmittedAsTailCall(const CallInst *CI) const override;
  SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv,
                               bool isVarArg,
                               const SmallVectorImpl<ISD::InputArg> &Ins,
                               const SDLoc &DL, SelectionDAG &DAG,
                               SmallVectorImpl<SDValue> &InVals) const override;
  SDValue LowerCall(CallLoweringInfo &CLI,
                    SmallVectorImpl<SDValue> &InVals) const override;

  bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
                      bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      LLVMContext &Context) const override;
  SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
                      SelectionDAG &DAG) const override;
  SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;

  /// Determine which of the bits specified in Mask are known to be either
  /// zero or one and return them in the KnownZero/KnownOne bitsets.
  void computeKnownBitsForTargetNode(const SDValue Op,
                                     KnownBits &Known,
                                     const APInt &DemandedElts,
                                     const SelectionDAG &DAG,
                                     unsigned Depth = 0) const override;

  /// Determine the number of bits in the operation that are sign bits.
  unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
                                           const APInt &DemandedElts,
                                           const SelectionDAG &DAG,
                                           unsigned Depth) const override;

  ISD::NodeType getExtendForAtomicOps() const override {
    return ISD::ZERO_EXTEND;
  }
  ISD::NodeType getExtendForAtomicCmpSwapArg() const override {
    return ISD::ZERO_EXTEND;
  }

  bool supportSwiftError() const override {
    return true;
  }

  unsigned getStackProbeSize(MachineFunction &MF) const;

private:
  const SystemZSubtarget &Subtarget;

  // Implement LowerOperation for individual opcodes.
  SDValue getVectorCmp(SelectionDAG &DAG, unsigned Opcode,
                       const SDLoc &DL, EVT VT,
                       SDValue CmpOp0, SDValue CmpOp1, SDValue Chain) const;
  SDValue lowerVectorSETCC(SelectionDAG &DAG, const SDLoc &DL,
                           EVT VT, ISD::CondCode CC,
                           SDValue CmpOp0, SDValue CmpOp1,
                           SDValue Chain = SDValue(),
                           bool IsSignaling = false) const;
  SDValue lowerSETCC(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerSTRICT_FSETCC(SDValue Op, SelectionDAG &DAG,
                             bool IsSignaling) const;
  SDValue lowerBR_CC(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerGlobalAddress(GlobalAddressSDNode *Node,
                             SelectionDAG &DAG) const;
  SDValue lowerTLSGetOffset(GlobalAddressSDNode *Node,
                            SelectionDAG &DAG, unsigned Opcode,
                            SDValue GOTOffset) const;
  SDValue lowerThreadPointer(const SDLoc &DL, SelectionDAG &DAG) const;
  SDValue lowerGlobalTLSAddress(GlobalAddressSDNode *Node,
                                SelectionDAG &DAG) const;
  SDValue lowerBlockAddress(BlockAddressSDNode *Node,
                            SelectionDAG &DAG) const;
  SDValue lowerJumpTable(JumpTableSDNode *JT, SelectionDAG &DAG) const;
  SDValue lowerConstantPool(ConstantPoolSDNode *CP, SelectionDAG &DAG) const;
  SDValue lowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVACOPY(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerGET_DYNAMIC_AREA_OFFSET(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerSMUL_LOHI(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerUMUL_LOHI(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerSDIVREM(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerUDIVREM(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerXALUO(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerADDSUBCARRY(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerBITCAST(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerCTPOP(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerATOMIC_LOAD(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerATOMIC_LOAD_OP(SDValue Op, SelectionDAG &DAG,
                              unsigned Opcode) const;
  SDValue lowerATOMIC_LOAD_SUB(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerATOMIC_CMP_SWAP(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerSTACKSAVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerPREFETCH(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerINTRINSIC_W_CHAIN(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
  bool isVectorElementLoad(SDValue Op) const;
  SDValue buildVector(SelectionDAG &DAG, const SDLoc &DL, EVT VT,
                      SmallVectorImpl<SDValue> &Elems) const;
  SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerSIGN_EXTEND_VECTOR_INREG(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerZERO_EXTEND_VECTOR_INREG(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerShift(SDValue Op, SelectionDAG &DAG, unsigned ByScalar) const;

  bool canTreatAsByteVector(EVT VT) const;
  SDValue combineExtract(const SDLoc &DL, EVT ElemVT, EVT VecVT, SDValue OrigOp,
                         unsigned Index, DAGCombinerInfo &DCI,
                         bool Force) const;
  SDValue combineTruncateExtract(const SDLoc &DL, EVT TruncVT, SDValue Op,
                                 DAGCombinerInfo &DCI) const;
  SDValue combineZERO_EXTEND(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineSIGN_EXTEND(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineSIGN_EXTEND_INREG(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineMERGE(SDNode *N, DAGCombinerInfo &DCI) const;
  bool canLoadStoreByteSwapped(EVT VT) const;
  SDValue combineLOAD(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineSTORE(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineVECTOR_SHUFFLE(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineEXTRACT_VECTOR_ELT(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineJOIN_DWORDS(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineFP_ROUND(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineFP_EXTEND(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineINT_TO_FP(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineBSWAP(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineBR_CCMASK(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineSELECT_CCMASK(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineGET_CCMASK(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineIntDIVREM(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineINTRINSIC(SDNode *N, DAGCombinerInfo &DCI) const;

  SDValue unwrapAddress(SDValue N) const override;

  // If the last instruction before MBBI in MBB was some form of COMPARE,
  // try to replace it with a COMPARE AND BRANCH just before MBBI.
  // CCMask and Target are the BRC-like operands for the branch.
  // Return true if the change was made.
  bool convertPrevCompareToBranch(MachineBasicBlock *MBB,
                                  MachineBasicBlock::iterator MBBI,
                                  unsigned CCMask,
                                  MachineBasicBlock *Target) const;

  // Implement EmitInstrWithCustomInserter for individual operation types.
  MachineBasicBlock *emitSelect(MachineInstr &MI, MachineBasicBlock *BB) const;
  MachineBasicBlock *emitCondStore(MachineInstr &MI, MachineBasicBlock *BB,
                                   unsigned StoreOpcode, unsigned STOCOpcode,
                                   bool Invert) const;
  MachineBasicBlock *emitPair128(MachineInstr &MI,
                                 MachineBasicBlock *MBB) const;
  MachineBasicBlock *emitExt128(MachineInstr &MI, MachineBasicBlock *MBB,
                                bool ClearEven) const;
  MachineBasicBlock *emitAtomicLoadBinary(MachineInstr &MI,
                                          MachineBasicBlock *BB,
                                          unsigned BinOpcode, unsigned BitSize,
                                          bool Invert = false) const;
  MachineBasicBlock *emitAtomicLoadMinMax(MachineInstr &MI,
                                          MachineBasicBlock *MBB,
                                          unsigned CompareOpcode,
                                          unsigned KeepOldMask,
                                          unsigned BitSize) const;
  MachineBasicBlock *emitAtomicCmpSwapW(MachineInstr &MI,
                                        MachineBasicBlock *BB) const;
  MachineBasicBlock *emitMemMemWrapper(MachineInstr &MI, MachineBasicBlock *BB,
                                       unsigned Opcode) const;
  MachineBasicBlock *emitStringWrapper(MachineInstr &MI, MachineBasicBlock *BB,
                                       unsigned Opcode) const;
  MachineBasicBlock *emitTransactionBegin(MachineInstr &MI,
                                          MachineBasicBlock *MBB,
                                          unsigned Opcode, bool NoFloat) const;
  MachineBasicBlock *emitLoadAndTestCmp0(MachineInstr &MI,
                                         MachineBasicBlock *MBB,
                                         unsigned Opcode) const;
  MachineBasicBlock *emitProbedAlloca(MachineInstr &MI,
                                      MachineBasicBlock *MBB) const;

  SDValue getBackchainAddress(SDValue SP, SelectionDAG &DAG) const;

  MachineMemOperand::Flags
  getTargetMMOFlags(const Instruction &I) const override;
  const TargetRegisterClass *getRepRegClassFor(MVT VT) const override;
};

struct SystemZVectorConstantInfo {
private:
  APInt IntBits;             // The 128 bits as an integer.
  APInt SplatBits;           // Smallest splat value.
  APInt SplatUndef;          // Bits corresponding to undef operands of the BVN.
  unsigned SplatBitSize = 0;
  bool isFP128 = false;

public:
  unsigned Opcode = 0;
  SmallVector<unsigned, 2> OpVals;
  MVT VecVT;
  SystemZVectorConstantInfo(APFloat FPImm);
  SystemZVectorConstantInfo(BuildVectorSDNode *BVN);
  bool isVectorConstantLegal(const SystemZSubtarget &Subtarget);
};
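
// A minimal usage sketch (illustrative, assuming BVN is a BuildVectorSDNode
// being lowered and Subtarget is the current SystemZSubtarget):
//
//   SystemZVectorConstantInfo VCI(BVN);
//   if (VCI.isVectorConstantLegal(Subtarget)) {
//     // VCI.Opcode names a SystemZISD node (such as BYTE_MASK, ROTATE_MASK
//     // or REPLICATE), VCI.OpVals holds its immediate operands, and
//     // VCI.VecVT is the vector type in which to generate the constant.
//   }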

} // end namespace llvm

#endif