1 //===- LegalizeVectorOps.cpp - Implement SelectionDAG::LegalizeVectors ----===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the SelectionDAG::LegalizeVectors method.
10 //
11 // The vector legalizer looks for vector operations which might need to be
12 // scalarized and legalizes them. This is a separate step from Legalize because
13 // scalarizing can introduce illegal types.  For example, suppose we have an
14 // ISD::SDIV of type v2i64 on x86-32.  The type is legal (for example, addition
15 // on a v2i64 is legal), but ISD::SDIV isn't legal, so we have to unroll the
16 // operation, which introduces nodes with the illegal type i64 which must be
17 // expanded.  Similarly, suppose we have an ISD::SRA of type v16i8 on PowerPC;
18 // the operation must be unrolled, which introduces nodes with the illegal
19 // type i8 which must be promoted.
20 //
21 // This does not legalize vector manipulations like ISD::BUILD_VECTOR,
22 // or operations that happen to take a vector which are custom-lowered;
23 // the legalization for such operations never produces nodes
24 // with illegal types, so it's okay to put off legalizing them until
25 // SelectionDAG::Legalize runs.
26 //
27 //===----------------------------------------------------------------------===//
28 
29 #include "llvm/ADT/DenseMap.h"
30 #include "llvm/ADT/SmallVector.h"
31 #include "llvm/CodeGen/ISDOpcodes.h"
32 #include "llvm/CodeGen/SelectionDAG.h"
33 #include "llvm/CodeGen/SelectionDAGNodes.h"
34 #include "llvm/CodeGen/TargetLowering.h"
35 #include "llvm/CodeGen/ValueTypes.h"
36 #include "llvm/IR/DataLayout.h"
37 #include "llvm/Support/Casting.h"
38 #include "llvm/Support/Compiler.h"
39 #include "llvm/Support/Debug.h"
40 #include "llvm/Support/ErrorHandling.h"
41 #include "llvm/Support/MachineValueType.h"
42 #include <cassert>
43 #include <cstdint>
44 #include <iterator>
45 #include <utility>
46 
47 using namespace llvm;
48 
49 #define DEBUG_TYPE "legalizevectorops"
50 
51 namespace {
52 
53 class VectorLegalizer {
54   SelectionDAG& DAG;
55   const TargetLowering &TLI;
56   bool Changed = false; // Keep track of whether anything changed
57 
58   /// For nodes that are of legal width, and that have more than one use, this
59   /// map indicates what regularized operand to use.  This allows us to avoid
60   /// legalizing the same thing more than once.
61   SmallDenseMap<SDValue, SDValue, 64> LegalizedNodes;
62 
63   /// Adds a node to the translation cache.
64   void AddLegalizedOperand(SDValue From, SDValue To) {
65     LegalizedNodes.insert(std::make_pair(From, To));
66     // If someone requests legalization of the new node, return the node itself.
67     if (From != To)
68       LegalizedNodes.insert(std::make_pair(To, To));
69   }
70 
71   /// Legalizes the given node.
72   SDValue LegalizeOp(SDValue Op);
73 
74   /// Assuming the node is legal, "legalize" the results.
75   SDValue TranslateLegalizeResults(SDValue Op, SDNode *Result);
76 
77   /// Make sure Results are legal and update the translation cache.
78   SDValue RecursivelyLegalizeResults(SDValue Op,
79                                      MutableArrayRef<SDValue> Results);
80 
81   /// Wrapper to interface LowerOperation with a vector of Results.
82   /// Returns false if the target wants to use default expansion. Otherwise
83   /// returns true. If the return value is true and Results is empty, then the
84   /// target wants to keep the input node as is.
85   bool LowerOperationWrapper(SDNode *N, SmallVectorImpl<SDValue> &Results);
86 
87   /// Implements unrolling a VSETCC.
88   SDValue UnrollVSETCC(SDNode *Node);
89 
90   /// Implement expand-based legalization of vector operations.
91   ///
92   /// This is just a high-level routine to dispatch to specific code paths for
93   /// operations to legalize them.
94   void Expand(SDNode *Node, SmallVectorImpl<SDValue> &Results);
95 
96   /// Implements expansion for FP_TO_UINT; falls back to UnrollVectorOp if
97   /// FP_TO_SINT isn't legal.
98   void ExpandFP_TO_UINT(SDNode *Node, SmallVectorImpl<SDValue> &Results);
99 
100   /// Implements expansion for UINT_TO_FLOAT; falls back to UnrollVectorOp if
101   /// SINT_TO_FLOAT and SHR on vectors aren't legal.
102   void ExpandUINT_TO_FLOAT(SDNode *Node, SmallVectorImpl<SDValue> &Results);
103 
104   /// Implement expansion for SIGN_EXTEND_INREG using SRL and SRA.
105   SDValue ExpandSEXTINREG(SDNode *Node);
106 
107   /// Implement expansion for ANY_EXTEND_VECTOR_INREG.
108   ///
109   /// Shuffles the low lanes of the operand into place and bitcasts to the proper
110   /// type. The contents of the bits in the extended part of each element are
111   /// undef.
112   SDValue ExpandANY_EXTEND_VECTOR_INREG(SDNode *Node);
113 
114   /// Implement expansion for SIGN_EXTEND_VECTOR_INREG.
115   ///
116   /// Shuffles the low lanes of the operand into place, bitcasts to the proper
117   /// type, then shifts left and arithmetic shifts right to introduce a sign
118   /// extension.
119   SDValue ExpandSIGN_EXTEND_VECTOR_INREG(SDNode *Node);
120 
121   /// Implement expansion for ZERO_EXTEND_VECTOR_INREG.
122   ///
123   /// Shuffles the low lanes of the operand into place and blends zeros into
124   /// the remaining lanes, finally bitcasting to the proper type.
125   SDValue ExpandZERO_EXTEND_VECTOR_INREG(SDNode *Node);
126 
127   /// Expand bswap of vectors into a shuffle if legal.
128   SDValue ExpandBSWAP(SDNode *Node);
129 
130   /// Implement vselect in terms of XOR, AND, OR when blend is not
131   /// supported by the target.
132   SDValue ExpandVSELECT(SDNode *Node);
133   SDValue ExpandVP_SELECT(SDNode *Node);
134   SDValue ExpandVP_MERGE(SDNode *Node);
135   SDValue ExpandSELECT(SDNode *Node);
136   std::pair<SDValue, SDValue> ExpandLoad(SDNode *N);
137   SDValue ExpandStore(SDNode *N);
138   SDValue ExpandFNEG(SDNode *Node);
139   void ExpandFSUB(SDNode *Node, SmallVectorImpl<SDValue> &Results);
140   void ExpandSETCC(SDNode *Node, SmallVectorImpl<SDValue> &Results);
141   void ExpandBITREVERSE(SDNode *Node, SmallVectorImpl<SDValue> &Results);
142   void ExpandUADDSUBO(SDNode *Node, SmallVectorImpl<SDValue> &Results);
143   void ExpandSADDSUBO(SDNode *Node, SmallVectorImpl<SDValue> &Results);
144   void ExpandMULO(SDNode *Node, SmallVectorImpl<SDValue> &Results);
145   void ExpandFixedPointDiv(SDNode *Node, SmallVectorImpl<SDValue> &Results);
146   void ExpandStrictFPOp(SDNode *Node, SmallVectorImpl<SDValue> &Results);
147   void ExpandREM(SDNode *Node, SmallVectorImpl<SDValue> &Results);
148 
149   void UnrollStrictFPOp(SDNode *Node, SmallVectorImpl<SDValue> &Results);
150 
151   /// Implements vector promotion.
152   ///
153   /// This is essentially just bitcasting the operands to a different type and
154   /// bitcasting the result back to the original type.
155   void Promote(SDNode *Node, SmallVectorImpl<SDValue> &Results);
156 
157   /// Implements [SU]INT_TO_FP vector promotion.
158   ///
159   /// This is a [zs]ext of the input operand to a larger integer type.
160   void PromoteINT_TO_FP(SDNode *Node, SmallVectorImpl<SDValue> &Results);
161 
162   /// Implements FP_TO_[SU]INT vector promotion of the result type.
163   ///
164   /// It is promoted to a larger integer type.  The result is then
165   /// truncated back to the original type.
166   void PromoteFP_TO_INT(SDNode *Node, SmallVectorImpl<SDValue> &Results);
167 
168 public:
169   VectorLegalizer(SelectionDAG& dag) :
170       DAG(dag), TLI(dag.getTargetLoweringInfo()) {}
171 
172   /// Begin legalizing the vector operations in the DAG.
173   bool Run();
174 };
175 
176 } // end anonymous namespace
177 
178 bool VectorLegalizer::Run() {
179   // Before we start legalizing vector nodes, check if there are any vectors.
180   bool HasVectors = false;
181   for (SelectionDAG::allnodes_iterator I = DAG.allnodes_begin(),
182        E = std::prev(DAG.allnodes_end()); I != std::next(E); ++I) {
183     // Check if the values of the nodes contain vectors. We don't need to check
184     // the operands because we are going to check their values at some point.
185     HasVectors = llvm::any_of(I->values(), [](EVT T) { return T.isVector(); });
186 
187     // If we found a vector node we can start the legalization.
188     if (HasVectors)
189       break;
190   }
191 
192   // If this basic block has no vectors, there is nothing to legalize.
193   if (!HasVectors)
194     return false;
195 
196   // The legalize process is inherently a bottom-up recursive process (users
197   // legalize their uses before themselves).  Given infinite stack space, we
198   // could just start legalizing on the root and traverse the whole graph.  In
199   // practice however, this causes us to run out of stack space on large basic
200   // blocks.  To avoid this problem, compute an ordering of the nodes where each
201   // node is only legalized after all of its operands are legalized.
202   DAG.AssignTopologicalOrder();
203   for (SelectionDAG::allnodes_iterator I = DAG.allnodes_begin(),
204        E = std::prev(DAG.allnodes_end()); I != std::next(E); ++I)
205     LegalizeOp(SDValue(&*I, 0));
206 
207   // Finally, it's possible the root changed.  Get the new root.
208   SDValue OldRoot = DAG.getRoot();
209   assert(LegalizedNodes.count(OldRoot) && "Root didn't get legalized?");
210   DAG.setRoot(LegalizedNodes[OldRoot]);
211 
212   LegalizedNodes.clear();
213 
214   // Remove dead nodes now.
215   DAG.RemoveDeadNodes();
216 
217   return Changed;
218 }
219 
220 SDValue VectorLegalizer::TranslateLegalizeResults(SDValue Op, SDNode *Result) {
221   assert(Op->getNumValues() == Result->getNumValues() &&
222          "Unexpected number of results");
223   // Generic legalization: just pass the operand through.
224   for (unsigned i = 0, e = Op->getNumValues(); i != e; ++i)
225     AddLegalizedOperand(Op.getValue(i), SDValue(Result, i));
226   return SDValue(Result, Op.getResNo());
227 }
228 
229 SDValue
230 VectorLegalizer::RecursivelyLegalizeResults(SDValue Op,
231                                             MutableArrayRef<SDValue> Results) {
232   assert(Results.size() == Op->getNumValues() &&
233          "Unexpected number of results");
234   // Make sure that the generated code is itself legal.
235   for (unsigned i = 0, e = Results.size(); i != e; ++i) {
236     Results[i] = LegalizeOp(Results[i]);
237     AddLegalizedOperand(Op.getValue(i), Results[i]);
238   }
239 
240   return Results[Op.getResNo()];
241 }
242 
243 SDValue VectorLegalizer::LegalizeOp(SDValue Op) {
244   // Note that LegalizeOp may be reentered even from single-use nodes, which
245   // means that we always must cache transformed nodes.
246   DenseMap<SDValue, SDValue>::iterator I = LegalizedNodes.find(Op);
247   if (I != LegalizedNodes.end()) return I->second;
248 
249   // Legalize the operands
250   SmallVector<SDValue, 8> Ops;
251   for (const SDValue &Oper : Op->op_values())
252     Ops.push_back(LegalizeOp(Oper));
253 
254   SDNode *Node = DAG.UpdateNodeOperands(Op.getNode(), Ops);
255 
256   bool HasVectorValueOrOp =
257       llvm::any_of(Node->values(), [](EVT T) { return T.isVector(); }) ||
258       llvm::any_of(Node->op_values(),
259                    [](SDValue O) { return O.getValueType().isVector(); });
260   if (!HasVectorValueOrOp)
261     return TranslateLegalizeResults(Op, Node);
262 
263   TargetLowering::LegalizeAction Action = TargetLowering::Legal;
264   EVT ValVT;
265   switch (Op.getOpcode()) {
266   default:
267     return TranslateLegalizeResults(Op, Node);
268   case ISD::LOAD: {
269     LoadSDNode *LD = cast<LoadSDNode>(Node);
270     ISD::LoadExtType ExtType = LD->getExtensionType();
271     EVT LoadedVT = LD->getMemoryVT();
272     if (LoadedVT.isVector() && ExtType != ISD::NON_EXTLOAD)
273       Action = TLI.getLoadExtAction(ExtType, LD->getValueType(0), LoadedVT);
274     break;
275   }
276   case ISD::STORE: {
277     StoreSDNode *ST = cast<StoreSDNode>(Node);
278     EVT StVT = ST->getMemoryVT();
279     MVT ValVT = ST->getValue().getSimpleValueType();
280     if (StVT.isVector() && ST->isTruncatingStore())
281       Action = TLI.getTruncStoreAction(ValVT, StVT);
282     break;
283   }
284   case ISD::MERGE_VALUES:
285     Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0));
286     // This operation lies about being legal: when it claims to be legal,
287     // it should actually be expanded.
288     if (Action == TargetLowering::Legal)
289       Action = TargetLowering::Expand;
290     break;
291 #define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \
292   case ISD::STRICT_##DAGN:
293 #include "llvm/IR/ConstrainedOps.def"
294     ValVT = Node->getValueType(0);
295     if (Op.getOpcode() == ISD::STRICT_SINT_TO_FP ||
296         Op.getOpcode() == ISD::STRICT_UINT_TO_FP)
297       ValVT = Node->getOperand(1).getValueType();
298     Action = TLI.getOperationAction(Node->getOpcode(), ValVT);
299     // If we're asked to expand a strict vector floating-point operation,
300     // by default we're going to simply unroll it.  That is usually the
301     // best approach, except in the case where the resulting strict (scalar)
302     // operations would themselves use the fallback mutation to non-strict.
303     // In that specific case, just do the fallback on the vector op.
304     if (Action == TargetLowering::Expand && !TLI.isStrictFPEnabled() &&
305         TLI.getStrictFPOperationAction(Node->getOpcode(), ValVT) ==
306             TargetLowering::Legal) {
307       EVT EltVT = ValVT.getVectorElementType();
308       if (TLI.getOperationAction(Node->getOpcode(), EltVT)
309           == TargetLowering::Expand &&
310           TLI.getStrictFPOperationAction(Node->getOpcode(), EltVT)
311           == TargetLowering::Legal)
312         Action = TargetLowering::Legal;
313     }
314     break;
315   case ISD::ADD:
316   case ISD::SUB:
317   case ISD::MUL:
318   case ISD::MULHS:
319   case ISD::MULHU:
320   case ISD::SDIV:
321   case ISD::UDIV:
322   case ISD::SREM:
323   case ISD::UREM:
324   case ISD::SDIVREM:
325   case ISD::UDIVREM:
326   case ISD::FADD:
327   case ISD::FSUB:
328   case ISD::FMUL:
329   case ISD::FDIV:
330   case ISD::FREM:
331   case ISD::AND:
332   case ISD::OR:
333   case ISD::XOR:
334   case ISD::SHL:
335   case ISD::SRA:
336   case ISD::SRL:
337   case ISD::FSHL:
338   case ISD::FSHR:
339   case ISD::ROTL:
340   case ISD::ROTR:
341   case ISD::ABS:
342   case ISD::BSWAP:
343   case ISD::BITREVERSE:
344   case ISD::CTLZ:
345   case ISD::CTTZ:
346   case ISD::CTLZ_ZERO_UNDEF:
347   case ISD::CTTZ_ZERO_UNDEF:
348   case ISD::CTPOP:
349   case ISD::SELECT:
350   case ISD::VSELECT:
351   case ISD::SELECT_CC:
352   case ISD::ZERO_EXTEND:
353   case ISD::ANY_EXTEND:
354   case ISD::TRUNCATE:
355   case ISD::SIGN_EXTEND:
356   case ISD::FP_TO_SINT:
357   case ISD::FP_TO_UINT:
358   case ISD::FNEG:
359   case ISD::FABS:
360   case ISD::FMINNUM:
361   case ISD::FMAXNUM:
362   case ISD::FMINNUM_IEEE:
363   case ISD::FMAXNUM_IEEE:
364   case ISD::FMINIMUM:
365   case ISD::FMAXIMUM:
366   case ISD::FCOPYSIGN:
367   case ISD::FSQRT:
368   case ISD::FSIN:
369   case ISD::FCOS:
370   case ISD::FPOWI:
371   case ISD::FPOW:
372   case ISD::FLOG:
373   case ISD::FLOG2:
374   case ISD::FLOG10:
375   case ISD::FEXP:
376   case ISD::FEXP2:
377   case ISD::FCEIL:
378   case ISD::FTRUNC:
379   case ISD::FRINT:
380   case ISD::FNEARBYINT:
381   case ISD::FROUND:
382   case ISD::FROUNDEVEN:
383   case ISD::FFLOOR:
384   case ISD::FP_ROUND:
385   case ISD::FP_EXTEND:
386   case ISD::FMA:
387   case ISD::SIGN_EXTEND_INREG:
388   case ISD::ANY_EXTEND_VECTOR_INREG:
389   case ISD::SIGN_EXTEND_VECTOR_INREG:
390   case ISD::ZERO_EXTEND_VECTOR_INREG:
391   case ISD::SMIN:
392   case ISD::SMAX:
393   case ISD::UMIN:
394   case ISD::UMAX:
395   case ISD::SMUL_LOHI:
396   case ISD::UMUL_LOHI:
397   case ISD::SADDO:
398   case ISD::UADDO:
399   case ISD::SSUBO:
400   case ISD::USUBO:
401   case ISD::SMULO:
402   case ISD::UMULO:
403   case ISD::FCANONICALIZE:
404   case ISD::SADDSAT:
405   case ISD::UADDSAT:
406   case ISD::SSUBSAT:
407   case ISD::USUBSAT:
408   case ISD::SSHLSAT:
409   case ISD::USHLSAT:
410   case ISD::FP_TO_SINT_SAT:
411   case ISD::FP_TO_UINT_SAT:
412   case ISD::MGATHER:
413     Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0));
414     break;
415   case ISD::SMULFIX:
416   case ISD::SMULFIXSAT:
417   case ISD::UMULFIX:
418   case ISD::UMULFIXSAT:
419   case ISD::SDIVFIX:
420   case ISD::SDIVFIXSAT:
421   case ISD::UDIVFIX:
422   case ISD::UDIVFIXSAT: {
423     unsigned Scale = Node->getConstantOperandVal(2);
424     Action = TLI.getFixedPointOperationAction(Node->getOpcode(),
425                                               Node->getValueType(0), Scale);
426     break;
427   }
428   case ISD::SINT_TO_FP:
429   case ISD::UINT_TO_FP:
430   case ISD::VECREDUCE_ADD:
431   case ISD::VECREDUCE_MUL:
432   case ISD::VECREDUCE_AND:
433   case ISD::VECREDUCE_OR:
434   case ISD::VECREDUCE_XOR:
435   case ISD::VECREDUCE_SMAX:
436   case ISD::VECREDUCE_SMIN:
437   case ISD::VECREDUCE_UMAX:
438   case ISD::VECREDUCE_UMIN:
439   case ISD::VECREDUCE_FADD:
440   case ISD::VECREDUCE_FMUL:
441   case ISD::VECREDUCE_FMAX:
442   case ISD::VECREDUCE_FMIN:
443     Action = TLI.getOperationAction(Node->getOpcode(),
444                                     Node->getOperand(0).getValueType());
445     break;
446   case ISD::VECREDUCE_SEQ_FADD:
447   case ISD::VECREDUCE_SEQ_FMUL:
448     Action = TLI.getOperationAction(Node->getOpcode(),
449                                     Node->getOperand(1).getValueType());
450     break;
451   case ISD::SETCC: {
452     MVT OpVT = Node->getOperand(0).getSimpleValueType();
453     ISD::CondCode CCCode = cast<CondCodeSDNode>(Node->getOperand(2))->get();
454     Action = TLI.getCondCodeAction(CCCode, OpVT);
455     if (Action == TargetLowering::Legal)
456       Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0));
457     break;
458   }
459 
460 #define BEGIN_REGISTER_VP_SDNODE(VPID, LEGALPOS, ...)                          \
461   case ISD::VPID: {                                                            \
462     EVT LegalizeVT = LEGALPOS < 0 ? Node->getValueType(-(1 + LEGALPOS))        \
463                                   : Node->getOperand(LEGALPOS).getValueType(); \
464     if (ISD::VPID == ISD::VP_SETCC) {                                          \
465       ISD::CondCode CCCode = cast<CondCodeSDNode>(Node->getOperand(2))->get(); \
466       Action = TLI.getCondCodeAction(CCCode, LegalizeVT.getSimpleVT());        \
467       if (Action != TargetLowering::Legal)                                     \
468         break;                                                                 \
469     }                                                                          \
470     Action = TLI.getOperationAction(Node->getOpcode(), LegalizeVT);            \
471   } break;
472 #include "llvm/IR/VPIntrinsics.def"
473   }
474 
475   LLVM_DEBUG(dbgs() << "\nLegalizing vector op: "; Node->dump(&DAG));
476 
477   SmallVector<SDValue, 8> ResultVals;
478   switch (Action) {
479   default: llvm_unreachable("This action is not supported yet!");
480   case TargetLowering::Promote:
481     assert((Op.getOpcode() != ISD::LOAD && Op.getOpcode() != ISD::STORE) &&
482            "This action is not supported yet!");
483     LLVM_DEBUG(dbgs() << "Promoting\n");
484     Promote(Node, ResultVals);
485     assert(!ResultVals.empty() && "No results for promotion?");
486     break;
487   case TargetLowering::Legal:
488     LLVM_DEBUG(dbgs() << "Legal node: nothing to do\n");
489     break;
490   case TargetLowering::Custom:
491     LLVM_DEBUG(dbgs() << "Trying custom legalization\n");
492     if (LowerOperationWrapper(Node, ResultVals))
493       break;
494     LLVM_DEBUG(dbgs() << "Could not custom legalize node\n");
495     LLVM_FALLTHROUGH;
496   case TargetLowering::Expand:
497     LLVM_DEBUG(dbgs() << "Expanding\n");
498     Expand(Node, ResultVals);
499     break;
500   }
501 
502   if (ResultVals.empty())
503     return TranslateLegalizeResults(Op, Node);
504 
505   Changed = true;
506   return RecursivelyLegalizeResults(Op, ResultVals);
507 }
508 
509 // FIXME: This is very similar to TargetLowering::LowerOperationWrapper. Can we
510 // merge them somehow?
511 bool VectorLegalizer::LowerOperationWrapper(SDNode *Node,
512                                             SmallVectorImpl<SDValue> &Results) {
513   SDValue Res = TLI.LowerOperation(SDValue(Node, 0), DAG);
514 
515   if (!Res.getNode())
516     return false;
517 
518   if (Res == SDValue(Node, 0))
519     return true;
520 
521   // If the original node has one result, take the return value from
522   // LowerOperation as is. It might not be result number 0.
523   if (Node->getNumValues() == 1) {
524     Results.push_back(Res);
525     return true;
526   }
527 
528   // If the original node has multiple results, then the return node should
529   // have the same number of results.
530   assert((Node->getNumValues() == Res->getNumValues()) &&
531          "Lowering returned the wrong number of results!");
532 
533   // Place the new result values based on the result number of N.
534   for (unsigned I = 0, E = Node->getNumValues(); I != E; ++I)
535     Results.push_back(Res.getValue(I));
536 
537   return true;
538 }
539 
540 void VectorLegalizer::Promote(SDNode *Node, SmallVectorImpl<SDValue> &Results) {
541   // For a few operations there is a specific concept for promotion based on
542   // the operand's type.
543   switch (Node->getOpcode()) {
544   case ISD::SINT_TO_FP:
545   case ISD::UINT_TO_FP:
546   case ISD::STRICT_SINT_TO_FP:
547   case ISD::STRICT_UINT_TO_FP:
548     // "Promote" the operation by extending the operand.
549     PromoteINT_TO_FP(Node, Results);
550     return;
551   case ISD::FP_TO_UINT:
552   case ISD::FP_TO_SINT:
553   case ISD::STRICT_FP_TO_UINT:
554   case ISD::STRICT_FP_TO_SINT:
555     // Promote the operation by widening the result type and truncating afterwards.
556     PromoteFP_TO_INT(Node, Results);
557     return;
558   case ISD::FP_ROUND:
559   case ISD::FP_EXTEND:
560     // These operations are used to do promotion so they can't be promoted
561     // themselves.
562     llvm_unreachable("Don't know how to promote this operation!");
563   }
564 
565   // There are currently two cases of vector promotion:
566   // 1) Bitcasting a vector of integers to a different type to a vector of the
567   //    same overall length. For example, x86 promotes ISD::AND v2i32 to v1i64.
568   // 2) Extending a vector of floats to a vector of the same number of larger
569   //    floats. For example, AArch64 promotes ISD::FADD on v4f16 to v4f32.
570   assert(Node->getNumValues() == 1 &&
571          "Can't promote a vector with multiple results!");
572   MVT VT = Node->getSimpleValueType(0);
573   MVT NVT = TLI.getTypeToPromoteTo(Node->getOpcode(), VT);
574   SDLoc dl(Node);
575   SmallVector<SDValue, 4> Operands(Node->getNumOperands());
576 
577   for (unsigned j = 0; j != Node->getNumOperands(); ++j) {
578     if (Node->getOperand(j).getValueType().isVector())
579       if (Node->getOperand(j)
580               .getValueType()
581               .getVectorElementType()
582               .isFloatingPoint() &&
583           NVT.isVector() && NVT.getVectorElementType().isFloatingPoint())
584         Operands[j] = DAG.getNode(ISD::FP_EXTEND, dl, NVT, Node->getOperand(j));
585       else
586         Operands[j] = DAG.getNode(ISD::BITCAST, dl, NVT, Node->getOperand(j));
587     else
588       Operands[j] = Node->getOperand(j);
589   }
590 
591   SDValue Res =
592       DAG.getNode(Node->getOpcode(), dl, NVT, Operands, Node->getFlags());
593 
594   if ((VT.isFloatingPoint() && NVT.isFloatingPoint()) ||
595       (VT.isVector() && VT.getVectorElementType().isFloatingPoint() &&
596        NVT.isVector() && NVT.getVectorElementType().isFloatingPoint()))
597     Res = DAG.getNode(ISD::FP_ROUND, dl, VT, Res, DAG.getIntPtrConstant(0, dl));
598   else
599     Res = DAG.getNode(ISD::BITCAST, dl, VT, Res);
600 
601   Results.push_back(Res);
602 }
603 
604 void VectorLegalizer::PromoteINT_TO_FP(SDNode *Node,
605                                        SmallVectorImpl<SDValue> &Results) {
606   // INT_TO_FP operations may require the input operand be promoted even
607   // when the type is otherwise legal.
608   bool IsStrict = Node->isStrictFPOpcode();
609   MVT VT = Node->getOperand(IsStrict ? 1 : 0).getSimpleValueType();
610   MVT NVT = TLI.getTypeToPromoteTo(Node->getOpcode(), VT);
611   assert(NVT.getVectorNumElements() == VT.getVectorNumElements() &&
612          "Vectors have different number of elements!");
613 
614   SDLoc dl(Node);
615   SmallVector<SDValue, 4> Operands(Node->getNumOperands());
616 
617   unsigned Opc = (Node->getOpcode() == ISD::UINT_TO_FP ||
618                   Node->getOpcode() == ISD::STRICT_UINT_TO_FP)
619                      ? ISD::ZERO_EXTEND
620                      : ISD::SIGN_EXTEND;
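  // E.g. (a sketch): a UINT_TO_FP whose v4i16 operand type must be promoted to
  // v4i32 is rewritten as UINT_TO_FP(ZERO_EXTEND v4i16 -> v4i32).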
621   for (unsigned j = 0; j != Node->getNumOperands(); ++j) {
622     if (Node->getOperand(j).getValueType().isVector())
623       Operands[j] = DAG.getNode(Opc, dl, NVT, Node->getOperand(j));
624     else
625       Operands[j] = Node->getOperand(j);
626   }
627 
628   if (IsStrict) {
629     SDValue Res = DAG.getNode(Node->getOpcode(), dl,
630                               {Node->getValueType(0), MVT::Other}, Operands);
631     Results.push_back(Res);
632     Results.push_back(Res.getValue(1));
633     return;
634   }
635 
636   SDValue Res =
637       DAG.getNode(Node->getOpcode(), dl, Node->getValueType(0), Operands);
638   Results.push_back(Res);
639 }
640 
641 // For FP_TO_INT we promote the result type to a vector type with wider
642 // elements and then truncate the result.  This is different from the default
643   // PromoteVector, which uses bitcast to promote, thus assuming that the
644 // promoted vector type has the same overall size.
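// For example (a sketch): an FP_TO_SINT producing v4i16 might be promoted to
// produce v4i32 instead, followed by an AssertSext to i16 and a TRUNCATE back
// to v4i16.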
645 void VectorLegalizer::PromoteFP_TO_INT(SDNode *Node,
646                                        SmallVectorImpl<SDValue> &Results) {
647   MVT VT = Node->getSimpleValueType(0);
648   MVT NVT = TLI.getTypeToPromoteTo(Node->getOpcode(), VT);
649   bool IsStrict = Node->isStrictFPOpcode();
650   assert(NVT.getVectorNumElements() == VT.getVectorNumElements() &&
651          "Vectors have different number of elements!");
652 
653   unsigned NewOpc = Node->getOpcode();
654   // Change FP_TO_UINT to FP_TO_SINT if possible.
655   // TODO: Should we only do this if FP_TO_UINT itself isn't legal?
656   if (NewOpc == ISD::FP_TO_UINT &&
657       TLI.isOperationLegalOrCustom(ISD::FP_TO_SINT, NVT))
658     NewOpc = ISD::FP_TO_SINT;
659 
660   if (NewOpc == ISD::STRICT_FP_TO_UINT &&
661       TLI.isOperationLegalOrCustom(ISD::STRICT_FP_TO_SINT, NVT))
662     NewOpc = ISD::STRICT_FP_TO_SINT;
663 
664   SDLoc dl(Node);
665   SDValue Promoted, Chain;
666   if (IsStrict) {
667     Promoted = DAG.getNode(NewOpc, dl, {NVT, MVT::Other},
668                            {Node->getOperand(0), Node->getOperand(1)});
669     Chain = Promoted.getValue(1);
670   } else
671     Promoted = DAG.getNode(NewOpc, dl, NVT, Node->getOperand(0));
672 
673   // Assert that the converted value fits in the original type.  If it doesn't
674   // (e.g. because the value being converted is too big), then the result of the
675   // original operation was undefined anyway, so the assert is still correct.
676   if (Node->getOpcode() == ISD::FP_TO_UINT ||
677       Node->getOpcode() == ISD::STRICT_FP_TO_UINT)
678     NewOpc = ISD::AssertZext;
679   else
680     NewOpc = ISD::AssertSext;
681 
682   Promoted = DAG.getNode(NewOpc, dl, NVT, Promoted,
683                          DAG.getValueType(VT.getScalarType()));
684   Promoted = DAG.getNode(ISD::TRUNCATE, dl, VT, Promoted);
685   Results.push_back(Promoted);
686   if (IsStrict)
687     Results.push_back(Chain);
688 }
689 
690 std::pair<SDValue, SDValue> VectorLegalizer::ExpandLoad(SDNode *N) {
691   LoadSDNode *LD = cast<LoadSDNode>(N);
692   return TLI.scalarizeVectorLoad(LD, DAG);
693 }
694 
695 SDValue VectorLegalizer::ExpandStore(SDNode *N) {
696   StoreSDNode *ST = cast<StoreSDNode>(N);
697   SDValue TF = TLI.scalarizeVectorStore(ST, DAG);
698   return TF;
699 }
700 
701 void VectorLegalizer::Expand(SDNode *Node, SmallVectorImpl<SDValue> &Results) {
702   switch (Node->getOpcode()) {
703   case ISD::LOAD: {
704     std::pair<SDValue, SDValue> Tmp = ExpandLoad(Node);
705     Results.push_back(Tmp.first);
706     Results.push_back(Tmp.second);
707     return;
708   }
709   case ISD::STORE:
710     Results.push_back(ExpandStore(Node));
711     return;
712   case ISD::MERGE_VALUES:
713     for (unsigned i = 0, e = Node->getNumValues(); i != e; ++i)
714       Results.push_back(Node->getOperand(i));
715     return;
716   case ISD::SIGN_EXTEND_INREG:
717     Results.push_back(ExpandSEXTINREG(Node));
718     return;
719   case ISD::ANY_EXTEND_VECTOR_INREG:
720     Results.push_back(ExpandANY_EXTEND_VECTOR_INREG(Node));
721     return;
722   case ISD::SIGN_EXTEND_VECTOR_INREG:
723     Results.push_back(ExpandSIGN_EXTEND_VECTOR_INREG(Node));
724     return;
725   case ISD::ZERO_EXTEND_VECTOR_INREG:
726     Results.push_back(ExpandZERO_EXTEND_VECTOR_INREG(Node));
727     return;
728   case ISD::BSWAP:
729     Results.push_back(ExpandBSWAP(Node));
730     return;
731   case ISD::VSELECT:
732     Results.push_back(ExpandVSELECT(Node));
733     return;
734   case ISD::VP_SELECT:
735     Results.push_back(ExpandVP_SELECT(Node));
736     return;
737   case ISD::SELECT:
738     Results.push_back(ExpandSELECT(Node));
739     return;
740   case ISD::FP_TO_UINT:
741     ExpandFP_TO_UINT(Node, Results);
742     return;
743   case ISD::UINT_TO_FP:
744     ExpandUINT_TO_FLOAT(Node, Results);
745     return;
746   case ISD::FNEG:
747     Results.push_back(ExpandFNEG(Node));
748     return;
749   case ISD::FSUB:
750     ExpandFSUB(Node, Results);
751     return;
752   case ISD::SETCC:
753   case ISD::VP_SETCC:
754     ExpandSETCC(Node, Results);
755     return;
756   case ISD::ABS:
757     if (SDValue Expanded = TLI.expandABS(Node, DAG)) {
758       Results.push_back(Expanded);
759       return;
760     }
761     break;
762   case ISD::BITREVERSE:
763     ExpandBITREVERSE(Node, Results);
764     return;
765   case ISD::CTPOP:
766     if (SDValue Expanded = TLI.expandCTPOP(Node, DAG)) {
767       Results.push_back(Expanded);
768       return;
769     }
770     break;
771   case ISD::CTLZ:
772   case ISD::CTLZ_ZERO_UNDEF:
773     if (SDValue Expanded = TLI.expandCTLZ(Node, DAG)) {
774       Results.push_back(Expanded);
775       return;
776     }
777     break;
778   case ISD::CTTZ:
779   case ISD::CTTZ_ZERO_UNDEF:
780     if (SDValue Expanded = TLI.expandCTTZ(Node, DAG)) {
781       Results.push_back(Expanded);
782       return;
783     }
784     break;
785   case ISD::FSHL:
786   case ISD::FSHR:
787     if (SDValue Expanded = TLI.expandFunnelShift(Node, DAG)) {
788       Results.push_back(Expanded);
789       return;
790     }
791     break;
792   case ISD::ROTL:
793   case ISD::ROTR:
794     if (SDValue Expanded = TLI.expandROT(Node, false /*AllowVectorOps*/, DAG)) {
795       Results.push_back(Expanded);
796       return;
797     }
798     break;
799   case ISD::FMINNUM:
800   case ISD::FMAXNUM:
801     if (SDValue Expanded = TLI.expandFMINNUM_FMAXNUM(Node, DAG)) {
802       Results.push_back(Expanded);
803       return;
804     }
805     break;
806   case ISD::SMIN:
807   case ISD::SMAX:
808   case ISD::UMIN:
809   case ISD::UMAX:
810     if (SDValue Expanded = TLI.expandIntMINMAX(Node, DAG)) {
811       Results.push_back(Expanded);
812       return;
813     }
814     break;
815   case ISD::UADDO:
816   case ISD::USUBO:
817     ExpandUADDSUBO(Node, Results);
818     return;
819   case ISD::SADDO:
820   case ISD::SSUBO:
821     ExpandSADDSUBO(Node, Results);
822     return;
823   case ISD::UMULO:
824   case ISD::SMULO:
825     ExpandMULO(Node, Results);
826     return;
827   case ISD::USUBSAT:
828   case ISD::SSUBSAT:
829   case ISD::UADDSAT:
830   case ISD::SADDSAT:
831     if (SDValue Expanded = TLI.expandAddSubSat(Node, DAG)) {
832       Results.push_back(Expanded);
833       return;
834     }
835     break;
836   case ISD::SMULFIX:
837   case ISD::UMULFIX:
838     if (SDValue Expanded = TLI.expandFixedPointMul(Node, DAG)) {
839       Results.push_back(Expanded);
840       return;
841     }
842     break;
843   case ISD::SMULFIXSAT:
844   case ISD::UMULFIXSAT:
845     // FIXME: We do not expand SMULFIXSAT/UMULFIXSAT here yet, not sure exactly
846     // why. Maybe it results in worse codegen compared to the unroll for some
847     // targets? This should probably be investigated. And if we still prefer to
848     // unroll an explanation could be helpful.
849     break;
850   case ISD::SDIVFIX:
851   case ISD::UDIVFIX:
852     ExpandFixedPointDiv(Node, Results);
853     return;
854   case ISD::SDIVFIXSAT:
855   case ISD::UDIVFIXSAT:
856     break;
857 #define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \
858   case ISD::STRICT_##DAGN:
859 #include "llvm/IR/ConstrainedOps.def"
860     ExpandStrictFPOp(Node, Results);
861     return;
862   case ISD::VECREDUCE_ADD:
863   case ISD::VECREDUCE_MUL:
864   case ISD::VECREDUCE_AND:
865   case ISD::VECREDUCE_OR:
866   case ISD::VECREDUCE_XOR:
867   case ISD::VECREDUCE_SMAX:
868   case ISD::VECREDUCE_SMIN:
869   case ISD::VECREDUCE_UMAX:
870   case ISD::VECREDUCE_UMIN:
871   case ISD::VECREDUCE_FADD:
872   case ISD::VECREDUCE_FMUL:
873   case ISD::VECREDUCE_FMAX:
874   case ISD::VECREDUCE_FMIN:
875     Results.push_back(TLI.expandVecReduce(Node, DAG));
876     return;
877   case ISD::VECREDUCE_SEQ_FADD:
878   case ISD::VECREDUCE_SEQ_FMUL:
879     Results.push_back(TLI.expandVecReduceSeq(Node, DAG));
880     return;
881   case ISD::SREM:
882   case ISD::UREM:
883     ExpandREM(Node, Results);
884     return;
885   case ISD::VP_MERGE:
886     Results.push_back(ExpandVP_MERGE(Node));
887     return;
888   }
889 
890   Results.push_back(DAG.UnrollVectorOp(Node));
891 }
892 
893 SDValue VectorLegalizer::ExpandSELECT(SDNode *Node) {
894   // Lower a select instruction where the condition is a scalar and the
895   // operands are vectors. Lower this select to VSELECT and implement it
896   // using XOR, AND, and OR. The selector bit is broadcast.
897   EVT VT = Node->getValueType(0);
898   SDLoc DL(Node);
899 
900   SDValue Mask = Node->getOperand(0);
901   SDValue Op1 = Node->getOperand(1);
902   SDValue Op2 = Node->getOperand(2);
903 
904   assert(VT.isVector() && !Mask.getValueType().isVector()
905          && Op1.getValueType() == Op2.getValueType() && "Invalid type");
906 
907   // If we can't even use the basic vector operations of
908   // AND,OR,XOR, we will have to scalarize the op.
909   // Note that the operation may be 'promoted', which means that it is
910   // 'bitcasted' to another type that is already handled.
911   // Also, we need to be able to construct a splat vector using either
912   // BUILD_VECTOR or SPLAT_VECTOR.
913   // FIXME: Should we also permit fixed-length SPLAT_VECTOR as a fallback to
914   // BUILD_VECTOR?
915   if (TLI.getOperationAction(ISD::AND, VT) == TargetLowering::Expand ||
916       TLI.getOperationAction(ISD::XOR, VT) == TargetLowering::Expand ||
917       TLI.getOperationAction(ISD::OR, VT) == TargetLowering::Expand ||
918       TLI.getOperationAction(VT.isFixedLengthVector() ? ISD::BUILD_VECTOR
919                                                       : ISD::SPLAT_VECTOR,
920                              VT) == TargetLowering::Expand)
921     return DAG.UnrollVectorOp(Node);
922 
923   // Generate a mask operand.
924   EVT MaskTy = VT.changeVectorElementTypeToInteger();
925 
926   // What is the size of each element in the vector mask.
927   EVT BitTy = MaskTy.getScalarType();
928 
929   Mask = DAG.getSelect(DL, BitTy, Mask, DAG.getAllOnesConstant(DL, BitTy),
930                        DAG.getConstant(0, DL, BitTy));
931 
932   // Broadcast the mask so that the entire vector is all ones or all zeros.
933   if (VT.isFixedLengthVector())
934     Mask = DAG.getSplatBuildVector(MaskTy, DL, Mask);
935   else
936     Mask = DAG.getSplatVector(MaskTy, DL, Mask);
937 
938   // Bitcast the operands to be the same type as the mask.
939   // This is needed when we select between FP types because
940   // the mask is a vector of integers.
941   Op1 = DAG.getNode(ISD::BITCAST, DL, MaskTy, Op1);
942   Op2 = DAG.getNode(ISD::BITCAST, DL, MaskTy, Op2);
943 
944   SDValue NotMask = DAG.getNOT(DL, Mask, MaskTy);
945 
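  // Blend the operands: Val = (Op1 & Mask) | (Op2 & ~Mask), then bitcast the
  // result back to the original type.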
946   Op1 = DAG.getNode(ISD::AND, DL, MaskTy, Op1, Mask);
947   Op2 = DAG.getNode(ISD::AND, DL, MaskTy, Op2, NotMask);
948   SDValue Val = DAG.getNode(ISD::OR, DL, MaskTy, Op1, Op2);
949   return DAG.getNode(ISD::BITCAST, DL, Node->getValueType(0), Val);
950 }
951 
952 SDValue VectorLegalizer::ExpandSEXTINREG(SDNode *Node) {
953   EVT VT = Node->getValueType(0);
954 
955   // Make sure that the SRA and SHL instructions are available.
956   if (TLI.getOperationAction(ISD::SRA, VT) == TargetLowering::Expand ||
957       TLI.getOperationAction(ISD::SHL, VT) == TargetLowering::Expand)
958     return DAG.UnrollVectorOp(Node);
959 
960   SDLoc DL(Node);
961   EVT OrigTy = cast<VTSDNode>(Node->getOperand(1))->getVT();
962 
963   unsigned BW = VT.getScalarSizeInBits();
964   unsigned OrigBW = OrigTy.getScalarSizeInBits();
965   SDValue ShiftSz = DAG.getConstant(BW - OrigBW, DL, VT);
966 
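  // E.g. (a sketch): sign-extending the low 8 bits of each lane of a v4i32
  // uses ShiftSz == 24, i.e. a SHL by 24 followed by an arithmetic SRA by 24.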
967   SDValue Op = DAG.getNode(ISD::SHL, DL, VT, Node->getOperand(0), ShiftSz);
968   return DAG.getNode(ISD::SRA, DL, VT, Op, ShiftSz);
969 }
970 
971 // Generically expand a vector anyext in register to a shuffle of the relevant
972 // lanes into the appropriate locations, with other lanes left undef.
973 SDValue VectorLegalizer::ExpandANY_EXTEND_VECTOR_INREG(SDNode *Node) {
974   SDLoc DL(Node);
975   EVT VT = Node->getValueType(0);
976   int NumElements = VT.getVectorNumElements();
977   SDValue Src = Node->getOperand(0);
978   EVT SrcVT = Src.getValueType();
979   int NumSrcElements = SrcVT.getVectorNumElements();
980 
981   // *_EXTEND_VECTOR_INREG SrcVT can be smaller than VT - so insert the vector
982   // into a larger vector type.
983   if (SrcVT.bitsLE(VT)) {
984     assert((VT.getSizeInBits() % SrcVT.getScalarSizeInBits()) == 0 &&
985            "ANY_EXTEND_VECTOR_INREG vector size mismatch");
986     NumSrcElements = VT.getSizeInBits() / SrcVT.getScalarSizeInBits();
987     SrcVT = EVT::getVectorVT(*DAG.getContext(), SrcVT.getScalarType(),
988                              NumSrcElements);
989     Src = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, SrcVT, DAG.getUNDEF(SrcVT),
990                       Src, DAG.getVectorIdxConstant(0, DL));
991   }
992 
993   // Build a base mask of undef shuffles.
994   SmallVector<int, 16> ShuffleMask;
995   ShuffleMask.resize(NumSrcElements, -1);
996 
997   // Place the extended lanes into the correct locations.
998   int ExtLaneScale = NumSrcElements / NumElements;
999   int EndianOffset = DAG.getDataLayout().isBigEndian() ? ExtLaneScale - 1 : 0;
1000   for (int i = 0; i < NumElements; ++i)
1001     ShuffleMask[i * ExtLaneScale + EndianOffset] = i;
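  // E.g. (a sketch): any-extending the low four i16 lanes of a v8i16 source to
  // v4i32 on a little-endian target builds the mask <0,-1,1,-1,2,-1,3,-1>; the
  // undef odd lanes become the high halves of each i32 after the bitcast.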
1002 
1003   return DAG.getNode(
1004       ISD::BITCAST, DL, VT,
1005       DAG.getVectorShuffle(SrcVT, DL, Src, DAG.getUNDEF(SrcVT), ShuffleMask));
1006 }
1007 
1008 SDValue VectorLegalizer::ExpandSIGN_EXTEND_VECTOR_INREG(SDNode *Node) {
1009   SDLoc DL(Node);
1010   EVT VT = Node->getValueType(0);
1011   SDValue Src = Node->getOperand(0);
1012   EVT SrcVT = Src.getValueType();
1013 
1014   // First build an any-extend node which can be legalized above when we
1015   // recurse through it.
1016   SDValue Op = DAG.getNode(ISD::ANY_EXTEND_VECTOR_INREG, DL, VT, Src);
1017 
1018   // Now we need to sign extend. Do this by shifting the elements. Even if these
1019   // aren't legal operations, they have a better chance of being legalized
1020   // without full scalarization than the sign extension does.
1021   unsigned EltWidth = VT.getScalarSizeInBits();
1022   unsigned SrcEltWidth = SrcVT.getScalarSizeInBits();
1023   SDValue ShiftAmount = DAG.getConstant(EltWidth - SrcEltWidth, DL, VT);
1024   return DAG.getNode(ISD::SRA, DL, VT,
1025                      DAG.getNode(ISD::SHL, DL, VT, Op, ShiftAmount),
1026                      ShiftAmount);
1027 }
1028 
1029 // Generically expand a vector zext in register to a shuffle of the relevant
1030 // lanes into the appropriate locations, a blend of zero into the high bits,
1031 // and a bitcast to the wider element type.
1032 SDValue VectorLegalizer::ExpandZERO_EXTEND_VECTOR_INREG(SDNode *Node) {
1033   SDLoc DL(Node);
1034   EVT VT = Node->getValueType(0);
1035   int NumElements = VT.getVectorNumElements();
1036   SDValue Src = Node->getOperand(0);
1037   EVT SrcVT = Src.getValueType();
1038   int NumSrcElements = SrcVT.getVectorNumElements();
1039 
1040   // *_EXTEND_VECTOR_INREG SrcVT can be smaller than VT - so insert the vector
1041   // into a larger vector type.
1042   if (SrcVT.bitsLE(VT)) {
1043     assert((VT.getSizeInBits() % SrcVT.getScalarSizeInBits()) == 0 &&
1044            "ZERO_EXTEND_VECTOR_INREG vector size mismatch");
1045     NumSrcElements = VT.getSizeInBits() / SrcVT.getScalarSizeInBits();
1046     SrcVT = EVT::getVectorVT(*DAG.getContext(), SrcVT.getScalarType(),
1047                              NumSrcElements);
1048     Src = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, SrcVT, DAG.getUNDEF(SrcVT),
1049                       Src, DAG.getVectorIdxConstant(0, DL));
1050   }
1051 
1052   // Build up a zero vector to blend into this one.
1053   SDValue Zero = DAG.getConstant(0, DL, SrcVT);
1054 
1055   // Shuffle the incoming lanes into the correct position, and pull all other
1056   // lanes from the zero vector.
1057   auto ShuffleMask = llvm::to_vector<16>(llvm::seq<int>(0, NumSrcElements));
1058 
1059   int ExtLaneScale = NumSrcElements / NumElements;
1060   int EndianOffset = DAG.getDataLayout().isBigEndian() ? ExtLaneScale - 1 : 0;
1061   for (int i = 0; i < NumElements; ++i)
1062     ShuffleMask[i * ExtLaneScale + EndianOffset] = NumSrcElements + i;
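  // E.g. (a sketch): zero-extending the low four i16 lanes of a v8i16 source
  // to v4i32 on a little-endian target builds the mask <8,1,9,3,10,5,11,7>,
  // interleaving the source lanes with lanes from the zero vector.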
1063 
1064   return DAG.getNode(ISD::BITCAST, DL, VT,
1065                      DAG.getVectorShuffle(SrcVT, DL, Zero, Src, ShuffleMask));
1066 }
1067 
1068 static void createBSWAPShuffleMask(EVT VT, SmallVectorImpl<int> &ShuffleMask) {
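  // A sketch of the output: for v2i32 this produces <3,2,1,0,7,6,5,4>,
  // reversing the bytes within each element.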
1069   int ScalarSizeInBytes = VT.getScalarSizeInBits() / 8;
1070   for (int I = 0, E = VT.getVectorNumElements(); I != E; ++I)
1071     for (int J = ScalarSizeInBytes - 1; J >= 0; --J)
1072       ShuffleMask.push_back((I * ScalarSizeInBytes) + J);
1073 }
1074 
1075 SDValue VectorLegalizer::ExpandBSWAP(SDNode *Node) {
1076   EVT VT = Node->getValueType(0);
1077 
1078   // Scalable vectors can't use shuffle expansion.
1079   if (VT.isScalableVector())
1080     return TLI.expandBSWAP(Node, DAG);
1081 
1082   // Generate a byte wise shuffle mask for the BSWAP.
1083   SmallVector<int, 16> ShuffleMask;
1084   createBSWAPShuffleMask(VT, ShuffleMask);
1085   EVT ByteVT = EVT::getVectorVT(*DAG.getContext(), MVT::i8, ShuffleMask.size());
1086 
1087   // Only emit a shuffle if the mask is legal.
1088   if (TLI.isShuffleMaskLegal(ShuffleMask, ByteVT)) {
1089     SDLoc DL(Node);
1090     SDValue Op = DAG.getNode(ISD::BITCAST, DL, ByteVT, Node->getOperand(0));
1091     Op = DAG.getVectorShuffle(ByteVT, DL, Op, DAG.getUNDEF(ByteVT), ShuffleMask);
1092     return DAG.getNode(ISD::BITCAST, DL, VT, Op);
1093   }
1094 
1095   // If we have the appropriate vector bit operations, it is better to use them
1096   // than unrolling and expanding each component.
1097   if (TLI.isOperationLegalOrCustom(ISD::SHL, VT) &&
1098       TLI.isOperationLegalOrCustom(ISD::SRL, VT) &&
1099       TLI.isOperationLegalOrCustomOrPromote(ISD::AND, VT) &&
1100       TLI.isOperationLegalOrCustomOrPromote(ISD::OR, VT))
1101     return TLI.expandBSWAP(Node, DAG);
1102 
1103   // Otherwise unroll.
1104   return DAG.UnrollVectorOp(Node);
1105 }
1106 
1107 void VectorLegalizer::ExpandBITREVERSE(SDNode *Node,
1108                                        SmallVectorImpl<SDValue> &Results) {
1109   EVT VT = Node->getValueType(0);
1110 
1111   // We can't unroll or use shuffles for scalable vectors.
1112   if (VT.isScalableVector()) {
1113     Results.push_back(TLI.expandBITREVERSE(Node, DAG));
1114     return;
1115   }
1116 
1117   // If we have the scalar operation, it's probably cheaper to unroll it.
1118   if (TLI.isOperationLegalOrCustom(ISD::BITREVERSE, VT.getScalarType())) {
1119     SDValue Tmp = DAG.UnrollVectorOp(Node);
1120     Results.push_back(Tmp);
1121     return;
1122   }
1123 
1124   // If the vector element width is a whole number of bytes, test if it's legal
1125   // to BSWAP shuffle the bytes and then perform the BITREVERSE on the byte
1126   // vector. This greatly reduces the number of bit shifts necessary.
1127   unsigned ScalarSizeInBits = VT.getScalarSizeInBits();
1128   if (ScalarSizeInBits > 8 && (ScalarSizeInBits % 8) == 0) {
1129     SmallVector<int, 16> BSWAPMask;
1130     createBSWAPShuffleMask(VT, BSWAPMask);
1131 
1132     EVT ByteVT = EVT::getVectorVT(*DAG.getContext(), MVT::i8, BSWAPMask.size());
1133     if (TLI.isShuffleMaskLegal(BSWAPMask, ByteVT) &&
1134         (TLI.isOperationLegalOrCustom(ISD::BITREVERSE, ByteVT) ||
1135          (TLI.isOperationLegalOrCustom(ISD::SHL, ByteVT) &&
1136           TLI.isOperationLegalOrCustom(ISD::SRL, ByteVT) &&
1137           TLI.isOperationLegalOrCustomOrPromote(ISD::AND, ByteVT) &&
1138           TLI.isOperationLegalOrCustomOrPromote(ISD::OR, ByteVT)))) {
1139       SDLoc DL(Node);
1140       SDValue Op = DAG.getNode(ISD::BITCAST, DL, ByteVT, Node->getOperand(0));
1141       Op = DAG.getVectorShuffle(ByteVT, DL, Op, DAG.getUNDEF(ByteVT),
1142                                 BSWAPMask);
1143       Op = DAG.getNode(ISD::BITREVERSE, DL, ByteVT, Op);
1144       Op = DAG.getNode(ISD::BITCAST, DL, VT, Op);
1145       Results.push_back(Op);
1146       return;
1147     }
1148   }
1149 
1150   // If we have the appropriate vector bit operations, it is better to use them
1151   // than unrolling and expanding each component.
1152   if (TLI.isOperationLegalOrCustom(ISD::SHL, VT) &&
1153       TLI.isOperationLegalOrCustom(ISD::SRL, VT) &&
1154       TLI.isOperationLegalOrCustomOrPromote(ISD::AND, VT) &&
1155       TLI.isOperationLegalOrCustomOrPromote(ISD::OR, VT)) {
1156     Results.push_back(TLI.expandBITREVERSE(Node, DAG));
1157     return;
1158   }
1159 
1160   // Otherwise unroll.
1161   SDValue Tmp = DAG.UnrollVectorOp(Node);
1162   Results.push_back(Tmp);
1163 }
1164 
1165 SDValue VectorLegalizer::ExpandVSELECT(SDNode *Node) {
1166   // Implement VSELECT in terms of XOR, AND, OR
1167   // on platforms which do not support blend natively.
1168   SDLoc DL(Node);
1169 
1170   SDValue Mask = Node->getOperand(0);
1171   SDValue Op1 = Node->getOperand(1);
1172   SDValue Op2 = Node->getOperand(2);
1173 
1174   EVT VT = Mask.getValueType();
1175 
1176   // If we can't even use the basic vector operations of
1177   // AND,OR,XOR, we will have to scalarize the op.
1178   // Note that the operation may be 'promoted', which means that it is
1179   // 'bitcasted' to another type that is already handled.
1180   if (TLI.getOperationAction(ISD::AND, VT) == TargetLowering::Expand ||
1181       TLI.getOperationAction(ISD::XOR, VT) == TargetLowering::Expand ||
1182       TLI.getOperationAction(ISD::OR, VT) == TargetLowering::Expand)
1183     return DAG.UnrollVectorOp(Node);
1184 
1185   // This operation also isn't safe with AND, OR, XOR when the boolean type is
1186   // 0/1 and the select operands aren't also booleans, as we need an all-ones
1187   // vector constant to mask with.
1188   // FIXME: Sign extend 1 to all ones if that's legal on the target.
1189   auto BoolContents = TLI.getBooleanContents(Op1.getValueType());
1190   if (BoolContents != TargetLowering::ZeroOrNegativeOneBooleanContent &&
1191       !(BoolContents == TargetLowering::ZeroOrOneBooleanContent &&
1192         Op1.getValueType().getVectorElementType() == MVT::i1))
1193     return DAG.UnrollVectorOp(Node);
1194 
1195   // If the mask and the type are different sizes, unroll the vector op. This
1196   // can occur when getSetCCResultType returns something that is different in
1197   // size from the operand types. For example, v4i8 = select v4i32, v4i8, v4i8.
1198   if (VT.getSizeInBits() != Op1.getValueSizeInBits())
1199     return DAG.UnrollVectorOp(Node);
1200 
1201   // Bitcast the operands to be the same type as the mask.
1202   // This is needed when we select between FP types because
1203   // the mask is a vector of integers.
1204   Op1 = DAG.getNode(ISD::BITCAST, DL, VT, Op1);
1205   Op2 = DAG.getNode(ISD::BITCAST, DL, VT, Op2);
1206 
1207   SDValue NotMask = DAG.getNOT(DL, Mask, VT);
1208 
1209   Op1 = DAG.getNode(ISD::AND, DL, VT, Op1, Mask);
1210   Op2 = DAG.getNode(ISD::AND, DL, VT, Op2, NotMask);
1211   SDValue Val = DAG.getNode(ISD::OR, DL, VT, Op1, Op2);
1212   return DAG.getNode(ISD::BITCAST, DL, Node->getValueType(0), Val);
1213 }
1214 
1215 SDValue VectorLegalizer::ExpandVP_SELECT(SDNode *Node) {
1216   // Implement VP_SELECT in terms of VP_XOR, VP_AND and VP_OR on platforms which
1217   // do not support it natively.
1218   SDLoc DL(Node);
1219 
1220   SDValue Mask = Node->getOperand(0);
1221   SDValue Op1 = Node->getOperand(1);
1222   SDValue Op2 = Node->getOperand(2);
1223   SDValue EVL = Node->getOperand(3);
1224 
1225   EVT VT = Mask.getValueType();
1226 
1227   // If we can't even use the basic vector operations of
1228   // VP_AND,VP_OR,VP_XOR, we will have to scalarize the op.
1229   if (TLI.getOperationAction(ISD::VP_AND, VT) == TargetLowering::Expand ||
1230       TLI.getOperationAction(ISD::VP_XOR, VT) == TargetLowering::Expand ||
1231       TLI.getOperationAction(ISD::VP_OR, VT) == TargetLowering::Expand)
1232     return DAG.UnrollVectorOp(Node);
1233 
1234   // This operation also isn't safe when the operands aren't also booleans.
1235   if (Op1.getValueType().getVectorElementType() != MVT::i1)
1236     return DAG.UnrollVectorOp(Node);
1237 
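  // A sketch of the expansion: NotMask = Mask ^ all-ones (a VP logical NOT),
  // then Val = (Op1 & Mask) | (Op2 & ~Mask), all under the original Mask/EVL.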
1238   SDValue Ones = DAG.getAllOnesConstant(DL, VT);
1239   SDValue NotMask = DAG.getNode(ISD::VP_XOR, DL, VT, Mask, Ones, Mask, EVL);
1240 
1241   Op1 = DAG.getNode(ISD::VP_AND, DL, VT, Op1, Mask, Mask, EVL);
1242   Op2 = DAG.getNode(ISD::VP_AND, DL, VT, Op2, NotMask, Mask, EVL);
1243   return DAG.getNode(ISD::VP_OR, DL, VT, Op1, Op2, Mask, EVL);
1244 }
1245 
1246 SDValue VectorLegalizer::ExpandVP_MERGE(SDNode *Node) {
1247   // Implement VP_MERGE in terms of VSELECT. Construct a mask where vector
1248   // indices less than the EVL/pivot are true. Combine that with the original
1249   // mask for a full-length mask. Use a full-length VSELECT to select between
1250   // the true and false values.
1251   SDLoc DL(Node);
1252 
1253   SDValue Mask = Node->getOperand(0);
1254   SDValue Op1 = Node->getOperand(1);
1255   SDValue Op2 = Node->getOperand(2);
1256   SDValue EVL = Node->getOperand(3);
1257 
1258   EVT MaskVT = Mask.getValueType();
1259   bool IsFixedLen = MaskVT.isFixedLengthVector();
1260 
1261   EVT EVLVecVT = EVT::getVectorVT(*DAG.getContext(), EVL.getValueType(),
1262                                   MaskVT.getVectorElementCount());
1263 
1264   // If we can't construct the EVL mask efficiently, it's better to unroll.
1265   if ((IsFixedLen &&
1266        !TLI.isOperationLegalOrCustom(ISD::BUILD_VECTOR, EVLVecVT)) ||
1267       (!IsFixedLen &&
1268        (!TLI.isOperationLegalOrCustom(ISD::STEP_VECTOR, EVLVecVT) ||
1269         !TLI.isOperationLegalOrCustom(ISD::SPLAT_VECTOR, EVLVecVT))))
1270     return DAG.UnrollVectorOp(Node);
1271 
1272   // If using a SETCC would result in a different type than the mask type,
1273   // unroll.
1274   if (TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
1275                              EVLVecVT) != MaskVT)
1276     return DAG.UnrollVectorOp(Node);
1277 
1278   SDValue StepVec = DAG.getStepVector(DL, EVLVecVT);
1279   SDValue SplatEVL = IsFixedLen ? DAG.getSplatBuildVector(EVLVecVT, DL, EVL)
1280                                 : DAG.getSplatVector(EVLVecVT, DL, EVL);
1281   SDValue EVLMask =
1282       DAG.getSetCC(DL, MaskVT, StepVec, SplatEVL, ISD::CondCode::SETULT);
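  // E.g. (a sketch): with a 4-lane mask and EVL == 3, StepVec is <0,1,2,3>
  // and EVLMask is <1,1,1,0>, so lanes at or past the pivot always take Op2.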
1283 
1284   SDValue FullMask = DAG.getNode(ISD::AND, DL, MaskVT, Mask, EVLMask);
1285   return DAG.getSelect(DL, Node->getValueType(0), FullMask, Op1, Op2);
1286 }
1287 
1288 void VectorLegalizer::ExpandFP_TO_UINT(SDNode *Node,
1289                                        SmallVectorImpl<SDValue> &Results) {
1290   // Attempt to expand using TargetLowering.
1291   SDValue Result, Chain;
1292   if (TLI.expandFP_TO_UINT(Node, Result, Chain, DAG)) {
1293     Results.push_back(Result);
1294     if (Node->isStrictFPOpcode())
1295       Results.push_back(Chain);
1296     return;
1297   }
1298 
1299   // Otherwise go ahead and unroll.
1300   if (Node->isStrictFPOpcode()) {
1301     UnrollStrictFPOp(Node, Results);
1302     return;
1303   }
1304 
1305   Results.push_back(DAG.UnrollVectorOp(Node));
1306 }
1307 
1308 void VectorLegalizer::ExpandUINT_TO_FLOAT(SDNode *Node,
1309                                           SmallVectorImpl<SDValue> &Results) {
1310   bool IsStrict = Node->isStrictFPOpcode();
1311   unsigned OpNo = IsStrict ? 1 : 0;
1312   SDValue Src = Node->getOperand(OpNo);
1313   EVT VT = Src.getValueType();
1314   SDLoc DL(Node);
1315 
1316   // Attempt to expand using TargetLowering.
1317   SDValue Result;
1318   SDValue Chain;
1319   if (TLI.expandUINT_TO_FP(Node, Result, Chain, DAG)) {
1320     Results.push_back(Result);
1321     if (IsStrict)
1322       Results.push_back(Chain);
1323     return;
1324   }
1325 
1326   // Make sure that the SINT_TO_FP and SRL instructions are available.
1327   if (((!IsStrict && TLI.getOperationAction(ISD::SINT_TO_FP, VT) ==
1328                          TargetLowering::Expand) ||
1329        (IsStrict && TLI.getOperationAction(ISD::STRICT_SINT_TO_FP, VT) ==
1330                         TargetLowering::Expand)) ||
1331       TLI.getOperationAction(ISD::SRL, VT) == TargetLowering::Expand) {
1332     if (IsStrict) {
1333       UnrollStrictFPOp(Node, Results);
1334       return;
1335     }
1336 
1337     Results.push_back(DAG.UnrollVectorOp(Node));
1338     return;
1339   }
1340 
1341   unsigned BW = VT.getScalarSizeInBits();
1342   assert((BW == 64 || BW == 32) &&
1343          "Elements in vector-UINT_TO_FP must be 32 or 64 bits wide");
1344 
1345   SDValue HalfWord = DAG.getConstant(BW / 2, DL, VT);
1346 
1347   // Constants to clear the upper part of the word.
1348   // Notice that we can also use SHL+SHR, but using a constant is slightly
1349   // faster on x86.
1350   uint64_t HWMask = (BW == 64) ? 0x00000000FFFFFFFF : 0x0000FFFF;
1351   SDValue HalfWordMask = DAG.getConstant(HWMask, DL, VT);
1352 
1353   // Two to the power of half-word-size.
1354   SDValue TWOHW =
1355       DAG.getConstantFP(1ULL << (BW / 2), DL, Node->getValueType(0));
1356 
1357   // HI is the upper half of Src shifted down; LO is Src with its upper half cleared.
1358   SDValue HI = DAG.getNode(ISD::SRL, DL, VT, Src, HalfWord);
1359   SDValue LO = DAG.getNode(ISD::AND, DL, VT, Src, HalfWordMask);
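  // Per lane this computes (a sketch for BW == 64):
  //   fp(Src) == fp(Src >> 32) * 2^32 + fp(Src & 0xFFFFFFFF)
  // Both halves are non-negative and fit in the signed range, so SINT_TO_FP
  // handles them correctly.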
1360 
1361   if (IsStrict) {
1362     // Convert hi and lo to floats
1363     // Convert the hi part back to the upper values
1364     // TODO: Can any fast-math-flags be set on these nodes?
1365     SDValue fHI = DAG.getNode(ISD::STRICT_SINT_TO_FP, DL,
1366                               {Node->getValueType(0), MVT::Other},
1367                               {Node->getOperand(0), HI});
1368     fHI = DAG.getNode(ISD::STRICT_FMUL, DL, {Node->getValueType(0), MVT::Other},
1369                       {fHI.getValue(1), fHI, TWOHW});
1370     SDValue fLO = DAG.getNode(ISD::STRICT_SINT_TO_FP, DL,
1371                               {Node->getValueType(0), MVT::Other},
1372                               {Node->getOperand(0), LO});
1373 
1374     SDValue TF = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, fHI.getValue(1),
1375                              fLO.getValue(1));
1376 
1377     // Add the two halves
1378     SDValue Result =
1379         DAG.getNode(ISD::STRICT_FADD, DL, {Node->getValueType(0), MVT::Other},
1380                     {TF, fHI, fLO});
1381 
1382     Results.push_back(Result);
1383     Results.push_back(Result.getValue(1));
1384     return;
1385   }
1386 
1387   // Convert HI and LO to floating point.
1388   // Scale the converted HI part back up to its original magnitude.
1389   // TODO: Can any fast-math-flags be set on these nodes?
1390   SDValue fHI = DAG.getNode(ISD::SINT_TO_FP, DL, Node->getValueType(0), HI);
1391   fHI = DAG.getNode(ISD::FMUL, DL, Node->getValueType(0), fHI, TWOHW);
1392   SDValue fLO = DAG.getNode(ISD::SINT_TO_FP, DL, Node->getValueType(0), LO);
1393 
1394   // Add the two halves
1395   Results.push_back(
1396       DAG.getNode(ISD::FADD, DL, Node->getValueType(0), fHI, fLO));
1397 }
1398 
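// Expand a vector FNEG as a subtraction from negative zero when FSUB is
// available: (-0.0) - X flips the sign of every element, including signed
// zeros, whereas (0.0) - X would map +0.0 to +0.0 instead of -0.0. If FSUB is
// not available either, scalarize the node.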
1399 SDValue VectorLegalizer::ExpandFNEG(SDNode *Node) {
1400   if (TLI.isOperationLegalOrCustom(ISD::FSUB, Node->getValueType(0))) {
1401     SDLoc DL(Node);
1402     SDValue Zero = DAG.getConstantFP(-0.0, DL, Node->getValueType(0));
1403     // TODO: If FNEG had fast-math-flags, they'd get propagated to this FSUB.
1404     return DAG.getNode(ISD::FSUB, DL, Node->getValueType(0), Zero,
1405                        Node->getOperand(0));
1406   }
1407   return DAG.UnrollVectorOp(Node);
1408 }
1409 
1410 void VectorLegalizer::ExpandFSUB(SDNode *Node,
1411                                  SmallVectorImpl<SDValue> &Results) {
1412   // For floating-point values, (a-b) is the same as a+(-b). If FNEG and FADD
1413   // are legal or custom for this vector type, we can defer the expansion to
1414   // operation legalization, where it will be lowered as a+(-b).
1415   EVT VT = Node->getValueType(0);
1416   if (TLI.isOperationLegalOrCustom(ISD::FNEG, VT) &&
1417       TLI.isOperationLegalOrCustom(ISD::FADD, VT))
1418     return; // Defer to LegalizeDAG
1419 
1420   SDValue Tmp = DAG.UnrollVectorOp(Node);
1421   Results.push_back(Tmp);
1422 }
1423 
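// Expand a vector SETCC / VP_SETCC node. When the condition code is already
// fine for the operand type, the compare is simply scalarized. Otherwise the
// condition code is legalized first and the compare is rebuilt around it,
// wrapped in a logical NOT when the code had to be inverted, or turned into a
// SELECT_CC as a last resort.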
1424 void VectorLegalizer::ExpandSETCC(SDNode *Node,
1425                                   SmallVectorImpl<SDValue> &Results) {
1426   bool NeedInvert = false;
1427   bool IsVP = Node->getOpcode() == ISD::VP_SETCC;
1428   SDLoc dl(Node);
1429   MVT OpVT = Node->getOperand(0).getSimpleValueType();
1430   ISD::CondCode CCCode = cast<CondCodeSDNode>(Node->getOperand(2))->get();
1431 
1432   if (TLI.getCondCodeAction(CCCode, OpVT) != TargetLowering::Expand) {
1433     Results.push_back(UnrollVSETCC(Node));
1434     return;
1435   }
1436 
1437   SDValue Chain;
1438   SDValue LHS = Node->getOperand(0);
1439   SDValue RHS = Node->getOperand(1);
1440   SDValue CC = Node->getOperand(2);
1441   SDValue Mask, EVL;
1442   if (IsVP) {
1443     Mask = Node->getOperand(3);
1444     EVL = Node->getOperand(4);
1445   }
1446 
1447   bool Legalized =
1448       TLI.LegalizeSetCCCondCode(DAG, Node->getValueType(0), LHS, RHS, CC, Mask,
1449                                 EVL, NeedInvert, dl, Chain);
1450 
1451   if (Legalized) {
1452     // If we expanded the SETCC by swapping LHS and RHS, or by inverting the
1453     // condition code, create a new SETCC node.
1454     if (CC.getNode()) {
1455       if (!IsVP)
1456         LHS = DAG.getNode(ISD::SETCC, dl, Node->getValueType(0), LHS, RHS, CC,
1457                           Node->getFlags());
1458       else
1459         LHS = DAG.getNode(ISD::VP_SETCC, dl, Node->getValueType(0),
1460                           {LHS, RHS, CC, Mask, EVL}, Node->getFlags());
1461     }
1462 
1463     // If we expanded the SETCC by inverting the condition code, then wrap
1464     // the existing SETCC in a NOT to restore the intended condition.
1465     if (NeedInvert) {
1466       if (!IsVP)
1467         LHS = DAG.getLogicalNOT(dl, LHS, LHS->getValueType(0));
1468       else
1469         LHS = DAG.getVPLogicalNOT(dl, LHS, Mask, EVL, LHS->getValueType(0));
1470     }
1471   } else {
1472     // Otherwise, SETCC for the given comparison type must be completely
1473     // illegal; expand it into a SELECT_CC.
1474     EVT VT = Node->getValueType(0);
1475     LHS =
1476         DAG.getNode(ISD::SELECT_CC, dl, VT, LHS, RHS,
1477                     DAG.getBoolConstant(true, dl, VT, LHS.getValueType()),
1478                     DAG.getBoolConstant(false, dl, VT, LHS.getValueType()), CC);
1479     LHS->setFlags(Node->getFlags());
1480   }
1481 
1482   Results.push_back(LHS);
1483 }
1484 
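// The unsigned and signed add/sub-with-overflow expansions below always
// succeed, so their result and overflow values are pushed unconditionally.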
1485 void VectorLegalizer::ExpandUADDSUBO(SDNode *Node,
1486                                      SmallVectorImpl<SDValue> &Results) {
1487   SDValue Result, Overflow;
1488   TLI.expandUADDSUBO(Node, Result, Overflow, DAG);
1489   Results.push_back(Result);
1490   Results.push_back(Overflow);
1491 }
1492 
1493 void VectorLegalizer::ExpandSADDSUBO(SDNode *Node,
1494                                      SmallVectorImpl<SDValue> &Results) {
1495   SDValue Result, Overflow;
1496   TLI.expandSADDSUBO(Node, Result, Overflow, DAG);
1497   Results.push_back(Result);
1498   Results.push_back(Overflow);
1499 }
1500 
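// Unlike the add/sub-with-overflow expansions above, the target hook for
// multiply-with-overflow can fail, in which case the node is scalarized.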
1501 void VectorLegalizer::ExpandMULO(SDNode *Node,
1502                                  SmallVectorImpl<SDValue> &Results) {
1503   SDValue Result, Overflow;
1504   if (!TLI.expandMULO(Node, Result, Overflow, DAG))
1505     std::tie(Result, Overflow) = DAG.UnrollVectorOverflowOp(Node);
1506 
1507   Results.push_back(Result);
1508   Results.push_back(Overflow);
1509 }
1510 
1511 void VectorLegalizer::ExpandFixedPointDiv(SDNode *Node,
1512                                           SmallVectorImpl<SDValue> &Results) {
1513   SDNode *N = Node;
1514   if (SDValue Expanded = TLI.expandFixedPointDiv(N->getOpcode(), SDLoc(N),
1515           N->getOperand(0), N->getOperand(1), N->getConstantOperandVal(2), DAG))
1516     Results.push_back(Expanded);
1517 }
1518 
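// Strict FP nodes with dedicated expansions are forwarded to them; everything
// else is scalarized, with the per-lane chains merged back together.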
1519 void VectorLegalizer::ExpandStrictFPOp(SDNode *Node,
1520                                        SmallVectorImpl<SDValue> &Results) {
1521   if (Node->getOpcode() == ISD::STRICT_UINT_TO_FP) {
1522     ExpandUINT_TO_FLOAT(Node, Results);
1523     return;
1524   }
1525   if (Node->getOpcode() == ISD::STRICT_FP_TO_UINT) {
1526     ExpandFP_TO_UINT(Node, Results);
1527     return;
1528   }
1529 
1530   UnrollStrictFPOp(Node, Results);
1531 }
1532 
1533 void VectorLegalizer::ExpandREM(SDNode *Node,
1534                                 SmallVectorImpl<SDValue> &Results) {
1535   assert((Node->getOpcode() == ISD::SREM || Node->getOpcode() == ISD::UREM) &&
1536          "Expected REM node");
1537 
1538   SDValue Result;
1539   if (!TLI.expandREM(Node, Result, DAG))
1540     Result = DAG.UnrollVectorOp(Node);
1541   Results.push_back(Result);
1542 }
1543 
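// Scalarize a strict FP operation: extract each lane, emit the scalar strict
// node against the incoming chain, and reassemble the per-lane results and
// chains with BUILD_VECTOR and TokenFactor.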
1544 void VectorLegalizer::UnrollStrictFPOp(SDNode *Node,
1545                                        SmallVectorImpl<SDValue> &Results) {
1546   EVT VT = Node->getValueType(0);
1547   EVT EltVT = VT.getVectorElementType();
1548   unsigned NumElems = VT.getVectorNumElements();
1549   unsigned NumOpers = Node->getNumOperands();
1550   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1551 
1552   EVT TmpEltVT = EltVT;
1553   if (Node->getOpcode() == ISD::STRICT_FSETCC ||
1554       Node->getOpcode() == ISD::STRICT_FSETCCS)
1555     TmpEltVT = TLI.getSetCCResultType(DAG.getDataLayout(),
1556                                       *DAG.getContext(), TmpEltVT);
1557 
1558   EVT ValueVTs[] = {TmpEltVT, MVT::Other};
1559   SDValue Chain = Node->getOperand(0);
1560   SDLoc dl(Node);
1561 
1562   SmallVector<SDValue, 32> OpValues;
1563   SmallVector<SDValue, 32> OpChains;
1564   for (unsigned i = 0; i < NumElems; ++i) {
1565     SmallVector<SDValue, 4> Opers;
1566     SDValue Idx = DAG.getVectorIdxConstant(i, dl);
1567 
1568     // The Chain is the first operand.
1569     Opers.push_back(Chain);
1570 
1571     // Now process the remaining operands.
1572     for (unsigned j = 1; j < NumOpers; ++j) {
1573       SDValue Oper = Node->getOperand(j);
1574       EVT OperVT = Oper.getValueType();
1575 
1576       if (OperVT.isVector())
1577         Oper = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
1578                            OperVT.getVectorElementType(), Oper, Idx);
1579 
1580       Opers.push_back(Oper);
1581     }
1582 
1583     SDValue ScalarOp = DAG.getNode(Node->getOpcode(), dl, ValueVTs, Opers);
1584     SDValue ScalarResult = ScalarOp.getValue(0);
1585     SDValue ScalarChain = ScalarOp.getValue(1);
1586 
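    // Strict compares yield a scalar boolean; widen it to an all-ones or
    // all-zeros value of the element type so the lanes form a vector mask.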
1587     if (Node->getOpcode() == ISD::STRICT_FSETCC ||
1588         Node->getOpcode() == ISD::STRICT_FSETCCS)
1589       ScalarResult = DAG.getSelect(dl, EltVT, ScalarResult,
1590                                    DAG.getAllOnesConstant(dl, EltVT),
1591                                    DAG.getConstant(0, dl, EltVT));
1592 
1593     OpValues.push_back(ScalarResult);
1594     OpChains.push_back(ScalarChain);
1595   }
1596 
1597   SDValue Result = DAG.getBuildVector(VT, dl, OpValues);
1598   SDValue NewChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OpChains);
1599 
1600   Results.push_back(Result);
1601   Results.push_back(NewChain);
1602 }
1603 
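// Scalarize a vector SETCC: compare the operands lane by lane and widen each
// scalar boolean to an all-ones / all-zeros element of the result type.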
1604 SDValue VectorLegalizer::UnrollVSETCC(SDNode *Node) {
1605   EVT VT = Node->getValueType(0);
1606   unsigned NumElems = VT.getVectorNumElements();
1607   EVT EltVT = VT.getVectorElementType();
1608   SDValue LHS = Node->getOperand(0);
1609   SDValue RHS = Node->getOperand(1);
1610   SDValue CC = Node->getOperand(2);
1611   EVT TmpEltVT = LHS.getValueType().getVectorElementType();
1612   SDLoc dl(Node);
1613   SmallVector<SDValue, 8> Ops(NumElems);
1614   for (unsigned i = 0; i < NumElems; ++i) {
1615     SDValue LHSElem = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, TmpEltVT, LHS,
1616                                   DAG.getVectorIdxConstant(i, dl));
1617     SDValue RHSElem = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, TmpEltVT, RHS,
1618                                   DAG.getVectorIdxConstant(i, dl));
1619     Ops[i] = DAG.getNode(ISD::SETCC, dl,
1620                          TLI.getSetCCResultType(DAG.getDataLayout(),
1621                                                 *DAG.getContext(), TmpEltVT),
1622                          LHSElem, RHSElem, CC);
1623     Ops[i] = DAG.getSelect(dl, EltVT, Ops[i], DAG.getAllOnesConstant(dl, EltVT),
1624                            DAG.getConstant(0, dl, EltVT));
1625   }
1626   return DAG.getBuildVector(VT, dl, Ops);
1627 }
1628 
1629 bool SelectionDAG::LegalizeVectors() {
1630   return VectorLegalizer(*this).Run();
1631 }
1632