//===- LegalizeVectorOps.cpp - Implement SelectionDAG::LegalizeVectors ----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the SelectionDAG::LegalizeVectors method.
//
// The vector legalizer looks for vector operations which might need to be
// scalarized and legalizes them. This is a separate step from Legalize because
// scalarizing can introduce illegal types. For example, suppose we have an
// ISD::SDIV of type v2i64 on x86-32. The type is legal (for example, addition
// on a v2i64 is legal), but ISD::SDIV isn't legal, so we have to unroll the
// operation, which introduces nodes with the illegal type i64 which must be
// expanded. Similarly, suppose we have an ISD::SRA of type v16i8 on PowerPC;
// the operation must be unrolled, which introduces nodes with the illegal
// type i8 which must be promoted.
//
// This does not legalize vector manipulations like ISD::BUILD_VECTOR,
// or operations that happen to take a vector which are custom-lowered;
// the legalization for such operations never produces nodes
// with illegal types, so it's okay to put off legalizing them until
// SelectionDAG::Legalize runs.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/APInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include <cassert>
#include <cstdint>
#include <iterator>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "legalizevectorops"

namespace {

class VectorLegalizer {
  SelectionDAG& DAG;
  const TargetLowering &TLI;
  bool Changed = false; // Keep track of whether anything changed

  /// For nodes that are of legal width, and that have more than one use, this
  /// map indicates what regularized operand to use. This allows us to avoid
  /// legalizing the same thing more than once.
  SmallDenseMap<SDValue, SDValue, 64> LegalizedNodes;

  /// Adds a node to the translation cache.
  void AddLegalizedOperand(SDValue From, SDValue To) {
    LegalizedNodes.insert(std::make_pair(From, To));
    // If someone requests legalization of the new node, return itself.
    if (From != To)
      LegalizedNodes.insert(std::make_pair(To, To));
  }

  /// Legalizes the given node.
  SDValue LegalizeOp(SDValue Op);

  /// Assuming the node is legal, "legalize" the results.
  SDValue TranslateLegalizeResults(SDValue Op, SDNode *Result);

  /// Make sure Results are legal and update the translation cache.
  SDValue RecursivelyLegalizeResults(SDValue Op,
                                     MutableArrayRef<SDValue> Results);

  /// Wrapper to interface LowerOperation with a vector of Results.
  /// Returns false if the target wants to use default expansion. Otherwise
  /// returns true. If return is true and the Results are empty, then the
  /// target wants to keep the input node as is.
  bool LowerOperationWrapper(SDNode *N, SmallVectorImpl<SDValue> &Results);

  /// Implements unrolling a VSETCC.
  SDValue UnrollVSETCC(SDNode *Node);

  /// Implement expand-based legalization of vector operations.
  ///
  /// This is just a high-level routine to dispatch to specific code paths for
  /// operations to legalize them.
  void Expand(SDNode *Node, SmallVectorImpl<SDValue> &Results);

  /// Implements expansion for FP_TO_UINT; falls back to UnrollVectorOp if
  /// FP_TO_SINT isn't legal.
  void ExpandFP_TO_UINT(SDNode *Node, SmallVectorImpl<SDValue> &Results);

  /// Implements expansion for UINT_TO_FLOAT; falls back to UnrollVectorOp if
  /// SINT_TO_FLOAT and SHR on vectors aren't legal.
  void ExpandUINT_TO_FLOAT(SDNode *Node, SmallVectorImpl<SDValue> &Results);

  /// Implement expansion for SIGN_EXTEND_INREG using SRL and SRA.
  SDValue ExpandSEXTINREG(SDNode *Node);

  /// Implement expansion for ANY_EXTEND_VECTOR_INREG.
  ///
  /// Shuffles the low lanes of the operand into place and bitcasts to the
  /// proper type. The contents of the bits in the extended part of each
  /// element are undef.
  SDValue ExpandANY_EXTEND_VECTOR_INREG(SDNode *Node);

  /// Implement expansion for SIGN_EXTEND_VECTOR_INREG.
  ///
  /// Shuffles the low lanes of the operand into place, bitcasts to the proper
  /// type, then shifts left and arithmetic shifts right to introduce a sign
  /// extension.
  SDValue ExpandSIGN_EXTEND_VECTOR_INREG(SDNode *Node);

  /// Implement expansion for ZERO_EXTEND_VECTOR_INREG.
  ///
  /// Shuffles the low lanes of the operand into place and blends zeros into
  /// the remaining lanes, finally bitcasting to the proper type.
  SDValue ExpandZERO_EXTEND_VECTOR_INREG(SDNode *Node);

  /// Expand bswap of vectors into a shuffle if legal.
  SDValue ExpandBSWAP(SDNode *Node);

  /// Implement vselect in terms of XOR, AND, OR when blend is not
  /// supported by the target.
  SDValue ExpandVSELECT(SDNode *Node);
  SDValue ExpandSELECT(SDNode *Node);
  std::pair<SDValue, SDValue> ExpandLoad(SDNode *N);
  SDValue ExpandStore(SDNode *N);
  SDValue ExpandFNEG(SDNode *Node);
  void ExpandFSUB(SDNode *Node, SmallVectorImpl<SDValue> &Results);
  void ExpandSETCC(SDNode *Node, SmallVectorImpl<SDValue> &Results);
  void ExpandBITREVERSE(SDNode *Node, SmallVectorImpl<SDValue> &Results);
  void ExpandUADDSUBO(SDNode *Node, SmallVectorImpl<SDValue> &Results);
  void ExpandSADDSUBO(SDNode *Node, SmallVectorImpl<SDValue> &Results);
  void ExpandMULO(SDNode *Node, SmallVectorImpl<SDValue> &Results);
  void ExpandFixedPointDiv(SDNode *Node, SmallVectorImpl<SDValue> &Results);
  void ExpandStrictFPOp(SDNode *Node, SmallVectorImpl<SDValue> &Results);
  void ExpandREM(SDNode *Node, SmallVectorImpl<SDValue> &Results);

  void UnrollStrictFPOp(SDNode *Node, SmallVectorImpl<SDValue> &Results);

  /// Implements vector promotion.
  ///
  /// This is essentially just bitcasting the operands to a different type and
  /// bitcasting the result back to the original type.
  void Promote(SDNode *Node, SmallVectorImpl<SDValue> &Results);

  /// Implements [SU]INT_TO_FP vector promotion.
  ///
  /// This is a [zs]ext of the input operand to a larger integer type.
  void PromoteINT_TO_FP(SDNode *Node, SmallVectorImpl<SDValue> &Results);

  /// Implements FP_TO_[SU]INT vector promotion of the result type.
  ///
  /// It is promoted to a larger integer type. The result is then
  /// truncated back to the original type.
  void PromoteFP_TO_INT(SDNode *Node, SmallVectorImpl<SDValue> &Results);

public:
  VectorLegalizer(SelectionDAG& dag) :
      DAG(dag), TLI(dag.getTargetLoweringInfo()) {}

  /// Begin legalizing the vector operations in the DAG.
  bool Run();
};

} // end anonymous namespace

bool VectorLegalizer::Run() {
  // Before we start legalizing vector nodes, check if there are any vectors.
  bool HasVectors = false;
  for (SelectionDAG::allnodes_iterator I = DAG.allnodes_begin(),
       E = std::prev(DAG.allnodes_end()); I != std::next(E); ++I) {
    // Check if the values of the nodes contain vectors. We don't need to check
    // the operands because we are going to check their values at some point.
    HasVectors = llvm::any_of(I->values(), [](EVT T) { return T.isVector(); });

    // If we found a vector node we can start the legalization.
    if (HasVectors)
      break;
  }

  // If this basic block has no vectors then no need to legalize vectors.
  if (!HasVectors)
    return false;

  // The legalize process is inherently a bottom-up recursive process (users
  // legalize their uses before themselves). Given infinite stack space, we
  // could just start legalizing on the root and traverse the whole graph. In
  // practice however, this causes us to run out of stack space on large basic
  // blocks. To avoid this problem, compute an ordering of the nodes where each
  // node is only legalized after all of its operands are legalized.
  DAG.AssignTopologicalOrder();
  for (SelectionDAG::allnodes_iterator I = DAG.allnodes_begin(),
       E = std::prev(DAG.allnodes_end()); I != std::next(E); ++I)
    LegalizeOp(SDValue(&*I, 0));

  // Finally, it's possible the root changed. Get the new root.
  SDValue OldRoot = DAG.getRoot();
  assert(LegalizedNodes.count(OldRoot) && "Root didn't get legalized?");
  DAG.setRoot(LegalizedNodes[OldRoot]);

  LegalizedNodes.clear();

  // Remove dead nodes now.
  DAG.RemoveDeadNodes();

  return Changed;
}

SDValue VectorLegalizer::TranslateLegalizeResults(SDValue Op, SDNode *Result) {
  assert(Op->getNumValues() == Result->getNumValues() &&
         "Unexpected number of results");
  // Generic legalization: just pass the operand through.
  for (unsigned i = 0, e = Op->getNumValues(); i != e; ++i)
    AddLegalizedOperand(Op.getValue(i), SDValue(Result, i));
  return SDValue(Result, Op.getResNo());
}

SDValue
VectorLegalizer::RecursivelyLegalizeResults(SDValue Op,
                                            MutableArrayRef<SDValue> Results) {
  assert(Results.size() == Op->getNumValues() &&
         "Unexpected number of results");
  // Make sure that the generated code is itself legal.
  for (unsigned i = 0, e = Results.size(); i != e; ++i) {
    Results[i] = LegalizeOp(Results[i]);
    AddLegalizedOperand(Op.getValue(i), Results[i]);
  }

  return Results[Op.getResNo()];
}

SDValue VectorLegalizer::LegalizeOp(SDValue Op) {
  // Note that LegalizeOp may be reentered even from single-use nodes, which
  // means that we always must cache transformed nodes.
  DenseMap<SDValue, SDValue>::iterator I = LegalizedNodes.find(Op);
  if (I != LegalizedNodes.end()) return I->second;

  // Legalize the operands
  SmallVector<SDValue, 8> Ops;
  for (const SDValue &Oper : Op->op_values())
    Ops.push_back(LegalizeOp(Oper));

  SDNode *Node = DAG.UpdateNodeOperands(Op.getNode(), Ops);

  if (Op.getOpcode() == ISD::LOAD) {
    LoadSDNode *LD = cast<LoadSDNode>(Node);
    ISD::LoadExtType ExtType = LD->getExtensionType();
    if (LD->getMemoryVT().isVector() && ExtType != ISD::NON_EXTLOAD) {
      LLVM_DEBUG(dbgs() << "\nLegalizing extending vector load: ";
                 Node->dump(&DAG));
      switch (TLI.getLoadExtAction(LD->getExtensionType(), LD->getValueType(0),
                                   LD->getMemoryVT())) {
      default: llvm_unreachable("This action is not supported yet!");
      case TargetLowering::Legal:
        return TranslateLegalizeResults(Op, Node);
      case TargetLowering::Custom: {
        SmallVector<SDValue, 2> ResultVals;
        if (LowerOperationWrapper(Node, ResultVals)) {
          if (ResultVals.empty())
            return TranslateLegalizeResults(Op, Node);

          Changed = true;
          return RecursivelyLegalizeResults(Op, ResultVals);
        }
        LLVM_FALLTHROUGH;
      }
      case TargetLowering::Expand: {
        Changed = true;
        std::pair<SDValue, SDValue> Tmp = ExpandLoad(Node);
        AddLegalizedOperand(Op.getValue(0), Tmp.first);
        AddLegalizedOperand(Op.getValue(1), Tmp.second);
        return Op.getResNo() ? Tmp.second : Tmp.first;
      }
      }
    }
  } else if (Op.getOpcode() == ISD::STORE) {
    StoreSDNode *ST = cast<StoreSDNode>(Node);
    EVT StVT = ST->getMemoryVT();
    MVT ValVT = ST->getValue().getSimpleValueType();
    if (StVT.isVector() && ST->isTruncatingStore()) {
      LLVM_DEBUG(dbgs() << "\nLegalizing truncating vector store: ";
                 Node->dump(&DAG));
      switch (TLI.getTruncStoreAction(ValVT, StVT)) {
      default: llvm_unreachable("This action is not supported yet!");
      case TargetLowering::Legal:
        return TranslateLegalizeResults(Op, Node);
      case TargetLowering::Custom: {
        SmallVector<SDValue, 1> ResultVals;
        if (LowerOperationWrapper(Node, ResultVals)) {
          if (ResultVals.empty())
            return TranslateLegalizeResults(Op, Node);

          Changed = true;
          return RecursivelyLegalizeResults(Op, ResultVals);
        }
        LLVM_FALLTHROUGH;
      }
      case TargetLowering::Expand: {
        Changed = true;
        SDValue Chain = ExpandStore(Node);
        AddLegalizedOperand(Op, Chain);
        return Chain;
      }
      }
    }
  }

  bool HasVectorValueOrOp =
      llvm::any_of(Node->values(), [](EVT T) { return T.isVector(); }) ||
      llvm::any_of(Node->op_values(),
                   [](SDValue O) { return O.getValueType().isVector(); });
  if (!HasVectorValueOrOp)
    return TranslateLegalizeResults(Op, Node);

  TargetLowering::LegalizeAction Action = TargetLowering::Legal;
  EVT ValVT;
  switch (Op.getOpcode()) {
  default:
    return TranslateLegalizeResults(Op, Node);
  case ISD::MERGE_VALUES:
    Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0));
    // This operation lies about being legal: when it claims to be legal,
    // it should actually be expanded.
    if (Action == TargetLowering::Legal)
      Action = TargetLowering::Expand;
    break;
#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
  case ISD::STRICT_##DAGN:
#include "llvm/IR/ConstrainedOps.def"
    ValVT = Node->getValueType(0);
    if (Op.getOpcode() == ISD::STRICT_SINT_TO_FP ||
        Op.getOpcode() == ISD::STRICT_UINT_TO_FP)
      ValVT = Node->getOperand(1).getValueType();
    Action = TLI.getOperationAction(Node->getOpcode(), ValVT);
    // If we're asked to expand a strict vector floating-point operation,
    // by default we're going to simply unroll it. That is usually the
    // best approach, except in the case where the resulting strict (scalar)
    // operations would themselves use the fallback mutation to non-strict.
    // In that specific case, just do the fallback on the vector op.
    if (Action == TargetLowering::Expand && !TLI.isStrictFPEnabled() &&
        TLI.getStrictFPOperationAction(Node->getOpcode(), ValVT) ==
            TargetLowering::Legal) {
      EVT EltVT = ValVT.getVectorElementType();
      if (TLI.getOperationAction(Node->getOpcode(), EltVT)
              == TargetLowering::Expand &&
          TLI.getStrictFPOperationAction(Node->getOpcode(), EltVT)
              == TargetLowering::Legal)
        Action = TargetLowering::Legal;
    }
    break;
  case ISD::ADD:
  case ISD::SUB:
  case ISD::MUL:
  case ISD::MULHS:
  case ISD::MULHU:
  case ISD::SDIV:
  case ISD::UDIV:
  case ISD::SREM:
  case ISD::UREM:
  case ISD::SDIVREM:
  case ISD::UDIVREM:
  case ISD::FADD:
  case ISD::FSUB:
  case ISD::FMUL:
  case ISD::FDIV:
  case ISD::FREM:
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR:
  case ISD::SHL:
  case ISD::SRA:
  case ISD::SRL:
  case ISD::FSHL:
  case ISD::FSHR:
  case ISD::ROTL:
  case ISD::ROTR:
  case ISD::ABS:
  case ISD::BSWAP:
  case ISD::BITREVERSE:
  case ISD::CTLZ:
  case ISD::CTTZ:
  case ISD::CTLZ_ZERO_UNDEF:
  case ISD::CTTZ_ZERO_UNDEF:
  case ISD::CTPOP:
  case ISD::SELECT:
  case ISD::VSELECT:
  case ISD::SELECT_CC:
  case ISD::ZERO_EXTEND:
  case ISD::ANY_EXTEND:
  case ISD::TRUNCATE:
  case ISD::SIGN_EXTEND:
  case ISD::FP_TO_SINT:
  case ISD::FP_TO_UINT:
  case ISD::FNEG:
  case ISD::FABS:
  case ISD::FMINNUM:
  case ISD::FMAXNUM:
  case ISD::FMINNUM_IEEE:
  case ISD::FMAXNUM_IEEE:
  case ISD::FMINIMUM:
  case ISD::FMAXIMUM:
  case ISD::FCOPYSIGN:
  case ISD::FSQRT:
  case ISD::FSIN:
  case ISD::FCOS:
  case ISD::FPOWI:
  case ISD::FPOW:
  case ISD::FLOG:
  case ISD::FLOG2:
  case ISD::FLOG10:
  case ISD::FEXP:
  case ISD::FEXP2:
  case ISD::FCEIL:
  case ISD::FTRUNC:
  case ISD::FRINT:
  case ISD::FNEARBYINT:
  case ISD::FROUND:
  case ISD::FROUNDEVEN:
  case ISD::FFLOOR:
  case ISD::FP_ROUND:
  case ISD::FP_EXTEND:
  case ISD::FMA:
  case ISD::SIGN_EXTEND_INREG:
  case ISD::ANY_EXTEND_VECTOR_INREG:
  case ISD::SIGN_EXTEND_VECTOR_INREG:
  case ISD::ZERO_EXTEND_VECTOR_INREG:
  case ISD::SMIN:
  case ISD::SMAX:
  case ISD::UMIN:
  case ISD::UMAX:
  case ISD::SMUL_LOHI:
  case ISD::UMUL_LOHI:
  case ISD::SADDO:
  case ISD::UADDO:
  case ISD::SSUBO:
  case ISD::USUBO:
  case ISD::SMULO:
  case ISD::UMULO:
  case ISD::FCANONICALIZE:
  case ISD::SADDSAT:
  case ISD::UADDSAT:
  case ISD::SSUBSAT:
  case ISD::USUBSAT:
  case ISD::SSHLSAT:
  case ISD::USHLSAT:
  case ISD::FP_TO_SINT_SAT:
  case ISD::FP_TO_UINT_SAT:
    Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0));
    break;
  case ISD::SMULFIX:
  case ISD::SMULFIXSAT:
  case ISD::UMULFIX:
  case ISD::UMULFIXSAT:
  case ISD::SDIVFIX:
  case ISD::SDIVFIXSAT:
  case ISD::UDIVFIX:
  case ISD::UDIVFIXSAT: {
    unsigned Scale = Node->getConstantOperandVal(2);
    Action = TLI.getFixedPointOperationAction(Node->getOpcode(),
                                              Node->getValueType(0), Scale);
    break;
  }
  case ISD::SINT_TO_FP:
  case ISD::UINT_TO_FP:
  case ISD::VECREDUCE_ADD:
  case ISD::VECREDUCE_MUL:
  case ISD::VECREDUCE_AND:
  case ISD::VECREDUCE_OR:
  case ISD::VECREDUCE_XOR:
  case ISD::VECREDUCE_SMAX:
  case ISD::VECREDUCE_SMIN:
  case ISD::VECREDUCE_UMAX:
  case ISD::VECREDUCE_UMIN:
  case ISD::VECREDUCE_FADD:
  case ISD::VECREDUCE_FMUL:
  case ISD::VECREDUCE_FMAX:
  case ISD::VECREDUCE_FMIN:
    Action = TLI.getOperationAction(Node->getOpcode(),
                                    Node->getOperand(0).getValueType());
    break;
  case ISD::VECREDUCE_SEQ_FADD:
  case ISD::VECREDUCE_SEQ_FMUL:
    Action = TLI.getOperationAction(Node->getOpcode(),
                                    Node->getOperand(1).getValueType());
    break;
  case ISD::SETCC: {
    MVT OpVT = Node->getOperand(0).getSimpleValueType();
    ISD::CondCode CCCode = cast<CondCodeSDNode>(Node->getOperand(2))->get();
    Action = TLI.getCondCodeAction(CCCode, OpVT);
    if (Action == TargetLowering::Legal)
      Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0));
    break;
  }
  }

  LLVM_DEBUG(dbgs() << "\nLegalizing vector op: "; Node->dump(&DAG));

  SmallVector<SDValue, 8> ResultVals;
  switch (Action) {
  default: llvm_unreachable("This action is not supported yet!");
  case TargetLowering::Promote:
    LLVM_DEBUG(dbgs() << "Promoting\n");
    Promote(Node, ResultVals);
    assert(!ResultVals.empty() && "No results for promotion?");
    break;
  case TargetLowering::Legal:
    LLVM_DEBUG(dbgs() << "Legal node: nothing to do\n");
    break;
  case TargetLowering::Custom:
    LLVM_DEBUG(dbgs() << "Trying custom legalization\n");
    if (LowerOperationWrapper(Node, ResultVals))
      break;
    LLVM_DEBUG(dbgs() << "Could not custom legalize node\n");
    LLVM_FALLTHROUGH;
  case TargetLowering::Expand:
    LLVM_DEBUG(dbgs() << "Expanding\n");
    Expand(Node, ResultVals);
    break;
  }

  if (ResultVals.empty())
    return TranslateLegalizeResults(Op, Node);

  Changed = true;
  return RecursivelyLegalizeResults(Op, ResultVals);
}

// FIXME: This is very similar to the X86 override of
// TargetLowering::LowerOperationWrapper. Can we merge them somehow?
bool VectorLegalizer::LowerOperationWrapper(SDNode *Node,
                                            SmallVectorImpl<SDValue> &Results) {
  SDValue Res = TLI.LowerOperation(SDValue(Node, 0), DAG);

  if (!Res.getNode())
    return false;

  if (Res == SDValue(Node, 0))
    return true;

  // If the original node has one result, take the return value from
  // LowerOperation as is. It might not be result number 0.
  if (Node->getNumValues() == 1) {
    Results.push_back(Res);
    return true;
  }

  // If the original node has multiple results, then the return node should
  // have the same number of results.
  assert((Node->getNumValues() == Res->getNumValues()) &&
         "Lowering returned the wrong number of results!");

  // Place the new result values based on the node's result numbers.
  for (unsigned I = 0, E = Node->getNumValues(); I != E; ++I)
    Results.push_back(Res.getValue(I));

  return true;
}

void VectorLegalizer::Promote(SDNode *Node, SmallVectorImpl<SDValue> &Results) {
  // For a few operations there is a specific concept for promotion based on
  // the operand's type.
  switch (Node->getOpcode()) {
  case ISD::SINT_TO_FP:
  case ISD::UINT_TO_FP:
  case ISD::STRICT_SINT_TO_FP:
  case ISD::STRICT_UINT_TO_FP:
    // "Promote" the operation by extending the operand.
    PromoteINT_TO_FP(Node, Results);
    return;
  case ISD::FP_TO_UINT:
  case ISD::FP_TO_SINT:
  case ISD::STRICT_FP_TO_UINT:
  case ISD::STRICT_FP_TO_SINT:
    // Promote the operation by extending the operand.
    PromoteFP_TO_INT(Node, Results);
    return;
  case ISD::FP_ROUND:
  case ISD::FP_EXTEND:
    // These operations are used to do promotion so they can't be promoted
    // themselves.
    llvm_unreachable("Don't know how to promote this operation!");
  }

  // There are currently two cases of vector promotion:
  // 1) Bitcasting a vector of integers to a different integer vector type of
  //    the same overall bit width. For example, x86 promotes ISD::AND on
  //    v2i32 to v1i64.
  // 2) Extending a vector of floats to a vector of the same number of larger
  //    floats. For example, AArch64 promotes ISD::FADD on v4f16 to v4f32.
  assert(Node->getNumValues() == 1 &&
         "Can't promote a vector with multiple results!");
  MVT VT = Node->getSimpleValueType(0);
  MVT NVT = TLI.getTypeToPromoteTo(Node->getOpcode(), VT);
  SDLoc dl(Node);
  SmallVector<SDValue, 4> Operands(Node->getNumOperands());

  for (unsigned j = 0; j != Node->getNumOperands(); ++j) {
    if (Node->getOperand(j).getValueType().isVector())
      if (Node->getOperand(j)
              .getValueType()
              .getVectorElementType()
              .isFloatingPoint() &&
          NVT.isVector() && NVT.getVectorElementType().isFloatingPoint())
        Operands[j] = DAG.getNode(ISD::FP_EXTEND, dl, NVT, Node->getOperand(j));
      else
        Operands[j] = DAG.getNode(ISD::BITCAST, dl, NVT, Node->getOperand(j));
    else
      Operands[j] = Node->getOperand(j);
  }

  SDValue Res =
      DAG.getNode(Node->getOpcode(), dl, NVT, Operands, Node->getFlags());

  if ((VT.isFloatingPoint() && NVT.isFloatingPoint()) ||
      (VT.isVector() && VT.getVectorElementType().isFloatingPoint() &&
       NVT.isVector() && NVT.getVectorElementType().isFloatingPoint()))
    Res = DAG.getNode(ISD::FP_ROUND, dl, VT, Res, DAG.getIntPtrConstant(0, dl));
  else
    Res = DAG.getNode(ISD::BITCAST, dl, VT, Res);

  Results.push_back(Res);
}

void VectorLegalizer::PromoteINT_TO_FP(SDNode *Node,
                                       SmallVectorImpl<SDValue> &Results) {
  // INT_TO_FP operations may require the input operand be promoted even
  // when the type is otherwise legal.
  bool IsStrict = Node->isStrictFPOpcode();
  MVT VT = Node->getOperand(IsStrict ? 1 : 0).getSimpleValueType();
  MVT NVT = TLI.getTypeToPromoteTo(Node->getOpcode(), VT);
  assert(NVT.getVectorNumElements() == VT.getVectorNumElements() &&
         "Vectors have different number of elements!");

  SDLoc dl(Node);
  SmallVector<SDValue, 4> Operands(Node->getNumOperands());

  unsigned Opc = (Node->getOpcode() == ISD::UINT_TO_FP ||
                  Node->getOpcode() == ISD::STRICT_UINT_TO_FP)
                     ? ISD::ZERO_EXTEND
                     : ISD::SIGN_EXTEND;
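  // Zero-extending for unsigned conversions and sign-extending for signed
  // ones leaves each element's numeric value unchanged, so the conversion
  // result is the same as for the original narrow operand.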
  for (unsigned j = 0; j != Node->getNumOperands(); ++j) {
    if (Node->getOperand(j).getValueType().isVector())
      Operands[j] = DAG.getNode(Opc, dl, NVT, Node->getOperand(j));
    else
      Operands[j] = Node->getOperand(j);
  }

  if (IsStrict) {
    SDValue Res = DAG.getNode(Node->getOpcode(), dl,
                              {Node->getValueType(0), MVT::Other}, Operands);
    Results.push_back(Res);
    Results.push_back(Res.getValue(1));
    return;
  }

  SDValue Res =
      DAG.getNode(Node->getOpcode(), dl, Node->getValueType(0), Operands);
  Results.push_back(Res);
}

// For FP_TO_INT we promote the result type to a vector type with wider
// elements and then truncate the result. This is different from the default
// PromoteVector which uses bitcast to promote, thus assuming that the
// promoted vector type has the same overall size.
void VectorLegalizer::PromoteFP_TO_INT(SDNode *Node,
                                       SmallVectorImpl<SDValue> &Results) {
  MVT VT = Node->getSimpleValueType(0);
  MVT NVT = TLI.getTypeToPromoteTo(Node->getOpcode(), VT);
  bool IsStrict = Node->isStrictFPOpcode();
  assert(NVT.getVectorNumElements() == VT.getVectorNumElements() &&
         "Vectors have different number of elements!");

  unsigned NewOpc = Node->getOpcode();
  // Change FP_TO_UINT to FP_TO_SINT if possible.
  // TODO: Should we only do this if FP_TO_UINT itself isn't legal?
  if (NewOpc == ISD::FP_TO_UINT &&
      TLI.isOperationLegalOrCustom(ISD::FP_TO_SINT, NVT))
    NewOpc = ISD::FP_TO_SINT;

  if (NewOpc == ISD::STRICT_FP_TO_UINT &&
      TLI.isOperationLegalOrCustom(ISD::STRICT_FP_TO_SINT, NVT))
    NewOpc = ISD::STRICT_FP_TO_SINT;

  SDLoc dl(Node);
  SDValue Promoted, Chain;
  if (IsStrict) {
    Promoted = DAG.getNode(NewOpc, dl, {NVT, MVT::Other},
                           {Node->getOperand(0), Node->getOperand(1)});
    Chain = Promoted.getValue(1);
  } else
    Promoted = DAG.getNode(NewOpc, dl, NVT, Node->getOperand(0));

  // Assert that the converted value fits in the original type. If it doesn't
  // (eg: because the value being converted is too big), then the result of the
  // original operation was undefined anyway, so the assert is still correct.
  if (Node->getOpcode() == ISD::FP_TO_UINT ||
      Node->getOpcode() == ISD::STRICT_FP_TO_UINT)
    NewOpc = ISD::AssertZext;
  else
    NewOpc = ISD::AssertSext;

  Promoted = DAG.getNode(NewOpc, dl, NVT, Promoted,
                         DAG.getValueType(VT.getScalarType()));
  Promoted = DAG.getNode(ISD::TRUNCATE, dl, VT, Promoted);
  Results.push_back(Promoted);
  if (IsStrict)
    Results.push_back(Chain);
}

std::pair<SDValue, SDValue> VectorLegalizer::ExpandLoad(SDNode *N) {
  LoadSDNode *LD = cast<LoadSDNode>(N);
  return TLI.scalarizeVectorLoad(LD, DAG);
}

SDValue VectorLegalizer::ExpandStore(SDNode *N) {
  StoreSDNode *ST = cast<StoreSDNode>(N);
  SDValue TF = TLI.scalarizeVectorStore(ST, DAG);
  return TF;
}

void VectorLegalizer::Expand(SDNode *Node, SmallVectorImpl<SDValue> &Results) {
  SDValue Tmp;
  switch (Node->getOpcode()) {
  case ISD::MERGE_VALUES:
    for (unsigned i = 0, e = Node->getNumValues(); i != e; ++i)
      Results.push_back(Node->getOperand(i));
    return;
  case ISD::SIGN_EXTEND_INREG:
    Results.push_back(ExpandSEXTINREG(Node));
    return;
  case ISD::ANY_EXTEND_VECTOR_INREG:
    Results.push_back(ExpandANY_EXTEND_VECTOR_INREG(Node));
    return;
  case ISD::SIGN_EXTEND_VECTOR_INREG:
    Results.push_back(ExpandSIGN_EXTEND_VECTOR_INREG(Node));
    return;
  case ISD::ZERO_EXTEND_VECTOR_INREG:
    Results.push_back(ExpandZERO_EXTEND_VECTOR_INREG(Node));
    return;
  case ISD::BSWAP:
    Results.push_back(ExpandBSWAP(Node));
    return;
  case ISD::VSELECT:
    Results.push_back(ExpandVSELECT(Node));
    return;
  case ISD::SELECT:
    Results.push_back(ExpandSELECT(Node));
    return;
  case ISD::FP_TO_UINT:
    ExpandFP_TO_UINT(Node, Results);
    return;
  case ISD::UINT_TO_FP:
    ExpandUINT_TO_FLOAT(Node, Results);
    return;
  case ISD::FNEG:
    Results.push_back(ExpandFNEG(Node));
    return;
  case ISD::FSUB:
    ExpandFSUB(Node, Results);
    return;
  case ISD::SETCC:
    ExpandSETCC(Node, Results);
    return;
  case ISD::ABS:
    if (TLI.expandABS(Node, Tmp, DAG)) {
      Results.push_back(Tmp);
      return;
    }
    break;
  case ISD::BITREVERSE:
    ExpandBITREVERSE(Node, Results);
    return;
  case ISD::CTPOP:
    if (TLI.expandCTPOP(Node, Tmp, DAG)) {
      Results.push_back(Tmp);
      return;
    }
    break;
  case ISD::CTLZ:
  case ISD::CTLZ_ZERO_UNDEF:
    if (TLI.expandCTLZ(Node, Tmp, DAG)) {
      Results.push_back(Tmp);
      return;
    }
    break;
  case ISD::CTTZ:
  case ISD::CTTZ_ZERO_UNDEF:
    if (TLI.expandCTTZ(Node, Tmp, DAG)) {
      Results.push_back(Tmp);
      return;
    }
    break;
  case ISD::FSHL:
  case ISD::FSHR:
    if (TLI.expandFunnelShift(Node, Tmp, DAG)) {
      Results.push_back(Tmp);
      return;
    }
    break;
  case ISD::ROTL:
  case ISD::ROTR:
    if (TLI.expandROT(Node, false /*AllowVectorOps*/, Tmp, DAG)) {
      Results.push_back(Tmp);
      return;
    }
    break;
  case ISD::FMINNUM:
  case ISD::FMAXNUM:
    if (SDValue Expanded = TLI.expandFMINNUM_FMAXNUM(Node, DAG)) {
      Results.push_back(Expanded);
      return;
    }
    break;
  case ISD::SMIN:
  case ISD::SMAX:
  case ISD::UMIN:
  case ISD::UMAX:
    if (SDValue Expanded = TLI.expandIntMINMAX(Node, DAG)) {
      Results.push_back(Expanded);
      return;
    }
    break;
  case ISD::UADDO:
  case ISD::USUBO:
    ExpandUADDSUBO(Node, Results);
    return;
  case ISD::SADDO:
  case ISD::SSUBO:
    ExpandSADDSUBO(Node, Results);
    return;
  case ISD::UMULO:
  case ISD::SMULO:
    ExpandMULO(Node, Results);
    return;
  case ISD::USUBSAT:
  case ISD::SSUBSAT:
  case ISD::UADDSAT:
  case ISD::SADDSAT:
    if (SDValue Expanded = TLI.expandAddSubSat(Node, DAG)) {
      Results.push_back(Expanded);
      return;
    }
    break;
  case ISD::SMULFIX:
  case ISD::UMULFIX:
    if (SDValue Expanded = TLI.expandFixedPointMul(Node, DAG)) {
      Results.push_back(Expanded);
      return;
    }
    break;
  case ISD::SMULFIXSAT:
  case ISD::UMULFIXSAT:
    // FIXME: We do not expand SMULFIXSAT/UMULFIXSAT here yet, not sure exactly
    // why. Maybe it results in worse codegen compared to the unroll for some
    // targets? This should probably be investigated. And if we still prefer to
    // unroll an explanation could be helpful.
    break;
  case ISD::SDIVFIX:
  case ISD::UDIVFIX:
    ExpandFixedPointDiv(Node, Results);
    return;
  case ISD::SDIVFIXSAT:
  case ISD::UDIVFIXSAT:
    break;
#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
  case ISD::STRICT_##DAGN:
#include "llvm/IR/ConstrainedOps.def"
    ExpandStrictFPOp(Node, Results);
    return;
  case ISD::VECREDUCE_ADD:
  case ISD::VECREDUCE_MUL:
  case ISD::VECREDUCE_AND:
  case ISD::VECREDUCE_OR:
  case ISD::VECREDUCE_XOR:
  case ISD::VECREDUCE_SMAX:
  case ISD::VECREDUCE_SMIN:
  case ISD::VECREDUCE_UMAX:
  case ISD::VECREDUCE_UMIN:
  case ISD::VECREDUCE_FADD:
  case ISD::VECREDUCE_FMUL:
  case ISD::VECREDUCE_FMAX:
  case ISD::VECREDUCE_FMIN:
    Results.push_back(TLI.expandVecReduce(Node, DAG));
    return;
  case ISD::VECREDUCE_SEQ_FADD:
  case ISD::VECREDUCE_SEQ_FMUL:
    Results.push_back(TLI.expandVecReduceSeq(Node, DAG));
    return;
  case ISD::SREM:
  case ISD::UREM:
    ExpandREM(Node, Results);
    return;
  }

  Results.push_back(DAG.UnrollVectorOp(Node));
}

SDValue VectorLegalizer::ExpandSELECT(SDNode *Node) {
  // Lower a select instruction where the condition is a scalar and the
  // operands are vectors. Lower this select to VSELECT and implement it
  // using XOR, AND, OR. The selector bit is broadcast.
  EVT VT = Node->getValueType(0);
  SDLoc DL(Node);

  SDValue Mask = Node->getOperand(0);
  SDValue Op1 = Node->getOperand(1);
  SDValue Op2 = Node->getOperand(2);

  assert(VT.isVector() && !Mask.getValueType().isVector()
         && Op1.getValueType() == Op2.getValueType() && "Invalid type");

  // If we can't even use the basic vector operations of
  // AND, OR, XOR, we will have to scalarize the op.
  // Notice that the operation may be 'promoted' which means that it is
  // 'bitcasted' to another type which is handled.
  // Also, we need to be able to construct a splat vector using either
  // BUILD_VECTOR or SPLAT_VECTOR.
  // FIXME: Should we also permit fixed-length SPLAT_VECTOR as a fallback to
  // BUILD_VECTOR?
  if (TLI.getOperationAction(ISD::AND, VT) == TargetLowering::Expand ||
      TLI.getOperationAction(ISD::XOR, VT) == TargetLowering::Expand ||
      TLI.getOperationAction(ISD::OR, VT) == TargetLowering::Expand ||
      TLI.getOperationAction(VT.isFixedLengthVector() ? ISD::BUILD_VECTOR
                                                      : ISD::SPLAT_VECTOR,
                             VT) == TargetLowering::Expand)
    return DAG.UnrollVectorOp(Node);

  // Generate a mask operand.
  EVT MaskTy = VT.changeVectorElementTypeToInteger();

  // What is the size of each element in the vector mask.
  EVT BitTy = MaskTy.getScalarType();

  Mask = DAG.getSelect(DL, BitTy, Mask,
                       DAG.getConstant(APInt::getAllOnesValue(BitTy.getSizeInBits()), DL,
                                       BitTy),
                       DAG.getConstant(0, DL, BitTy));

  // Broadcast the mask so that the entire vector is all one or all zero.
  if (VT.isFixedLengthVector())
    Mask = DAG.getSplatBuildVector(MaskTy, DL, Mask);
  else
    Mask = DAG.getSplatVector(MaskTy, DL, Mask);

  // Bitcast the operands to be the same type as the mask.
  // This is needed when we select between FP types because
  // the mask is a vector of integers.
  Op1 = DAG.getNode(ISD::BITCAST, DL, MaskTy, Op1);
  Op2 = DAG.getNode(ISD::BITCAST, DL, MaskTy, Op2);

  SDValue AllOnes = DAG.getConstant(
      APInt::getAllOnesValue(BitTy.getSizeInBits()), DL, MaskTy);
  SDValue NotMask = DAG.getNode(ISD::XOR, DL, MaskTy, Mask, AllOnes);

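  // Blend the two operands: Result = (Op1 & Mask) | (Op2 & ~Mask).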
  Op1 = DAG.getNode(ISD::AND, DL, MaskTy, Op1, Mask);
  Op2 = DAG.getNode(ISD::AND, DL, MaskTy, Op2, NotMask);
  SDValue Val = DAG.getNode(ISD::OR, DL, MaskTy, Op1, Op2);
  return DAG.getNode(ISD::BITCAST, DL, Node->getValueType(0), Val);
}

SDValue VectorLegalizer::ExpandSEXTINREG(SDNode *Node) {
  EVT VT = Node->getValueType(0);

  // Make sure that the SRA and SHL instructions are available.
  if (TLI.getOperationAction(ISD::SRA, VT) == TargetLowering::Expand ||
      TLI.getOperationAction(ISD::SHL, VT) == TargetLowering::Expand)
    return DAG.UnrollVectorOp(Node);

  SDLoc DL(Node);
  EVT OrigTy = cast<VTSDNode>(Node->getOperand(1))->getVT();

  unsigned BW = VT.getScalarSizeInBits();
  unsigned OrigBW = OrigTy.getScalarSizeInBits();
  SDValue ShiftSz = DAG.getConstant(BW - OrigBW, DL, VT);

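  // sext_inreg(X, OrigTy) == sra(shl(X, BW - OrigBW), BW - OrigBW): the left
  // shift moves the narrow value's sign bit into the element's MSB and the
  // arithmetic right shift then replicates it across the upper bits.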
  SDValue Op = DAG.getNode(ISD::SHL, DL, VT, Node->getOperand(0), ShiftSz);
  return DAG.getNode(ISD::SRA, DL, VT, Op, ShiftSz);
}

// Generically expand a vector anyext in register to a shuffle of the relevant
// lanes into the appropriate locations, with other lanes left undef.
SDValue VectorLegalizer::ExpandANY_EXTEND_VECTOR_INREG(SDNode *Node) {
  SDLoc DL(Node);
  EVT VT = Node->getValueType(0);
  int NumElements = VT.getVectorNumElements();
  SDValue Src = Node->getOperand(0);
  EVT SrcVT = Src.getValueType();
  int NumSrcElements = SrcVT.getVectorNumElements();

  // *_EXTEND_VECTOR_INREG SrcVT can be smaller than VT - so insert the vector
  // into a larger vector type.
  if (SrcVT.bitsLE(VT)) {
    assert((VT.getSizeInBits() % SrcVT.getScalarSizeInBits()) == 0 &&
           "ANY_EXTEND_VECTOR_INREG vector size mismatch");
    NumSrcElements = VT.getSizeInBits() / SrcVT.getScalarSizeInBits();
    SrcVT = EVT::getVectorVT(*DAG.getContext(), SrcVT.getScalarType(),
                             NumSrcElements);
    Src = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, SrcVT, DAG.getUNDEF(SrcVT),
                      Src, DAG.getVectorIdxConstant(0, DL));
  }

  // Build a base mask of undef shuffles.
  SmallVector<int, 16> ShuffleMask;
  ShuffleMask.resize(NumSrcElements, -1);

  // Place the extended lanes into the correct locations.
  int ExtLaneScale = NumSrcElements / NumElements;
  int EndianOffset = DAG.getDataLayout().isBigEndian() ? ExtLaneScale - 1 : 0;
  for (int i = 0; i < NumElements; ++i)
    ShuffleMask[i * ExtLaneScale + EndianOffset] = i;
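  // E.g. for a v4i32 any_extend_vector_inreg of a v16i8 source, ExtLaneScale
  // is 4, so (on little-endian) mask slots 0, 4, 8 and 12 take source lanes
  // 0-3 and every other slot stays undef.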

  return DAG.getNode(
      ISD::BITCAST, DL, VT,
      DAG.getVectorShuffle(SrcVT, DL, Src, DAG.getUNDEF(SrcVT), ShuffleMask));
}

SDValue VectorLegalizer::ExpandSIGN_EXTEND_VECTOR_INREG(SDNode *Node) {
  SDLoc DL(Node);
  EVT VT = Node->getValueType(0);
  SDValue Src = Node->getOperand(0);
  EVT SrcVT = Src.getValueType();

  // First build an any-extend node which can be legalized above when we
  // recurse through it.
  SDValue Op = DAG.getNode(ISD::ANY_EXTEND_VECTOR_INREG, DL, VT, Src);

  // Now we need to sign extend. Do this by shifting the elements. Even if
  // these aren't legal operations, they have a better chance of being
  // legalized without full scalarization than the sign extension does.
  unsigned EltWidth = VT.getScalarSizeInBits();
  unsigned SrcEltWidth = SrcVT.getScalarSizeInBits();
  SDValue ShiftAmount = DAG.getConstant(EltWidth - SrcEltWidth, DL, VT);
  return DAG.getNode(ISD::SRA, DL, VT,
                     DAG.getNode(ISD::SHL, DL, VT, Op, ShiftAmount),
                     ShiftAmount);
}

// Generically expand a vector zext in register to a shuffle of the relevant
// lanes into the appropriate locations, a blend of zero into the high bits,
// and a bitcast to the wider element type.
SDValue VectorLegalizer::ExpandZERO_EXTEND_VECTOR_INREG(SDNode *Node) {
  SDLoc DL(Node);
  EVT VT = Node->getValueType(0);
  int NumElements = VT.getVectorNumElements();
  SDValue Src = Node->getOperand(0);
  EVT SrcVT = Src.getValueType();
  int NumSrcElements = SrcVT.getVectorNumElements();

  // *_EXTEND_VECTOR_INREG SrcVT can be smaller than VT - so insert the vector
  // into a larger vector type.
  if (SrcVT.bitsLE(VT)) {
    assert((VT.getSizeInBits() % SrcVT.getScalarSizeInBits()) == 0 &&
           "ZERO_EXTEND_VECTOR_INREG vector size mismatch");
    NumSrcElements = VT.getSizeInBits() / SrcVT.getScalarSizeInBits();
    SrcVT = EVT::getVectorVT(*DAG.getContext(), SrcVT.getScalarType(),
                             NumSrcElements);
    Src = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, SrcVT, DAG.getUNDEF(SrcVT),
                      Src, DAG.getVectorIdxConstant(0, DL));
  }

  // Build up a zero vector to blend into this one.
  SDValue Zero = DAG.getConstant(0, DL, SrcVT);

  // Shuffle the incoming lanes into the correct position, and pull all other
  // lanes from the zero vector.
  SmallVector<int, 16> ShuffleMask;
  ShuffleMask.reserve(NumSrcElements);
  for (int i = 0; i < NumSrcElements; ++i)
    ShuffleMask.push_back(i);

  int ExtLaneScale = NumSrcElements / NumElements;
  int EndianOffset = DAG.getDataLayout().isBigEndian() ? ExtLaneScale - 1 : 0;
  for (int i = 0; i < NumElements; ++i)
    ShuffleMask[i * ExtLaneScale + EndianOffset] = NumSrcElements + i;
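  // Mask indices >= NumSrcElements select from Src (the second shuffle
  // operand); all other lanes keep reading zero from the first operand, which
  // zero-fills the high bits of each element after the bitcast below.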

  return DAG.getNode(ISD::BITCAST, DL, VT,
                     DAG.getVectorShuffle(SrcVT, DL, Zero, Src, ShuffleMask));
}

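// Build a byte-wise shuffle mask that reverses the bytes within each vector
// element; e.g. for v2i32 the mask is <3,2,1,0, 7,6,5,4>.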
static void createBSWAPShuffleMask(EVT VT, SmallVectorImpl<int> &ShuffleMask) {
  int ScalarSizeInBytes = VT.getScalarSizeInBits() / 8;
  for (int I = 0, E = VT.getVectorNumElements(); I != E; ++I)
    for (int J = ScalarSizeInBytes - 1; J >= 0; --J)
      ShuffleMask.push_back((I * ScalarSizeInBytes) + J);
}

SDValue VectorLegalizer::ExpandBSWAP(SDNode *Node) {
  EVT VT = Node->getValueType(0);

  // Generate a byte wise shuffle mask for the BSWAP.
  SmallVector<int, 16> ShuffleMask;
  createBSWAPShuffleMask(VT, ShuffleMask);
  EVT ByteVT = EVT::getVectorVT(*DAG.getContext(), MVT::i8, ShuffleMask.size());

  // Only emit a shuffle if the mask is legal.
  if (!TLI.isShuffleMaskLegal(ShuffleMask, ByteVT))
    return DAG.UnrollVectorOp(Node);

  SDLoc DL(Node);
  SDValue Op = DAG.getNode(ISD::BITCAST, DL, ByteVT, Node->getOperand(0));
  Op = DAG.getVectorShuffle(ByteVT, DL, Op, DAG.getUNDEF(ByteVT), ShuffleMask);
  return DAG.getNode(ISD::BITCAST, DL, VT, Op);
}

void VectorLegalizer::ExpandBITREVERSE(SDNode *Node,
                                       SmallVectorImpl<SDValue> &Results) {
  EVT VT = Node->getValueType(0);

  // If we have the scalar operation, it's probably cheaper to unroll it.
  if (TLI.isOperationLegalOrCustom(ISD::BITREVERSE, VT.getScalarType())) {
    SDValue Tmp = DAG.UnrollVectorOp(Node);
    Results.push_back(Tmp);
    return;
  }

  // If the vector element width is a whole number of bytes, test if it's legal
  // to BSWAP shuffle the bytes and then perform the BITREVERSE on the byte
  // vector. This greatly reduces the number of bit shifts necessary.
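  // (Per element, bitreverse(X) == bitreverse of each byte of bswap(X), so the
  // byte shuffle followed by a byte-wide BITREVERSE is equivalent.)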
  unsigned ScalarSizeInBits = VT.getScalarSizeInBits();
  if (ScalarSizeInBits > 8 && (ScalarSizeInBits % 8) == 0) {
    SmallVector<int, 16> BSWAPMask;
    createBSWAPShuffleMask(VT, BSWAPMask);

    EVT ByteVT = EVT::getVectorVT(*DAG.getContext(), MVT::i8, BSWAPMask.size());
    if (TLI.isShuffleMaskLegal(BSWAPMask, ByteVT) &&
        (TLI.isOperationLegalOrCustom(ISD::BITREVERSE, ByteVT) ||
         (TLI.isOperationLegalOrCustom(ISD::SHL, ByteVT) &&
          TLI.isOperationLegalOrCustom(ISD::SRL, ByteVT) &&
          TLI.isOperationLegalOrCustomOrPromote(ISD::AND, ByteVT) &&
          TLI.isOperationLegalOrCustomOrPromote(ISD::OR, ByteVT)))) {
      SDLoc DL(Node);
      SDValue Op = DAG.getNode(ISD::BITCAST, DL, ByteVT, Node->getOperand(0));
      Op = DAG.getVectorShuffle(ByteVT, DL, Op, DAG.getUNDEF(ByteVT),
                                BSWAPMask);
      Op = DAG.getNode(ISD::BITREVERSE, DL, ByteVT, Op);
      Op = DAG.getNode(ISD::BITCAST, DL, VT, Op);
      Results.push_back(Op);
      return;
    }
  }

  // If we have the appropriate vector bit operations, it is better to use them
  // than unrolling and expanding each component.
  if (TLI.isOperationLegalOrCustom(ISD::SHL, VT) &&
      TLI.isOperationLegalOrCustom(ISD::SRL, VT) &&
      TLI.isOperationLegalOrCustomOrPromote(ISD::AND, VT) &&
      TLI.isOperationLegalOrCustomOrPromote(ISD::OR, VT))
    // Let LegalizeDAG handle this later.
    return;

  // Otherwise unroll.
  SDValue Tmp = DAG.UnrollVectorOp(Node);
  Results.push_back(Tmp);
}

SDValue VectorLegalizer::ExpandVSELECT(SDNode *Node) {
  // Implement VSELECT in terms of XOR, AND, OR
  // on platforms which do not support blend natively.
  SDLoc DL(Node);

  SDValue Mask = Node->getOperand(0);
  SDValue Op1 = Node->getOperand(1);
  SDValue Op2 = Node->getOperand(2);

  EVT VT = Mask.getValueType();

  // If we can't even use the basic vector operations of
  // AND, OR, XOR, we will have to scalarize the op.
  // Notice that the operation may be 'promoted' which means that it is
  // 'bitcasted' to another type which is handled.
  // This operation also isn't safe with AND, OR, XOR when the boolean
  // type is 0/1 as we need an all ones vector constant to mask with.
  // FIXME: Sign extend 1 to all ones if that's legal on the target.
  if (TLI.getOperationAction(ISD::AND, VT) == TargetLowering::Expand ||
      TLI.getOperationAction(ISD::XOR, VT) == TargetLowering::Expand ||
      TLI.getOperationAction(ISD::OR, VT) == TargetLowering::Expand ||
      TLI.getBooleanContents(Op1.getValueType()) !=
          TargetLowering::ZeroOrNegativeOneBooleanContent)
    return DAG.UnrollVectorOp(Node);

  // If the mask and the type are different sizes, unroll the vector op. This
  // can occur when getSetCCResultType returns something that is different in
  // size from the operand types. For example, v4i8 = select v4i32, v4i8, v4i8.
  if (VT.getSizeInBits() != Op1.getValueSizeInBits())
    return DAG.UnrollVectorOp(Node);

  // Bitcast the operands to be the same type as the mask.
  // This is needed when we select between FP types because
  // the mask is a vector of integers.
  Op1 = DAG.getNode(ISD::BITCAST, DL, VT, Op1);
  Op2 = DAG.getNode(ISD::BITCAST, DL, VT, Op2);

  SDValue AllOnes = DAG.getConstant(
      APInt::getAllOnesValue(VT.getScalarSizeInBits()), DL, VT);
  SDValue NotMask = DAG.getNode(ISD::XOR, DL, VT, Mask, AllOnes);

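  // Blend: Result = (Op1 & Mask) | (Op2 & ~Mask). This relies on each mask
  // lane being all-ones or all-zeros (ZeroOrNegativeOneBooleanContent).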
  Op1 = DAG.getNode(ISD::AND, DL, VT, Op1, Mask);
  Op2 = DAG.getNode(ISD::AND, DL, VT, Op2, NotMask);
  SDValue Val = DAG.getNode(ISD::OR, DL, VT, Op1, Op2);
  return DAG.getNode(ISD::BITCAST, DL, Node->getValueType(0), Val);
}

void VectorLegalizer::ExpandFP_TO_UINT(SDNode *Node,
                                       SmallVectorImpl<SDValue> &Results) {
  // Attempt to expand using TargetLowering.
  SDValue Result, Chain;
  if (TLI.expandFP_TO_UINT(Node, Result, Chain, DAG)) {
    Results.push_back(Result);
    if (Node->isStrictFPOpcode())
      Results.push_back(Chain);
    return;
  }

  // Otherwise go ahead and unroll.
  if (Node->isStrictFPOpcode()) {
    UnrollStrictFPOp(Node, Results);
    return;
  }

  Results.push_back(DAG.UnrollVectorOp(Node));
}

void VectorLegalizer::ExpandUINT_TO_FLOAT(SDNode *Node,
                                          SmallVectorImpl<SDValue> &Results) {
  bool IsStrict = Node->isStrictFPOpcode();
  unsigned OpNo = IsStrict ? 1 : 0;
  SDValue Src = Node->getOperand(OpNo);
  EVT VT = Src.getValueType();
  SDLoc DL(Node);

  // Attempt to expand using TargetLowering.
  SDValue Result;
  SDValue Chain;
  if (TLI.expandUINT_TO_FP(Node, Result, Chain, DAG)) {
    Results.push_back(Result);
    if (IsStrict)
      Results.push_back(Chain);
    return;
  }

  // Make sure that the SINT_TO_FP and SRL instructions are available.
  if (((!IsStrict && TLI.getOperationAction(ISD::SINT_TO_FP, VT) ==
                         TargetLowering::Expand) ||
       (IsStrict && TLI.getOperationAction(ISD::STRICT_SINT_TO_FP, VT) ==
                        TargetLowering::Expand)) ||
      TLI.getOperationAction(ISD::SRL, VT) == TargetLowering::Expand) {
    if (IsStrict) {
      UnrollStrictFPOp(Node, Results);
      return;
    }

    Results.push_back(DAG.UnrollVectorOp(Node));
    return;
  }

  unsigned BW = VT.getScalarSizeInBits();
  assert((BW == 64 || BW == 32) &&
         "Elements in vector-UINT_TO_FP must be 32 or 64 bits wide");

  SDValue HalfWord = DAG.getConstant(BW / 2, DL, VT);

  // Constants to clear the upper part of the word.
  // Notice that we can also use SHL+SHR, but using a constant is slightly
  // faster on x86.
  uint64_t HWMask = (BW == 64) ? 0x00000000FFFFFFFF : 0x0000FFFF;
  SDValue HalfWordMask = DAG.getConstant(HWMask, DL, VT);

  // Two to the power of half-word-size.
  SDValue TWOHW =
      DAG.getConstantFP(1ULL << (BW / 2), DL, Node->getValueType(0));

  // Clear upper part of LO, lower HI
  SDValue HI = DAG.getNode(ISD::SRL, DL, VT, Src, HalfWord);
  SDValue LO = DAG.getNode(ISD::AND, DL, VT, Src, HalfWordMask);
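  // The result is reassembled as uint_to_fp(Src) == sint_to_fp(HI) * 2^(BW/2)
  // + sint_to_fp(LO); both halves are non-negative when reinterpreted as
  // signed values, so the signed conversions below handle them correctly.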

  if (IsStrict) {
    // Convert hi and lo to floats
    // Convert the hi part back to the upper values
    // TODO: Can any fast-math-flags be set on these nodes?
    SDValue fHI = DAG.getNode(ISD::STRICT_SINT_TO_FP, DL,
                              {Node->getValueType(0), MVT::Other},
                              {Node->getOperand(0), HI});
    fHI = DAG.getNode(ISD::STRICT_FMUL, DL, {Node->getValueType(0), MVT::Other},
                      {fHI.getValue(1), fHI, TWOHW});
    SDValue fLO = DAG.getNode(ISD::STRICT_SINT_TO_FP, DL,
                              {Node->getValueType(0), MVT::Other},
                              {Node->getOperand(0), LO});

    SDValue TF = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, fHI.getValue(1),
                             fLO.getValue(1));

    // Add the two halves
    SDValue Result =
        DAG.getNode(ISD::STRICT_FADD, DL, {Node->getValueType(0), MVT::Other},
                    {TF, fHI, fLO});

    Results.push_back(Result);
    Results.push_back(Result.getValue(1));
    return;
  }

  // Convert hi and lo to floats
  // Convert the hi part back to the upper values
  // TODO: Can any fast-math-flags be set on these nodes?
  SDValue fHI = DAG.getNode(ISD::SINT_TO_FP, DL, Node->getValueType(0), HI);
  fHI = DAG.getNode(ISD::FMUL, DL, Node->getValueType(0), fHI, TWOHW);
  SDValue fLO = DAG.getNode(ISD::SINT_TO_FP, DL, Node->getValueType(0), LO);

  // Add the two halves
  Results.push_back(
      DAG.getNode(ISD::FADD, DL, Node->getValueType(0), fHI, fLO));
}

SDValue VectorLegalizer::ExpandFNEG(SDNode *Node) {
  if (TLI.isOperationLegalOrCustom(ISD::FSUB, Node->getValueType(0))) {
    SDLoc DL(Node);
    SDValue Zero = DAG.getConstantFP(-0.0, DL, Node->getValueType(0));
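    // Using -0.0 rather than +0.0 gives the right sign for zero inputs:
    // -0.0 - (+0.0) == -0.0 and -0.0 - (-0.0) == +0.0.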
    // TODO: If FNEG had fast-math-flags, they'd get propagated to this FSUB.
    return DAG.getNode(ISD::FSUB, DL, Node->getValueType(0), Zero,
                       Node->getOperand(0));
  }
  return DAG.UnrollVectorOp(Node);
}

void VectorLegalizer::ExpandFSUB(SDNode *Node,
                                 SmallVectorImpl<SDValue> &Results) {
  // For floating-point values, (a-b) is the same as a+(-b). If FNEG is legal,
  // we can defer this to operation legalization where it will be lowered as
  // a+(-b).
  EVT VT = Node->getValueType(0);
  if (TLI.isOperationLegalOrCustom(ISD::FNEG, VT) &&
      TLI.isOperationLegalOrCustom(ISD::FADD, VT))
    return; // Defer to LegalizeDAG

  SDValue Tmp = DAG.UnrollVectorOp(Node);
  Results.push_back(Tmp);
}

void VectorLegalizer::ExpandSETCC(SDNode *Node,
                                  SmallVectorImpl<SDValue> &Results) {
  bool NeedInvert = false;
  SDLoc dl(Node);
  MVT OpVT = Node->getOperand(0).getSimpleValueType();
  ISD::CondCode CCCode = cast<CondCodeSDNode>(Node->getOperand(2))->get();

  if (TLI.getCondCodeAction(CCCode, OpVT) != TargetLowering::Expand) {
    Results.push_back(UnrollVSETCC(Node));
    return;
  }

  SDValue Chain;
  SDValue LHS = Node->getOperand(0);
  SDValue RHS = Node->getOperand(1);
  SDValue CC = Node->getOperand(2);
  bool Legalized = TLI.LegalizeSetCCCondCode(DAG, Node->getValueType(0), LHS,
                                             RHS, CC, NeedInvert, dl, Chain);

  if (Legalized) {
    // If we expanded the SETCC by swapping LHS and RHS, or by inverting the
    // condition code, create a new SETCC node.
    if (CC.getNode())
      LHS = DAG.getNode(ISD::SETCC, dl, Node->getValueType(0), LHS, RHS, CC,
                        Node->getFlags());

    // If we expanded the SETCC by inverting the condition code, then wrap
    // the existing SETCC in a NOT to restore the intended condition.
    if (NeedInvert)
      LHS = DAG.getLogicalNOT(dl, LHS, LHS->getValueType(0));
  } else {
    // Otherwise, SETCC for the given comparison type must be completely
    // illegal; expand it into a SELECT_CC.
    EVT VT = Node->getValueType(0);
    LHS =
        DAG.getNode(ISD::SELECT_CC, dl, VT, LHS, RHS,
                    DAG.getBoolConstant(true, dl, VT, LHS.getValueType()),
                    DAG.getBoolConstant(false, dl, VT, LHS.getValueType()), CC);
    LHS->setFlags(Node->getFlags());
  }

  Results.push_back(LHS);
}

void VectorLegalizer::ExpandUADDSUBO(SDNode *Node,
                                     SmallVectorImpl<SDValue> &Results) {
  SDValue Result, Overflow;
  TLI.expandUADDSUBO(Node, Result, Overflow, DAG);
  Results.push_back(Result);
  Results.push_back(Overflow);
}

void VectorLegalizer::ExpandSADDSUBO(SDNode *Node,
                                     SmallVectorImpl<SDValue> &Results) {
  SDValue Result, Overflow;
  TLI.expandSADDSUBO(Node, Result, Overflow, DAG);
  Results.push_back(Result);
  Results.push_back(Overflow);
}

void VectorLegalizer::ExpandMULO(SDNode *Node,
                                 SmallVectorImpl<SDValue> &Results) {
  SDValue Result, Overflow;
  if (!TLI.expandMULO(Node, Result, Overflow, DAG))
    std::tie(Result, Overflow) = DAG.UnrollVectorOverflowOp(Node);

  Results.push_back(Result);
  Results.push_back(Overflow);
}

void VectorLegalizer::ExpandFixedPointDiv(SDNode *Node,
                                          SmallVectorImpl<SDValue> &Results) {
  SDNode *N = Node;
  if (SDValue Expanded = TLI.expandFixedPointDiv(N->getOpcode(), SDLoc(N),
          N->getOperand(0), N->getOperand(1), N->getConstantOperandVal(2), DAG))
    Results.push_back(Expanded);
}

void VectorLegalizer::ExpandStrictFPOp(SDNode *Node,
                                       SmallVectorImpl<SDValue> &Results) {
  if (Node->getOpcode() == ISD::STRICT_UINT_TO_FP) {
    ExpandUINT_TO_FLOAT(Node, Results);
    return;
  }
  if (Node->getOpcode() == ISD::STRICT_FP_TO_UINT) {
    ExpandFP_TO_UINT(Node, Results);
    return;
  }

  UnrollStrictFPOp(Node, Results);
}

void VectorLegalizer::ExpandREM(SDNode *Node,
                                SmallVectorImpl<SDValue> &Results) {
  assert((Node->getOpcode() == ISD::SREM || Node->getOpcode() == ISD::UREM) &&
         "Expected REM node");

  SDValue Result;
  if (!TLI.expandREM(Node, Result, DAG))
    Result = DAG.UnrollVectorOp(Node);
  Results.push_back(Result);
}

void VectorLegalizer::UnrollStrictFPOp(SDNode *Node,
                                       SmallVectorImpl<SDValue> &Results) {
  EVT VT = Node->getValueType(0);
  EVT EltVT = VT.getVectorElementType();
  unsigned NumElems = VT.getVectorNumElements();
  unsigned NumOpers = Node->getNumOperands();
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  EVT TmpEltVT = EltVT;
  if (Node->getOpcode() == ISD::STRICT_FSETCC ||
      Node->getOpcode() == ISD::STRICT_FSETCCS)
    TmpEltVT = TLI.getSetCCResultType(DAG.getDataLayout(),
                                      *DAG.getContext(), TmpEltVT);

  EVT ValueVTs[] = {TmpEltVT, MVT::Other};
  SDValue Chain = Node->getOperand(0);
  SDLoc dl(Node);

  SmallVector<SDValue, 32> OpValues;
  SmallVector<SDValue, 32> OpChains;
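  // Scalarize one element at a time. Each scalar strict op carries its own
  // chain; the per-element chains are joined with a TokenFactor after the
  // loop, so no ordering between the elements is implied.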
  for (unsigned i = 0; i < NumElems; ++i) {
    SmallVector<SDValue, 4> Opers;
    SDValue Idx = DAG.getVectorIdxConstant(i, dl);

    // The Chain is the first operand.
    Opers.push_back(Chain);

    // Now process the remaining operands.
    for (unsigned j = 1; j < NumOpers; ++j) {
      SDValue Oper = Node->getOperand(j);
      EVT OperVT = Oper.getValueType();

      if (OperVT.isVector())
        Oper = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
                           OperVT.getVectorElementType(), Oper, Idx);

      Opers.push_back(Oper);
    }

    SDValue ScalarOp = DAG.getNode(Node->getOpcode(), dl, ValueVTs, Opers);
    SDValue ScalarResult = ScalarOp.getValue(0);
    SDValue ScalarChain = ScalarOp.getValue(1);

    if (Node->getOpcode() == ISD::STRICT_FSETCC ||
        Node->getOpcode() == ISD::STRICT_FSETCCS)
      ScalarResult = DAG.getSelect(dl, EltVT, ScalarResult,
                                   DAG.getConstant(APInt::getAllOnesValue
                                                   (EltVT.getSizeInBits()), dl, EltVT),
                                   DAG.getConstant(0, dl, EltVT));

    OpValues.push_back(ScalarResult);
    OpChains.push_back(ScalarChain);
  }

  SDValue Result = DAG.getBuildVector(VT, dl, OpValues);
  SDValue NewChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OpChains);

  Results.push_back(Result);
  Results.push_back(NewChain);
}

SDValue VectorLegalizer::UnrollVSETCC(SDNode *Node) {
  EVT VT = Node->getValueType(0);
  unsigned NumElems = VT.getVectorNumElements();
  EVT EltVT = VT.getVectorElementType();
  SDValue LHS = Node->getOperand(0);
  SDValue RHS = Node->getOperand(1);
  SDValue CC = Node->getOperand(2);
  EVT TmpEltVT = LHS.getValueType().getVectorElementType();
  SDLoc dl(Node);
  SmallVector<SDValue, 8> Ops(NumElems);
  for (unsigned i = 0; i < NumElems; ++i) {
    SDValue LHSElem = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, TmpEltVT, LHS,
                                  DAG.getVectorIdxConstant(i, dl));
    SDValue RHSElem = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, TmpEltVT, RHS,
                                  DAG.getVectorIdxConstant(i, dl));
    Ops[i] = DAG.getNode(ISD::SETCC, dl,
                         TLI.getSetCCResultType(DAG.getDataLayout(),
                                                *DAG.getContext(), TmpEltVT),
                         LHSElem, RHSElem, CC);
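    // The scalar SETCC produces the target's boolean type; select between
    // all-ones and zero to widen it to a full EltVT element, matching the
    // vector-SETCC result convention.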
    Ops[i] = DAG.getSelect(dl, EltVT, Ops[i],
                           DAG.getConstant(APInt::getAllOnesValue
                                           (EltVT.getSizeInBits()), dl, EltVT),
                           DAG.getConstant(0, dl, EltVT));
  }
  return DAG.getBuildVector(VT, dl, Ops);
}

bool SelectionDAG::LegalizeVectors() {
  return VectorLegalizer(*this).Run();
}