1 //===-- SelectionDAGBuilder.cpp - Selection-DAG building ------------------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This implements routines for translating from LLVM IR into SelectionDAG IR.
11 //
12 //===----------------------------------------------------------------------===//
13
14 #include "SelectionDAGBuilder.h"
15 #include "SDNodeDbgValue.h"
16 #include "llvm/ADT/BitVector.h"
17 #include "llvm/ADT/Optional.h"
18 #include "llvm/ADT/SmallSet.h"
19 #include "llvm/ADT/Statistic.h"
20 #include "llvm/Analysis/AliasAnalysis.h"
21 #include "llvm/Analysis/BranchProbabilityInfo.h"
22 #include "llvm/Analysis/ConstantFolding.h"
23 #include "llvm/Analysis/ValueTracking.h"
24 #include "llvm/CodeGen/Analysis.h"
25 #include "llvm/CodeGen/FastISel.h"
26 #include "llvm/CodeGen/FunctionLoweringInfo.h"
27 #include "llvm/CodeGen/GCMetadata.h"
28 #include "llvm/CodeGen/GCStrategy.h"
29 #include "llvm/CodeGen/MachineFrameInfo.h"
30 #include "llvm/CodeGen/MachineFunction.h"
31 #include "llvm/CodeGen/MachineInstrBuilder.h"
32 #include "llvm/CodeGen/MachineJumpTableInfo.h"
33 #include "llvm/CodeGen/MachineModuleInfo.h"
34 #include "llvm/CodeGen/MachineRegisterInfo.h"
35 #include "llvm/CodeGen/SelectionDAG.h"
36 #include "llvm/CodeGen/StackMaps.h"
37 #include "llvm/IR/CallingConv.h"
38 #include "llvm/IR/Constants.h"
39 #include "llvm/IR/DataLayout.h"
40 #include "llvm/IR/DebugInfo.h"
41 #include "llvm/IR/DerivedTypes.h"
42 #include "llvm/IR/Function.h"
43 #include "llvm/IR/GlobalVariable.h"
44 #include "llvm/IR/InlineAsm.h"
45 #include "llvm/IR/Instructions.h"
46 #include "llvm/IR/IntrinsicInst.h"
47 #include "llvm/IR/Intrinsics.h"
48 #include "llvm/IR/LLVMContext.h"
49 #include "llvm/IR/Module.h"
50 #include "llvm/IR/Statepoint.h"
51 #include "llvm/MC/MCSymbol.h"
52 #include "llvm/Support/CommandLine.h"
53 #include "llvm/Support/Debug.h"
54 #include "llvm/Support/ErrorHandling.h"
55 #include "llvm/Support/MathExtras.h"
56 #include "llvm/Support/raw_ostream.h"
57 #include "llvm/Target/TargetFrameLowering.h"
58 #include "llvm/Target/TargetInstrInfo.h"
59 #include "llvm/Target/TargetIntrinsicInfo.h"
60 #include "llvm/Target/TargetLibraryInfo.h"
61 #include "llvm/Target/TargetLowering.h"
62 #include "llvm/Target/TargetOptions.h"
63 #include "llvm/Target/TargetSelectionDAGInfo.h"
64 #include "llvm/Target/TargetSubtargetInfo.h"
65 #include <algorithm>
66 using namespace llvm;
67
68 #define DEBUG_TYPE "isel"
69
70 /// LimitFloatPrecision - Generate low-precision inline sequences for
71 /// some float libcalls (6, 8 or 12 bits).
72 static unsigned LimitFloatPrecision;
73
74 static cl::opt<unsigned, true>
75 LimitFPPrecision("limit-float-precision",
76 cl::desc("Generate low-precision inline sequences "
77 "for some float libcalls"),
78 cl::location(LimitFloatPrecision),
79 cl::init(0));
80
81 // Limit the width of DAG chains. This is important in general to prevent
82 // prevent DAG-based analysis from blowing up. For example, alias analysis and
83 // load clustering may not complete in reasonable time. It is difficult to
84 // recognize and avoid this situation within each individual analysis, and
85 // future analyses are likely to have the same behavior. Limiting DAG width is
86 // the safe approach, and will be especially important with global DAGs.
87 //
88 // MaxParallelChains default is arbitrarily high to avoid affecting
89 // optimization, but could be lowered to improve compile time. Any ld-ld-st-st
90 // sequence over this should have been converted to llvm.memcpy by the
91 // frontend. It easy to induce this behavior with .ll code such as:
92 // %buffer = alloca [4096 x i8]
93 // %data = load [4096 x i8]* %argPtr
94 // store [4096 x i8] %data, [4096 x i8]* %buffer
95 static const unsigned MaxParallelChains = 64;
96
97 static SDValue getCopyFromPartsVector(SelectionDAG &DAG, SDLoc DL,
98 const SDValue *Parts, unsigned NumParts,
99 MVT PartVT, EVT ValueVT, const Value *V);
100
101 /// getCopyFromParts - Create a value that contains the specified legal parts
102 /// combined into the value they represent. If the parts combine to a type
103 /// larger then ValueVT then AssertOp can be used to specify whether the extra
104 /// bits are known to be zero (ISD::AssertZext) or sign extended from ValueVT
105 /// (ISD::AssertSext).
getCopyFromParts(SelectionDAG & DAG,SDLoc DL,const SDValue * Parts,unsigned NumParts,MVT PartVT,EVT ValueVT,const Value * V,ISD::NodeType AssertOp=ISD::DELETED_NODE)106 static SDValue getCopyFromParts(SelectionDAG &DAG, SDLoc DL,
107 const SDValue *Parts,
108 unsigned NumParts, MVT PartVT, EVT ValueVT,
109 const Value *V,
110 ISD::NodeType AssertOp = ISD::DELETED_NODE) {
111 if (ValueVT.isVector())
112 return getCopyFromPartsVector(DAG, DL, Parts, NumParts,
113 PartVT, ValueVT, V);
114
115 assert(NumParts > 0 && "No parts to assemble!");
116 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
117 SDValue Val = Parts[0];
118
119 if (NumParts > 1) {
120 // Assemble the value from multiple parts.
121 if (ValueVT.isInteger()) {
122 unsigned PartBits = PartVT.getSizeInBits();
123 unsigned ValueBits = ValueVT.getSizeInBits();
124
125 // Assemble the power of 2 part.
126 unsigned RoundParts = NumParts & (NumParts - 1) ?
127 1 << Log2_32(NumParts) : NumParts;
128 unsigned RoundBits = PartBits * RoundParts;
129 EVT RoundVT = RoundBits == ValueBits ?
130 ValueVT : EVT::getIntegerVT(*DAG.getContext(), RoundBits);
131 SDValue Lo, Hi;
132
133 EVT HalfVT = EVT::getIntegerVT(*DAG.getContext(), RoundBits/2);
134
135 if (RoundParts > 2) {
136 Lo = getCopyFromParts(DAG, DL, Parts, RoundParts / 2,
137 PartVT, HalfVT, V);
138 Hi = getCopyFromParts(DAG, DL, Parts + RoundParts / 2,
139 RoundParts / 2, PartVT, HalfVT, V);
140 } else {
141 Lo = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[0]);
142 Hi = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[1]);
143 }
144
145 if (TLI.isBigEndian())
146 std::swap(Lo, Hi);
147
148 Val = DAG.getNode(ISD::BUILD_PAIR, DL, RoundVT, Lo, Hi);
149
150 if (RoundParts < NumParts) {
151 // Assemble the trailing non-power-of-2 part.
152 unsigned OddParts = NumParts - RoundParts;
153 EVT OddVT = EVT::getIntegerVT(*DAG.getContext(), OddParts * PartBits);
154 Hi = getCopyFromParts(DAG, DL,
155 Parts + RoundParts, OddParts, PartVT, OddVT, V);
156
157 // Combine the round and odd parts.
158 Lo = Val;
159 if (TLI.isBigEndian())
160 std::swap(Lo, Hi);
161 EVT TotalVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
162 Hi = DAG.getNode(ISD::ANY_EXTEND, DL, TotalVT, Hi);
163 Hi = DAG.getNode(ISD::SHL, DL, TotalVT, Hi,
164 DAG.getConstant(Lo.getValueType().getSizeInBits(),
165 TLI.getPointerTy()));
166 Lo = DAG.getNode(ISD::ZERO_EXTEND, DL, TotalVT, Lo);
167 Val = DAG.getNode(ISD::OR, DL, TotalVT, Lo, Hi);
168 }
169 } else if (PartVT.isFloatingPoint()) {
170 // FP split into multiple FP parts (for ppcf128)
171 assert(ValueVT == EVT(MVT::ppcf128) && PartVT == MVT::f64 &&
172 "Unexpected split");
173 SDValue Lo, Hi;
174 Lo = DAG.getNode(ISD::BITCAST, DL, EVT(MVT::f64), Parts[0]);
175 Hi = DAG.getNode(ISD::BITCAST, DL, EVT(MVT::f64), Parts[1]);
176 if (TLI.hasBigEndianPartOrdering(ValueVT))
177 std::swap(Lo, Hi);
178 Val = DAG.getNode(ISD::BUILD_PAIR, DL, ValueVT, Lo, Hi);
179 } else {
180 // FP split into integer parts (soft fp)
181 assert(ValueVT.isFloatingPoint() && PartVT.isInteger() &&
182 !PartVT.isVector() && "Unexpected split");
183 EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
184 Val = getCopyFromParts(DAG, DL, Parts, NumParts, PartVT, IntVT, V);
185 }
186 }
187
188 // There is now one part, held in Val. Correct it to match ValueVT.
189 EVT PartEVT = Val.getValueType();
190
191 if (PartEVT == ValueVT)
192 return Val;
193
194 if (PartEVT.isInteger() && ValueVT.isInteger()) {
195 if (ValueVT.bitsLT(PartEVT)) {
196 // For a truncate, see if we have any information to
197 // indicate whether the truncated bits will always be
198 // zero or sign-extension.
199 if (AssertOp != ISD::DELETED_NODE)
200 Val = DAG.getNode(AssertOp, DL, PartEVT, Val,
201 DAG.getValueType(ValueVT));
202 return DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
203 }
204 return DAG.getNode(ISD::ANY_EXTEND, DL, ValueVT, Val);
205 }
206
207 if (PartEVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
208 // FP_ROUND's are always exact here.
209 if (ValueVT.bitsLT(Val.getValueType()))
210 return DAG.getNode(ISD::FP_ROUND, DL, ValueVT, Val,
211 DAG.getTargetConstant(1, TLI.getPointerTy()));
212
213 return DAG.getNode(ISD::FP_EXTEND, DL, ValueVT, Val);
214 }
215
216 if (PartEVT.getSizeInBits() == ValueVT.getSizeInBits())
217 return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
218
219 llvm_unreachable("Unknown mismatch!");
220 }
221
diagnosePossiblyInvalidConstraint(LLVMContext & Ctx,const Value * V,const Twine & ErrMsg)222 static void diagnosePossiblyInvalidConstraint(LLVMContext &Ctx, const Value *V,
223 const Twine &ErrMsg) {
224 const Instruction *I = dyn_cast_or_null<Instruction>(V);
225 if (!V)
226 return Ctx.emitError(ErrMsg);
227
228 const char *AsmError = ", possible invalid constraint for vector type";
229 if (const CallInst *CI = dyn_cast<CallInst>(I))
230 if (isa<InlineAsm>(CI->getCalledValue()))
231 return Ctx.emitError(I, ErrMsg + AsmError);
232
233 return Ctx.emitError(I, ErrMsg);
234 }
235
236 /// getCopyFromPartsVector - Create a value that contains the specified legal
237 /// parts combined into the value they represent. If the parts combine to a
238 /// type larger then ValueVT then AssertOp can be used to specify whether the
239 /// extra bits are known to be zero (ISD::AssertZext) or sign extended from
240 /// ValueVT (ISD::AssertSext).
getCopyFromPartsVector(SelectionDAG & DAG,SDLoc DL,const SDValue * Parts,unsigned NumParts,MVT PartVT,EVT ValueVT,const Value * V)241 static SDValue getCopyFromPartsVector(SelectionDAG &DAG, SDLoc DL,
242 const SDValue *Parts, unsigned NumParts,
243 MVT PartVT, EVT ValueVT, const Value *V) {
244 assert(ValueVT.isVector() && "Not a vector value");
245 assert(NumParts > 0 && "No parts to assemble!");
246 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
247 SDValue Val = Parts[0];
248
249 // Handle a multi-element vector.
250 if (NumParts > 1) {
251 EVT IntermediateVT;
252 MVT RegisterVT;
253 unsigned NumIntermediates;
254 unsigned NumRegs =
255 TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT,
256 NumIntermediates, RegisterVT);
257 assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
258 NumParts = NumRegs; // Silence a compiler warning.
259 assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");
260 assert(RegisterVT == Parts[0].getSimpleValueType() &&
261 "Part type doesn't match part!");
262
263 // Assemble the parts into intermediate operands.
264 SmallVector<SDValue, 8> Ops(NumIntermediates);
265 if (NumIntermediates == NumParts) {
266 // If the register was not expanded, truncate or copy the value,
267 // as appropriate.
268 for (unsigned i = 0; i != NumParts; ++i)
269 Ops[i] = getCopyFromParts(DAG, DL, &Parts[i], 1,
270 PartVT, IntermediateVT, V);
271 } else if (NumParts > 0) {
272 // If the intermediate type was expanded, build the intermediate
273 // operands from the parts.
274 assert(NumParts % NumIntermediates == 0 &&
275 "Must expand into a divisible number of parts!");
276 unsigned Factor = NumParts / NumIntermediates;
277 for (unsigned i = 0; i != NumIntermediates; ++i)
278 Ops[i] = getCopyFromParts(DAG, DL, &Parts[i * Factor], Factor,
279 PartVT, IntermediateVT, V);
280 }
281
282 // Build a vector with BUILD_VECTOR or CONCAT_VECTORS from the
283 // intermediate operands.
284 Val = DAG.getNode(IntermediateVT.isVector() ? ISD::CONCAT_VECTORS
285 : ISD::BUILD_VECTOR,
286 DL, ValueVT, Ops);
287 }
288
289 // There is now one part, held in Val. Correct it to match ValueVT.
290 EVT PartEVT = Val.getValueType();
291
292 if (PartEVT == ValueVT)
293 return Val;
294
295 if (PartEVT.isVector()) {
296 // If the element type of the source/dest vectors are the same, but the
297 // parts vector has more elements than the value vector, then we have a
298 // vector widening case (e.g. <2 x float> -> <4 x float>). Extract the
299 // elements we want.
300 if (PartEVT.getVectorElementType() == ValueVT.getVectorElementType()) {
301 assert(PartEVT.getVectorNumElements() > ValueVT.getVectorNumElements() &&
302 "Cannot narrow, it would be a lossy transformation");
303 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ValueVT, Val,
304 DAG.getConstant(0, TLI.getVectorIdxTy()));
305 }
306
307 // Vector/Vector bitcast.
308 if (ValueVT.getSizeInBits() == PartEVT.getSizeInBits())
309 return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
310
311 assert(PartEVT.getVectorNumElements() == ValueVT.getVectorNumElements() &&
312 "Cannot handle this kind of promotion");
313 // Promoted vector extract
314 bool Smaller = ValueVT.bitsLE(PartEVT);
315 return DAG.getNode((Smaller ? ISD::TRUNCATE : ISD::ANY_EXTEND),
316 DL, ValueVT, Val);
317
318 }
319
320 // Trivial bitcast if the types are the same size and the destination
321 // vector type is legal.
322 if (PartEVT.getSizeInBits() == ValueVT.getSizeInBits() &&
323 TLI.isTypeLegal(ValueVT))
324 return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
325
326 // Handle cases such as i8 -> <1 x i1>
327 if (ValueVT.getVectorNumElements() != 1) {
328 diagnosePossiblyInvalidConstraint(*DAG.getContext(), V,
329 "non-trivial scalar-to-vector conversion");
330 return DAG.getUNDEF(ValueVT);
331 }
332
333 if (ValueVT.getVectorNumElements() == 1 &&
334 ValueVT.getVectorElementType() != PartEVT) {
335 bool Smaller = ValueVT.bitsLE(PartEVT);
336 Val = DAG.getNode((Smaller ? ISD::TRUNCATE : ISD::ANY_EXTEND),
337 DL, ValueVT.getScalarType(), Val);
338 }
339
340 return DAG.getNode(ISD::BUILD_VECTOR, DL, ValueVT, Val);
341 }
342
343 static void getCopyToPartsVector(SelectionDAG &DAG, SDLoc dl,
344 SDValue Val, SDValue *Parts, unsigned NumParts,
345 MVT PartVT, const Value *V);
346
347 /// getCopyToParts - Create a series of nodes that contain the specified value
348 /// split into legal parts. If the parts contain more bits than Val, then, for
349 /// integers, ExtendKind can be used to specify how to generate the extra bits.
getCopyToParts(SelectionDAG & DAG,SDLoc DL,SDValue Val,SDValue * Parts,unsigned NumParts,MVT PartVT,const Value * V,ISD::NodeType ExtendKind=ISD::ANY_EXTEND)350 static void getCopyToParts(SelectionDAG &DAG, SDLoc DL,
351 SDValue Val, SDValue *Parts, unsigned NumParts,
352 MVT PartVT, const Value *V,
353 ISD::NodeType ExtendKind = ISD::ANY_EXTEND) {
354 EVT ValueVT = Val.getValueType();
355
356 // Handle the vector case separately.
357 if (ValueVT.isVector())
358 return getCopyToPartsVector(DAG, DL, Val, Parts, NumParts, PartVT, V);
359
360 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
361 unsigned PartBits = PartVT.getSizeInBits();
362 unsigned OrigNumParts = NumParts;
363 assert(TLI.isTypeLegal(PartVT) && "Copying to an illegal type!");
364
365 if (NumParts == 0)
366 return;
367
368 assert(!ValueVT.isVector() && "Vector case handled elsewhere");
369 EVT PartEVT = PartVT;
370 if (PartEVT == ValueVT) {
371 assert(NumParts == 1 && "No-op copy with multiple parts!");
372 Parts[0] = Val;
373 return;
374 }
375
376 if (NumParts * PartBits > ValueVT.getSizeInBits()) {
377 // If the parts cover more bits than the value has, promote the value.
378 if (PartVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
379 assert(NumParts == 1 && "Do not know what to promote to!");
380 Val = DAG.getNode(ISD::FP_EXTEND, DL, PartVT, Val);
381 } else {
382 assert((PartVT.isInteger() || PartVT == MVT::x86mmx) &&
383 ValueVT.isInteger() &&
384 "Unknown mismatch!");
385 ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
386 Val = DAG.getNode(ExtendKind, DL, ValueVT, Val);
387 if (PartVT == MVT::x86mmx)
388 Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
389 }
390 } else if (PartBits == ValueVT.getSizeInBits()) {
391 // Different types of the same size.
392 assert(NumParts == 1 && PartEVT != ValueVT);
393 Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
394 } else if (NumParts * PartBits < ValueVT.getSizeInBits()) {
395 // If the parts cover less bits than value has, truncate the value.
396 assert((PartVT.isInteger() || PartVT == MVT::x86mmx) &&
397 ValueVT.isInteger() &&
398 "Unknown mismatch!");
399 ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
400 Val = DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
401 if (PartVT == MVT::x86mmx)
402 Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
403 }
404
405 // The value may have changed - recompute ValueVT.
406 ValueVT = Val.getValueType();
407 assert(NumParts * PartBits == ValueVT.getSizeInBits() &&
408 "Failed to tile the value with PartVT!");
409
410 if (NumParts == 1) {
411 if (PartEVT != ValueVT)
412 diagnosePossiblyInvalidConstraint(*DAG.getContext(), V,
413 "scalar-to-vector conversion failed");
414
415 Parts[0] = Val;
416 return;
417 }
418
419 // Expand the value into multiple parts.
420 if (NumParts & (NumParts - 1)) {
421 // The number of parts is not a power of 2. Split off and copy the tail.
422 assert(PartVT.isInteger() && ValueVT.isInteger() &&
423 "Do not know what to expand to!");
424 unsigned RoundParts = 1 << Log2_32(NumParts);
425 unsigned RoundBits = RoundParts * PartBits;
426 unsigned OddParts = NumParts - RoundParts;
427 SDValue OddVal = DAG.getNode(ISD::SRL, DL, ValueVT, Val,
428 DAG.getIntPtrConstant(RoundBits));
429 getCopyToParts(DAG, DL, OddVal, Parts + RoundParts, OddParts, PartVT, V);
430
431 if (TLI.isBigEndian())
432 // The odd parts were reversed by getCopyToParts - unreverse them.
433 std::reverse(Parts + RoundParts, Parts + NumParts);
434
435 NumParts = RoundParts;
436 ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
437 Val = DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
438 }
439
440 // The number of parts is a power of 2. Repeatedly bisect the value using
441 // EXTRACT_ELEMENT.
442 Parts[0] = DAG.getNode(ISD::BITCAST, DL,
443 EVT::getIntegerVT(*DAG.getContext(),
444 ValueVT.getSizeInBits()),
445 Val);
446
447 for (unsigned StepSize = NumParts; StepSize > 1; StepSize /= 2) {
448 for (unsigned i = 0; i < NumParts; i += StepSize) {
449 unsigned ThisBits = StepSize * PartBits / 2;
450 EVT ThisVT = EVT::getIntegerVT(*DAG.getContext(), ThisBits);
451 SDValue &Part0 = Parts[i];
452 SDValue &Part1 = Parts[i+StepSize/2];
453
454 Part1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL,
455 ThisVT, Part0, DAG.getIntPtrConstant(1));
456 Part0 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL,
457 ThisVT, Part0, DAG.getIntPtrConstant(0));
458
459 if (ThisBits == PartBits && ThisVT != PartVT) {
460 Part0 = DAG.getNode(ISD::BITCAST, DL, PartVT, Part0);
461 Part1 = DAG.getNode(ISD::BITCAST, DL, PartVT, Part1);
462 }
463 }
464 }
465
466 if (TLI.isBigEndian())
467 std::reverse(Parts, Parts + OrigNumParts);
468 }
469
470
471 /// getCopyToPartsVector - Create a series of nodes that contain the specified
472 /// value split into legal parts.
getCopyToPartsVector(SelectionDAG & DAG,SDLoc DL,SDValue Val,SDValue * Parts,unsigned NumParts,MVT PartVT,const Value * V)473 static void getCopyToPartsVector(SelectionDAG &DAG, SDLoc DL,
474 SDValue Val, SDValue *Parts, unsigned NumParts,
475 MVT PartVT, const Value *V) {
476 EVT ValueVT = Val.getValueType();
477 assert(ValueVT.isVector() && "Not a vector");
478 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
479
480 if (NumParts == 1) {
481 EVT PartEVT = PartVT;
482 if (PartEVT == ValueVT) {
483 // Nothing to do.
484 } else if (PartVT.getSizeInBits() == ValueVT.getSizeInBits()) {
485 // Bitconvert vector->vector case.
486 Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
487 } else if (PartVT.isVector() &&
488 PartEVT.getVectorElementType() == ValueVT.getVectorElementType() &&
489 PartEVT.getVectorNumElements() > ValueVT.getVectorNumElements()) {
490 EVT ElementVT = PartVT.getVectorElementType();
491 // Vector widening case, e.g. <2 x float> -> <4 x float>. Shuffle in
492 // undef elements.
493 SmallVector<SDValue, 16> Ops;
494 for (unsigned i = 0, e = ValueVT.getVectorNumElements(); i != e; ++i)
495 Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL,
496 ElementVT, Val, DAG.getConstant(i,
497 TLI.getVectorIdxTy())));
498
499 for (unsigned i = ValueVT.getVectorNumElements(),
500 e = PartVT.getVectorNumElements(); i != e; ++i)
501 Ops.push_back(DAG.getUNDEF(ElementVT));
502
503 Val = DAG.getNode(ISD::BUILD_VECTOR, DL, PartVT, Ops);
504
505 // FIXME: Use CONCAT for 2x -> 4x.
506
507 //SDValue UndefElts = DAG.getUNDEF(VectorTy);
508 //Val = DAG.getNode(ISD::CONCAT_VECTORS, DL, PartVT, Val, UndefElts);
509 } else if (PartVT.isVector() &&
510 PartEVT.getVectorElementType().bitsGE(
511 ValueVT.getVectorElementType()) &&
512 PartEVT.getVectorNumElements() == ValueVT.getVectorNumElements()) {
513
514 // Promoted vector extract
515 bool Smaller = PartEVT.bitsLE(ValueVT);
516 Val = DAG.getNode((Smaller ? ISD::TRUNCATE : ISD::ANY_EXTEND),
517 DL, PartVT, Val);
518 } else{
519 // Vector -> scalar conversion.
520 assert(ValueVT.getVectorNumElements() == 1 &&
521 "Only trivial vector-to-scalar conversions should get here!");
522 Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL,
523 PartVT, Val, DAG.getConstant(0, TLI.getVectorIdxTy()));
524
525 bool Smaller = ValueVT.bitsLE(PartVT);
526 Val = DAG.getNode((Smaller ? ISD::TRUNCATE : ISD::ANY_EXTEND),
527 DL, PartVT, Val);
528 }
529
530 Parts[0] = Val;
531 return;
532 }
533
534 // Handle a multi-element vector.
535 EVT IntermediateVT;
536 MVT RegisterVT;
537 unsigned NumIntermediates;
538 unsigned NumRegs = TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT,
539 IntermediateVT,
540 NumIntermediates, RegisterVT);
541 unsigned NumElements = ValueVT.getVectorNumElements();
542
543 assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
544 NumParts = NumRegs; // Silence a compiler warning.
545 assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");
546
547 // Split the vector into intermediate operands.
548 SmallVector<SDValue, 8> Ops(NumIntermediates);
549 for (unsigned i = 0; i != NumIntermediates; ++i) {
550 if (IntermediateVT.isVector())
551 Ops[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL,
552 IntermediateVT, Val,
553 DAG.getConstant(i * (NumElements / NumIntermediates),
554 TLI.getVectorIdxTy()));
555 else
556 Ops[i] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL,
557 IntermediateVT, Val,
558 DAG.getConstant(i, TLI.getVectorIdxTy()));
559 }
560
561 // Split the intermediate operands into legal parts.
562 if (NumParts == NumIntermediates) {
563 // If the register was not expanded, promote or copy the value,
564 // as appropriate.
565 for (unsigned i = 0; i != NumParts; ++i)
566 getCopyToParts(DAG, DL, Ops[i], &Parts[i], 1, PartVT, V);
567 } else if (NumParts > 0) {
568 // If the intermediate type was expanded, split each the value into
569 // legal parts.
570 assert(NumIntermediates != 0 && "division by zero");
571 assert(NumParts % NumIntermediates == 0 &&
572 "Must expand into a divisible number of parts!");
573 unsigned Factor = NumParts / NumIntermediates;
574 for (unsigned i = 0; i != NumIntermediates; ++i)
575 getCopyToParts(DAG, DL, Ops[i], &Parts[i*Factor], Factor, PartVT, V);
576 }
577 }
578
579 namespace {
580 /// RegsForValue - This struct represents the registers (physical or virtual)
581 /// that a particular set of values is assigned, and the type information
582 /// about the value. The most common situation is to represent one value at a
583 /// time, but struct or array values are handled element-wise as multiple
584 /// values. The splitting of aggregates is performed recursively, so that we
585 /// never have aggregate-typed registers. The values at this point do not
586 /// necessarily have legal types, so each value may require one or more
587 /// registers of some legal type.
588 ///
589 struct RegsForValue {
590 /// ValueVTs - The value types of the values, which may not be legal, and
591 /// may need be promoted or synthesized from one or more registers.
592 ///
593 SmallVector<EVT, 4> ValueVTs;
594
595 /// RegVTs - The value types of the registers. This is the same size as
596 /// ValueVTs and it records, for each value, what the type of the assigned
597 /// register or registers are. (Individual values are never synthesized
598 /// from more than one type of register.)
599 ///
600 /// With virtual registers, the contents of RegVTs is redundant with TLI's
601 /// getRegisterType member function, however when with physical registers
602 /// it is necessary to have a separate record of the types.
603 ///
604 SmallVector<MVT, 4> RegVTs;
605
606 /// Regs - This list holds the registers assigned to the values.
607 /// Each legal or promoted value requires one register, and each
608 /// expanded value requires multiple registers.
609 ///
610 SmallVector<unsigned, 4> Regs;
611
RegsForValue__anonc5eea8610111::RegsForValue612 RegsForValue() {}
613
RegsForValue__anonc5eea8610111::RegsForValue614 RegsForValue(const SmallVector<unsigned, 4> ®s,
615 MVT regvt, EVT valuevt)
616 : ValueVTs(1, valuevt), RegVTs(1, regvt), Regs(regs) {}
617
RegsForValue__anonc5eea8610111::RegsForValue618 RegsForValue(LLVMContext &Context, const TargetLowering &tli,
619 unsigned Reg, Type *Ty) {
620 ComputeValueVTs(tli, Ty, ValueVTs);
621
622 for (unsigned Value = 0, e = ValueVTs.size(); Value != e; ++Value) {
623 EVT ValueVT = ValueVTs[Value];
624 unsigned NumRegs = tli.getNumRegisters(Context, ValueVT);
625 MVT RegisterVT = tli.getRegisterType(Context, ValueVT);
626 for (unsigned i = 0; i != NumRegs; ++i)
627 Regs.push_back(Reg + i);
628 RegVTs.push_back(RegisterVT);
629 Reg += NumRegs;
630 }
631 }
632
633 /// append - Add the specified values to this one.
append__anonc5eea8610111::RegsForValue634 void append(const RegsForValue &RHS) {
635 ValueVTs.append(RHS.ValueVTs.begin(), RHS.ValueVTs.end());
636 RegVTs.append(RHS.RegVTs.begin(), RHS.RegVTs.end());
637 Regs.append(RHS.Regs.begin(), RHS.Regs.end());
638 }
639
640 /// getCopyFromRegs - Emit a series of CopyFromReg nodes that copies from
641 /// this value and returns the result as a ValueVTs value. This uses
642 /// Chain/Flag as the input and updates them for the output Chain/Flag.
643 /// If the Flag pointer is NULL, no flag is used.
644 SDValue getCopyFromRegs(SelectionDAG &DAG, FunctionLoweringInfo &FuncInfo,
645 SDLoc dl,
646 SDValue &Chain, SDValue *Flag,
647 const Value *V = nullptr) const;
648
649 /// getCopyToRegs - Emit a series of CopyToReg nodes that copies the
650 /// specified value into the registers specified by this object. This uses
651 /// Chain/Flag as the input and updates them for the output Chain/Flag.
652 /// If the Flag pointer is NULL, no flag is used.
653 void
654 getCopyToRegs(SDValue Val, SelectionDAG &DAG, SDLoc dl, SDValue &Chain,
655 SDValue *Flag, const Value *V,
656 ISD::NodeType PreferredExtendType = ISD::ANY_EXTEND) const;
657
658 /// AddInlineAsmOperands - Add this value to the specified inlineasm node
659 /// operand list. This adds the code marker, matching input operand index
660 /// (if applicable), and includes the number of values added into it.
661 void AddInlineAsmOperands(unsigned Kind,
662 bool HasMatching, unsigned MatchingIdx,
663 SelectionDAG &DAG,
664 std::vector<SDValue> &Ops) const;
665 };
666 }
667
668 /// getCopyFromRegs - Emit a series of CopyFromReg nodes that copies from
669 /// this value and returns the result as a ValueVT value. This uses
670 /// Chain/Flag as the input and updates them for the output Chain/Flag.
671 /// If the Flag pointer is NULL, no flag is used.
getCopyFromRegs(SelectionDAG & DAG,FunctionLoweringInfo & FuncInfo,SDLoc dl,SDValue & Chain,SDValue * Flag,const Value * V) const672 SDValue RegsForValue::getCopyFromRegs(SelectionDAG &DAG,
673 FunctionLoweringInfo &FuncInfo,
674 SDLoc dl,
675 SDValue &Chain, SDValue *Flag,
676 const Value *V) const {
677 // A Value with type {} or [0 x %t] needs no registers.
678 if (ValueVTs.empty())
679 return SDValue();
680
681 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
682
683 // Assemble the legal parts into the final values.
684 SmallVector<SDValue, 4> Values(ValueVTs.size());
685 SmallVector<SDValue, 8> Parts;
686 for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
687 // Copy the legal parts from the registers.
688 EVT ValueVT = ValueVTs[Value];
689 unsigned NumRegs = TLI.getNumRegisters(*DAG.getContext(), ValueVT);
690 MVT RegisterVT = RegVTs[Value];
691
692 Parts.resize(NumRegs);
693 for (unsigned i = 0; i != NumRegs; ++i) {
694 SDValue P;
695 if (!Flag) {
696 P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT);
697 } else {
698 P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT, *Flag);
699 *Flag = P.getValue(2);
700 }
701
702 Chain = P.getValue(1);
703 Parts[i] = P;
704
705 // If the source register was virtual and if we know something about it,
706 // add an assert node.
707 if (!TargetRegisterInfo::isVirtualRegister(Regs[Part+i]) ||
708 !RegisterVT.isInteger() || RegisterVT.isVector())
709 continue;
710
711 const FunctionLoweringInfo::LiveOutInfo *LOI =
712 FuncInfo.GetLiveOutRegInfo(Regs[Part+i]);
713 if (!LOI)
714 continue;
715
716 unsigned RegSize = RegisterVT.getSizeInBits();
717 unsigned NumSignBits = LOI->NumSignBits;
718 unsigned NumZeroBits = LOI->KnownZero.countLeadingOnes();
719
720 if (NumZeroBits == RegSize) {
721 // The current value is a zero.
722 // Explicitly express that as it would be easier for
723 // optimizations to kick in.
724 Parts[i] = DAG.getConstant(0, RegisterVT);
725 continue;
726 }
727
728 // FIXME: We capture more information than the dag can represent. For
729 // now, just use the tightest assertzext/assertsext possible.
730 bool isSExt = true;
731 EVT FromVT(MVT::Other);
732 if (NumSignBits == RegSize)
733 isSExt = true, FromVT = MVT::i1; // ASSERT SEXT 1
734 else if (NumZeroBits >= RegSize-1)
735 isSExt = false, FromVT = MVT::i1; // ASSERT ZEXT 1
736 else if (NumSignBits > RegSize-8)
737 isSExt = true, FromVT = MVT::i8; // ASSERT SEXT 8
738 else if (NumZeroBits >= RegSize-8)
739 isSExt = false, FromVT = MVT::i8; // ASSERT ZEXT 8
740 else if (NumSignBits > RegSize-16)
741 isSExt = true, FromVT = MVT::i16; // ASSERT SEXT 16
742 else if (NumZeroBits >= RegSize-16)
743 isSExt = false, FromVT = MVT::i16; // ASSERT ZEXT 16
744 else if (NumSignBits > RegSize-32)
745 isSExt = true, FromVT = MVT::i32; // ASSERT SEXT 32
746 else if (NumZeroBits >= RegSize-32)
747 isSExt = false, FromVT = MVT::i32; // ASSERT ZEXT 32
748 else
749 continue;
750
751 // Add an assertion node.
752 assert(FromVT != MVT::Other);
753 Parts[i] = DAG.getNode(isSExt ? ISD::AssertSext : ISD::AssertZext, dl,
754 RegisterVT, P, DAG.getValueType(FromVT));
755 }
756
757 Values[Value] = getCopyFromParts(DAG, dl, Parts.begin(),
758 NumRegs, RegisterVT, ValueVT, V);
759 Part += NumRegs;
760 Parts.clear();
761 }
762
763 return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(ValueVTs), Values);
764 }
765
766 /// getCopyToRegs - Emit a series of CopyToReg nodes that copies the
767 /// specified value into the registers specified by this object. This uses
768 /// Chain/Flag as the input and updates them for the output Chain/Flag.
769 /// If the Flag pointer is NULL, no flag is used.
getCopyToRegs(SDValue Val,SelectionDAG & DAG,SDLoc dl,SDValue & Chain,SDValue * Flag,const Value * V,ISD::NodeType PreferredExtendType) const770 void RegsForValue::getCopyToRegs(SDValue Val, SelectionDAG &DAG, SDLoc dl,
771 SDValue &Chain, SDValue *Flag, const Value *V,
772 ISD::NodeType PreferredExtendType) const {
773 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
774 ISD::NodeType ExtendKind = PreferredExtendType;
775
776 // Get the list of the values's legal parts.
777 unsigned NumRegs = Regs.size();
778 SmallVector<SDValue, 8> Parts(NumRegs);
779 for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
780 EVT ValueVT = ValueVTs[Value];
781 unsigned NumParts = TLI.getNumRegisters(*DAG.getContext(), ValueVT);
782 MVT RegisterVT = RegVTs[Value];
783
784 if (ExtendKind == ISD::ANY_EXTEND && TLI.isZExtFree(Val, RegisterVT))
785 ExtendKind = ISD::ZERO_EXTEND;
786
787 getCopyToParts(DAG, dl, Val.getValue(Val.getResNo() + Value),
788 &Parts[Part], NumParts, RegisterVT, V, ExtendKind);
789 Part += NumParts;
790 }
791
792 // Copy the parts into the registers.
793 SmallVector<SDValue, 8> Chains(NumRegs);
794 for (unsigned i = 0; i != NumRegs; ++i) {
795 SDValue Part;
796 if (!Flag) {
797 Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i]);
798 } else {
799 Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i], *Flag);
800 *Flag = Part.getValue(1);
801 }
802
803 Chains[i] = Part.getValue(0);
804 }
805
806 if (NumRegs == 1 || Flag)
807 // If NumRegs > 1 && Flag is used then the use of the last CopyToReg is
808 // flagged to it. That is the CopyToReg nodes and the user are considered
809 // a single scheduling unit. If we create a TokenFactor and return it as
810 // chain, then the TokenFactor is both a predecessor (operand) of the
811 // user as well as a successor (the TF operands are flagged to the user).
812 // c1, f1 = CopyToReg
813 // c2, f2 = CopyToReg
814 // c3 = TokenFactor c1, c2
815 // ...
816 // = op c3, ..., f2
817 Chain = Chains[NumRegs-1];
818 else
819 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
820 }
821
822 /// AddInlineAsmOperands - Add this value to the specified inlineasm node
823 /// operand list. This adds the code marker and includes the number of
824 /// values added into it.
AddInlineAsmOperands(unsigned Code,bool HasMatching,unsigned MatchingIdx,SelectionDAG & DAG,std::vector<SDValue> & Ops) const825 void RegsForValue::AddInlineAsmOperands(unsigned Code, bool HasMatching,
826 unsigned MatchingIdx,
827 SelectionDAG &DAG,
828 std::vector<SDValue> &Ops) const {
829 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
830
831 unsigned Flag = InlineAsm::getFlagWord(Code, Regs.size());
832 if (HasMatching)
833 Flag = InlineAsm::getFlagWordForMatchingOp(Flag, MatchingIdx);
834 else if (!Regs.empty() &&
835 TargetRegisterInfo::isVirtualRegister(Regs.front())) {
836 // Put the register class of the virtual registers in the flag word. That
837 // way, later passes can recompute register class constraints for inline
838 // assembly as well as normal instructions.
839 // Don't do this for tied operands that can use the regclass information
840 // from the def.
841 const MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
842 const TargetRegisterClass *RC = MRI.getRegClass(Regs.front());
843 Flag = InlineAsm::getFlagWordForRegClass(Flag, RC->getID());
844 }
845
846 SDValue Res = DAG.getTargetConstant(Flag, MVT::i32);
847 Ops.push_back(Res);
848
849 unsigned SP = TLI.getStackPointerRegisterToSaveRestore();
850 for (unsigned Value = 0, Reg = 0, e = ValueVTs.size(); Value != e; ++Value) {
851 unsigned NumRegs = TLI.getNumRegisters(*DAG.getContext(), ValueVTs[Value]);
852 MVT RegisterVT = RegVTs[Value];
853 for (unsigned i = 0; i != NumRegs; ++i) {
854 assert(Reg < Regs.size() && "Mismatch in # registers expected");
855 unsigned TheReg = Regs[Reg++];
856 Ops.push_back(DAG.getRegister(TheReg, RegisterVT));
857
858 if (TheReg == SP && Code == InlineAsm::Kind_Clobber) {
859 // If we clobbered the stack pointer, MFI should know about it.
860 assert(DAG.getMachineFunction().getFrameInfo()->
861 hasInlineAsmWithSPAdjust());
862 }
863 }
864 }
865 }
866
init(GCFunctionInfo * gfi,AliasAnalysis & aa,const TargetLibraryInfo * li)867 void SelectionDAGBuilder::init(GCFunctionInfo *gfi, AliasAnalysis &aa,
868 const TargetLibraryInfo *li) {
869 AA = &aa;
870 GFI = gfi;
871 LibInfo = li;
872 DL = DAG.getSubtarget().getDataLayout();
873 Context = DAG.getContext();
874 LPadToCallSiteMap.clear();
875 }
876
877 /// clear - Clear out the current SelectionDAG and the associated
878 /// state and prepare this SelectionDAGBuilder object to be used
879 /// for a new block. This doesn't clear out information about
880 /// additional blocks that are needed to complete switch lowering
881 /// or PHI node updating; that information is cleared out as it is
882 /// consumed.
clear()883 void SelectionDAGBuilder::clear() {
884 NodeMap.clear();
885 UnusedArgNodeMap.clear();
886 PendingLoads.clear();
887 PendingExports.clear();
888 CurInst = nullptr;
889 HasTailCall = false;
890 SDNodeOrder = LowestSDNodeOrder;
891 StatepointLowering.clear();
892 }
893
894 /// clearDanglingDebugInfo - Clear the dangling debug information
895 /// map. This function is separated from the clear so that debug
896 /// information that is dangling in a basic block can be properly
897 /// resolved in a different basic block. This allows the
898 /// SelectionDAG to resolve dangling debug information attached
899 /// to PHI nodes.
clearDanglingDebugInfo()900 void SelectionDAGBuilder::clearDanglingDebugInfo() {
901 DanglingDebugInfoMap.clear();
902 }
903
904 /// getRoot - Return the current virtual root of the Selection DAG,
905 /// flushing any PendingLoad items. This must be done before emitting
906 /// a store or any other node that may need to be ordered after any
907 /// prior load instructions.
908 ///
getRoot()909 SDValue SelectionDAGBuilder::getRoot() {
910 if (PendingLoads.empty())
911 return DAG.getRoot();
912
913 if (PendingLoads.size() == 1) {
914 SDValue Root = PendingLoads[0];
915 DAG.setRoot(Root);
916 PendingLoads.clear();
917 return Root;
918 }
919
920 // Otherwise, we have to make a token factor node.
921 SDValue Root = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), MVT::Other,
922 PendingLoads);
923 PendingLoads.clear();
924 DAG.setRoot(Root);
925 return Root;
926 }
927
928 /// getControlRoot - Similar to getRoot, but instead of flushing all the
929 /// PendingLoad items, flush all the PendingExports items. It is necessary
930 /// to do this before emitting a terminator instruction.
931 ///
getControlRoot()932 SDValue SelectionDAGBuilder::getControlRoot() {
933 SDValue Root = DAG.getRoot();
934
935 if (PendingExports.empty())
936 return Root;
937
938 // Turn all of the CopyToReg chains into one factored node.
939 if (Root.getOpcode() != ISD::EntryToken) {
940 unsigned i = 0, e = PendingExports.size();
941 for (; i != e; ++i) {
942 assert(PendingExports[i].getNode()->getNumOperands() > 1);
943 if (PendingExports[i].getNode()->getOperand(0) == Root)
944 break; // Don't add the root if we already indirectly depend on it.
945 }
946
947 if (i == e)
948 PendingExports.push_back(Root);
949 }
950
951 Root = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), MVT::Other,
952 PendingExports);
953 PendingExports.clear();
954 DAG.setRoot(Root);
955 return Root;
956 }
957
visit(const Instruction & I)958 void SelectionDAGBuilder::visit(const Instruction &I) {
959 // Set up outgoing PHI node register values before emitting the terminator.
960 if (isa<TerminatorInst>(&I))
961 HandlePHINodesInSuccessorBlocks(I.getParent());
962
963 ++SDNodeOrder;
964
965 CurInst = &I;
966
967 visit(I.getOpcode(), I);
968
969 if (!isa<TerminatorInst>(&I) && !HasTailCall)
970 CopyToExportRegsIfNeeded(&I);
971
972 CurInst = nullptr;
973 }
974
visitPHI(const PHINode &)975 void SelectionDAGBuilder::visitPHI(const PHINode &) {
976 llvm_unreachable("SelectionDAGBuilder shouldn't visit PHI nodes!");
977 }
978
visit(unsigned Opcode,const User & I)979 void SelectionDAGBuilder::visit(unsigned Opcode, const User &I) {
980 // Note: this doesn't use InstVisitor, because it has to work with
981 // ConstantExpr's in addition to instructions.
982 switch (Opcode) {
983 default: llvm_unreachable("Unknown instruction type encountered!");
984 // Build the switch statement using the Instruction.def file.
985 #define HANDLE_INST(NUM, OPCODE, CLASS) \
986 case Instruction::OPCODE: visit##OPCODE((const CLASS&)I); break;
987 #include "llvm/IR/Instruction.def"
988 }
989 }
990
991 // resolveDanglingDebugInfo - if we saw an earlier dbg_value referring to V,
992 // generate the debug data structures now that we've seen its definition.
resolveDanglingDebugInfo(const Value * V,SDValue Val)993 void SelectionDAGBuilder::resolveDanglingDebugInfo(const Value *V,
994 SDValue Val) {
995 DanglingDebugInfo &DDI = DanglingDebugInfoMap[V];
996 if (DDI.getDI()) {
997 const DbgValueInst *DI = DDI.getDI();
998 DebugLoc dl = DDI.getdl();
999 unsigned DbgSDNodeOrder = DDI.getSDNodeOrder();
1000 MDNode *Variable = DI->getVariable();
1001 MDNode *Expr = DI->getExpression();
1002 uint64_t Offset = DI->getOffset();
1003 // A dbg.value for an alloca is always indirect.
1004 bool IsIndirect = isa<AllocaInst>(V) || Offset != 0;
1005 SDDbgValue *SDV;
1006 if (Val.getNode()) {
1007 if (!EmitFuncArgumentDbgValue(V, Variable, Expr, Offset, IsIndirect,
1008 Val)) {
1009 SDV = DAG.getDbgValue(Variable, Expr, Val.getNode(), Val.getResNo(),
1010 IsIndirect, Offset, dl, DbgSDNodeOrder);
1011 DAG.AddDbgValue(SDV, Val.getNode(), false);
1012 }
1013 } else
1014 DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
1015 DanglingDebugInfoMap[V] = DanglingDebugInfo();
1016 }
1017 }
1018
1019 /// getValue - Return an SDValue for the given Value.
getValue(const Value * V)1020 SDValue SelectionDAGBuilder::getValue(const Value *V) {
1021 // If we already have an SDValue for this value, use it. It's important
1022 // to do this first, so that we don't create a CopyFromReg if we already
1023 // have a regular SDValue.
1024 SDValue &N = NodeMap[V];
1025 if (N.getNode()) return N;
1026
1027 // If there's a virtual register allocated and initialized for this
1028 // value, use it.
1029 DenseMap<const Value *, unsigned>::iterator It = FuncInfo.ValueMap.find(V);
1030 if (It != FuncInfo.ValueMap.end()) {
1031 unsigned InReg = It->second;
1032 RegsForValue RFV(*DAG.getContext(), DAG.getTargetLoweringInfo(), InReg,
1033 V->getType());
1034 SDValue Chain = DAG.getEntryNode();
1035 N = RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr, V);
1036 resolveDanglingDebugInfo(V, N);
1037 return N;
1038 }
1039
1040 // Otherwise create a new SDValue and remember it.
1041 SDValue Val = getValueImpl(V);
1042 NodeMap[V] = Val;
1043 resolveDanglingDebugInfo(V, Val);
1044 return Val;
1045 }
1046
1047 /// getNonRegisterValue - Return an SDValue for the given Value, but
1048 /// don't look in FuncInfo.ValueMap for a virtual register.
getNonRegisterValue(const Value * V)1049 SDValue SelectionDAGBuilder::getNonRegisterValue(const Value *V) {
1050 // If we already have an SDValue for this value, use it.
1051 SDValue &N = NodeMap[V];
1052 if (N.getNode()) return N;
1053
1054 // Otherwise create a new SDValue and remember it.
1055 SDValue Val = getValueImpl(V);
1056 NodeMap[V] = Val;
1057 resolveDanglingDebugInfo(V, Val);
1058 return Val;
1059 }
1060
1061 /// getValueImpl - Helper function for getValue and getNonRegisterValue.
1062 /// Create an SDValue for the given value.
getValueImpl(const Value * V)1063 SDValue SelectionDAGBuilder::getValueImpl(const Value *V) {
1064 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1065
1066 if (const Constant *C = dyn_cast<Constant>(V)) {
1067 EVT VT = TLI.getValueType(V->getType(), true);
1068
1069 if (const ConstantInt *CI = dyn_cast<ConstantInt>(C))
1070 return DAG.getConstant(*CI, VT);
1071
1072 if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
1073 return DAG.getGlobalAddress(GV, getCurSDLoc(), VT);
1074
1075 if (isa<ConstantPointerNull>(C)) {
1076 unsigned AS = V->getType()->getPointerAddressSpace();
1077 return DAG.getConstant(0, TLI.getPointerTy(AS));
1078 }
1079
1080 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
1081 return DAG.getConstantFP(*CFP, VT);
1082
1083 if (isa<UndefValue>(C) && !V->getType()->isAggregateType())
1084 return DAG.getUNDEF(VT);
1085
1086 if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
1087 visit(CE->getOpcode(), *CE);
1088 SDValue N1 = NodeMap[V];
1089 assert(N1.getNode() && "visit didn't populate the NodeMap!");
1090 return N1;
1091 }
1092
1093 if (isa<ConstantStruct>(C) || isa<ConstantArray>(C)) {
1094 SmallVector<SDValue, 4> Constants;
1095 for (User::const_op_iterator OI = C->op_begin(), OE = C->op_end();
1096 OI != OE; ++OI) {
1097 SDNode *Val = getValue(*OI).getNode();
1098 // If the operand is an empty aggregate, there are no values.
1099 if (!Val) continue;
1100 // Add each leaf value from the operand to the Constants list
1101 // to form a flattened list of all the values.
1102 for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
1103 Constants.push_back(SDValue(Val, i));
1104 }
1105
1106 return DAG.getMergeValues(Constants, getCurSDLoc());
1107 }
1108
1109 if (const ConstantDataSequential *CDS =
1110 dyn_cast<ConstantDataSequential>(C)) {
1111 SmallVector<SDValue, 4> Ops;
1112 for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
1113 SDNode *Val = getValue(CDS->getElementAsConstant(i)).getNode();
1114 // Add each leaf value from the operand to the Constants list
1115 // to form a flattened list of all the values.
1116 for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
1117 Ops.push_back(SDValue(Val, i));
1118 }
1119
1120 if (isa<ArrayType>(CDS->getType()))
1121 return DAG.getMergeValues(Ops, getCurSDLoc());
1122 return NodeMap[V] = DAG.getNode(ISD::BUILD_VECTOR, getCurSDLoc(),
1123 VT, Ops);
1124 }
1125
1126 if (C->getType()->isStructTy() || C->getType()->isArrayTy()) {
1127 assert((isa<ConstantAggregateZero>(C) || isa<UndefValue>(C)) &&
1128 "Unknown struct or array constant!");
1129
1130 SmallVector<EVT, 4> ValueVTs;
1131 ComputeValueVTs(TLI, C->getType(), ValueVTs);
1132 unsigned NumElts = ValueVTs.size();
1133 if (NumElts == 0)
1134 return SDValue(); // empty struct
1135 SmallVector<SDValue, 4> Constants(NumElts);
1136 for (unsigned i = 0; i != NumElts; ++i) {
1137 EVT EltVT = ValueVTs[i];
1138 if (isa<UndefValue>(C))
1139 Constants[i] = DAG.getUNDEF(EltVT);
1140 else if (EltVT.isFloatingPoint())
1141 Constants[i] = DAG.getConstantFP(0, EltVT);
1142 else
1143 Constants[i] = DAG.getConstant(0, EltVT);
1144 }
1145
1146 return DAG.getMergeValues(Constants, getCurSDLoc());
1147 }
1148
1149 if (const BlockAddress *BA = dyn_cast<BlockAddress>(C))
1150 return DAG.getBlockAddress(BA, VT);
1151
1152 VectorType *VecTy = cast<VectorType>(V->getType());
1153 unsigned NumElements = VecTy->getNumElements();
1154
1155 // Now that we know the number and type of the elements, get that number of
1156 // elements into the Ops array based on what kind of constant it is.
1157 SmallVector<SDValue, 16> Ops;
1158 if (const ConstantVector *CV = dyn_cast<ConstantVector>(C)) {
1159 for (unsigned i = 0; i != NumElements; ++i)
1160 Ops.push_back(getValue(CV->getOperand(i)));
1161 } else {
1162 assert(isa<ConstantAggregateZero>(C) && "Unknown vector constant!");
1163 EVT EltVT = TLI.getValueType(VecTy->getElementType());
1164
1165 SDValue Op;
1166 if (EltVT.isFloatingPoint())
1167 Op = DAG.getConstantFP(0, EltVT);
1168 else
1169 Op = DAG.getConstant(0, EltVT);
1170 Ops.assign(NumElements, Op);
1171 }
1172
1173 // Create a BUILD_VECTOR node.
1174 return NodeMap[V] = DAG.getNode(ISD::BUILD_VECTOR, getCurSDLoc(), VT, Ops);
1175 }
1176
1177 // If this is a static alloca, generate it as the frameindex instead of
1178 // computation.
1179 if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
1180 DenseMap<const AllocaInst*, int>::iterator SI =
1181 FuncInfo.StaticAllocaMap.find(AI);
1182 if (SI != FuncInfo.StaticAllocaMap.end())
1183 return DAG.getFrameIndex(SI->second, TLI.getPointerTy());
1184 }
1185
1186 // If this is an instruction which fast-isel has deferred, select it now.
1187 if (const Instruction *Inst = dyn_cast<Instruction>(V)) {
1188 unsigned InReg = FuncInfo.InitializeRegForValue(Inst);
1189 RegsForValue RFV(*DAG.getContext(), TLI, InReg, Inst->getType());
1190 SDValue Chain = DAG.getEntryNode();
1191 return RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr, V);
1192 }
1193
1194 llvm_unreachable("Can't get register for value!");
1195 }
1196
visitRet(const ReturnInst & I)1197 void SelectionDAGBuilder::visitRet(const ReturnInst &I) {
1198 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1199 SDValue Chain = getControlRoot();
1200 SmallVector<ISD::OutputArg, 8> Outs;
1201 SmallVector<SDValue, 8> OutVals;
1202
1203 if (!FuncInfo.CanLowerReturn) {
1204 unsigned DemoteReg = FuncInfo.DemoteRegister;
1205 const Function *F = I.getParent()->getParent();
1206
1207 // Emit a store of the return value through the virtual register.
1208 // Leave Outs empty so that LowerReturn won't try to load return
1209 // registers the usual way.
1210 SmallVector<EVT, 1> PtrValueVTs;
1211 ComputeValueVTs(TLI, PointerType::getUnqual(F->getReturnType()),
1212 PtrValueVTs);
1213
1214 SDValue RetPtr = DAG.getRegister(DemoteReg, PtrValueVTs[0]);
1215 SDValue RetOp = getValue(I.getOperand(0));
1216
1217 SmallVector<EVT, 4> ValueVTs;
1218 SmallVector<uint64_t, 4> Offsets;
1219 ComputeValueVTs(TLI, I.getOperand(0)->getType(), ValueVTs, &Offsets);
1220 unsigned NumValues = ValueVTs.size();
1221
1222 SmallVector<SDValue, 4> Chains(NumValues);
1223 for (unsigned i = 0; i != NumValues; ++i) {
1224 SDValue Add = DAG.getNode(ISD::ADD, getCurSDLoc(),
1225 RetPtr.getValueType(), RetPtr,
1226 DAG.getIntPtrConstant(Offsets[i]));
1227 Chains[i] =
1228 DAG.getStore(Chain, getCurSDLoc(),
1229 SDValue(RetOp.getNode(), RetOp.getResNo() + i),
1230 // FIXME: better loc info would be nice.
1231 Add, MachinePointerInfo(), false, false, 0);
1232 }
1233
1234 Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(),
1235 MVT::Other, Chains);
1236 } else if (I.getNumOperands() != 0) {
1237 SmallVector<EVT, 4> ValueVTs;
1238 ComputeValueVTs(TLI, I.getOperand(0)->getType(), ValueVTs);
1239 unsigned NumValues = ValueVTs.size();
1240 if (NumValues) {
1241 SDValue RetOp = getValue(I.getOperand(0));
1242
1243 const Function *F = I.getParent()->getParent();
1244
1245 ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
1246 if (F->getAttributes().hasAttribute(AttributeSet::ReturnIndex,
1247 Attribute::SExt))
1248 ExtendKind = ISD::SIGN_EXTEND;
1249 else if (F->getAttributes().hasAttribute(AttributeSet::ReturnIndex,
1250 Attribute::ZExt))
1251 ExtendKind = ISD::ZERO_EXTEND;
1252
1253 LLVMContext &Context = F->getContext();
1254 bool RetInReg = F->getAttributes().hasAttribute(AttributeSet::ReturnIndex,
1255 Attribute::InReg);
1256
1257 for (unsigned j = 0; j != NumValues; ++j) {
1258 EVT VT = ValueVTs[j];
1259
1260 if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger())
1261 VT = TLI.getTypeForExtArgOrReturn(Context, VT, ExtendKind);
1262
1263 unsigned NumParts = TLI.getNumRegisters(Context, VT);
1264 MVT PartVT = TLI.getRegisterType(Context, VT);
1265 SmallVector<SDValue, 4> Parts(NumParts);
1266 getCopyToParts(DAG, getCurSDLoc(),
1267 SDValue(RetOp.getNode(), RetOp.getResNo() + j),
1268 &Parts[0], NumParts, PartVT, &I, ExtendKind);
1269
1270 // 'inreg' on function refers to return value
1271 ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
1272 if (RetInReg)
1273 Flags.setInReg();
1274
1275 // Propagate extension type if any
1276 if (ExtendKind == ISD::SIGN_EXTEND)
1277 Flags.setSExt();
1278 else if (ExtendKind == ISD::ZERO_EXTEND)
1279 Flags.setZExt();
1280
1281 for (unsigned i = 0; i < NumParts; ++i) {
1282 Outs.push_back(ISD::OutputArg(Flags, Parts[i].getValueType(),
1283 VT, /*isfixed=*/true, 0, 0));
1284 OutVals.push_back(Parts[i]);
1285 }
1286 }
1287 }
1288 }
1289
1290 bool isVarArg = DAG.getMachineFunction().getFunction()->isVarArg();
1291 CallingConv::ID CallConv =
1292 DAG.getMachineFunction().getFunction()->getCallingConv();
1293 Chain = DAG.getTargetLoweringInfo().LowerReturn(
1294 Chain, CallConv, isVarArg, Outs, OutVals, getCurSDLoc(), DAG);
1295
1296 // Verify that the target's LowerReturn behaved as expected.
1297 assert(Chain.getNode() && Chain.getValueType() == MVT::Other &&
1298 "LowerReturn didn't return a valid chain!");
1299
1300 // Update the DAG with the new chain value resulting from return lowering.
1301 DAG.setRoot(Chain);
1302 }
1303
1304 /// CopyToExportRegsIfNeeded - If the given value has virtual registers
1305 /// created for it, emit nodes to copy the value into the virtual
1306 /// registers.
CopyToExportRegsIfNeeded(const Value * V)1307 void SelectionDAGBuilder::CopyToExportRegsIfNeeded(const Value *V) {
1308 // Skip empty types
1309 if (V->getType()->isEmptyTy())
1310 return;
1311
1312 DenseMap<const Value *, unsigned>::iterator VMI = FuncInfo.ValueMap.find(V);
1313 if (VMI != FuncInfo.ValueMap.end()) {
1314 assert(!V->use_empty() && "Unused value assigned virtual registers!");
1315 CopyValueToVirtualRegister(V, VMI->second);
1316 }
1317 }
1318
1319 /// ExportFromCurrentBlock - If this condition isn't known to be exported from
1320 /// the current basic block, add it to ValueMap now so that we'll get a
1321 /// CopyTo/FromReg.
ExportFromCurrentBlock(const Value * V)1322 void SelectionDAGBuilder::ExportFromCurrentBlock(const Value *V) {
1323 // No need to export constants.
1324 if (!isa<Instruction>(V) && !isa<Argument>(V)) return;
1325
1326 // Already exported?
1327 if (FuncInfo.isExportedInst(V)) return;
1328
1329 unsigned Reg = FuncInfo.InitializeRegForValue(V);
1330 CopyValueToVirtualRegister(V, Reg);
1331 }
1332
isExportableFromCurrentBlock(const Value * V,const BasicBlock * FromBB)1333 bool SelectionDAGBuilder::isExportableFromCurrentBlock(const Value *V,
1334 const BasicBlock *FromBB) {
1335 // The operands of the setcc have to be in this block. We don't know
1336 // how to export them from some other block.
1337 if (const Instruction *VI = dyn_cast<Instruction>(V)) {
1338 // Can export from current BB.
1339 if (VI->getParent() == FromBB)
1340 return true;
1341
1342 // Is already exported, noop.
1343 return FuncInfo.isExportedInst(V);
1344 }
1345
1346 // If this is an argument, we can export it if the BB is the entry block or
1347 // if it is already exported.
1348 if (isa<Argument>(V)) {
1349 if (FromBB == &FromBB->getParent()->getEntryBlock())
1350 return true;
1351
1352 // Otherwise, can only export this if it is already exported.
1353 return FuncInfo.isExportedInst(V);
1354 }
1355
1356 // Otherwise, constants can always be exported.
1357 return true;
1358 }
1359
1360 /// Return branch probability calculated by BranchProbabilityInfo for IR blocks.
getEdgeWeight(const MachineBasicBlock * Src,const MachineBasicBlock * Dst) const1361 uint32_t SelectionDAGBuilder::getEdgeWeight(const MachineBasicBlock *Src,
1362 const MachineBasicBlock *Dst) const {
1363 BranchProbabilityInfo *BPI = FuncInfo.BPI;
1364 if (!BPI)
1365 return 0;
1366 const BasicBlock *SrcBB = Src->getBasicBlock();
1367 const BasicBlock *DstBB = Dst->getBasicBlock();
1368 return BPI->getEdgeWeight(SrcBB, DstBB);
1369 }
1370
1371 void SelectionDAGBuilder::
addSuccessorWithWeight(MachineBasicBlock * Src,MachineBasicBlock * Dst,uint32_t Weight)1372 addSuccessorWithWeight(MachineBasicBlock *Src, MachineBasicBlock *Dst,
1373 uint32_t Weight /* = 0 */) {
1374 if (!Weight)
1375 Weight = getEdgeWeight(Src, Dst);
1376 Src->addSuccessor(Dst, Weight);
1377 }
1378
1379
InBlock(const Value * V,const BasicBlock * BB)1380 static bool InBlock(const Value *V, const BasicBlock *BB) {
1381 if (const Instruction *I = dyn_cast<Instruction>(V))
1382 return I->getParent() == BB;
1383 return true;
1384 }
1385
1386 /// EmitBranchForMergedCondition - Helper method for FindMergedConditions.
1387 /// This function emits a branch and is used at the leaves of an OR or an
1388 /// AND operator tree.
1389 ///
1390 void
EmitBranchForMergedCondition(const Value * Cond,MachineBasicBlock * TBB,MachineBasicBlock * FBB,MachineBasicBlock * CurBB,MachineBasicBlock * SwitchBB,uint32_t TWeight,uint32_t FWeight)1391 SelectionDAGBuilder::EmitBranchForMergedCondition(const Value *Cond,
1392 MachineBasicBlock *TBB,
1393 MachineBasicBlock *FBB,
1394 MachineBasicBlock *CurBB,
1395 MachineBasicBlock *SwitchBB,
1396 uint32_t TWeight,
1397 uint32_t FWeight) {
1398 const BasicBlock *BB = CurBB->getBasicBlock();
1399
1400 // If the leaf of the tree is a comparison, merge the condition into
1401 // the caseblock.
1402 if (const CmpInst *BOp = dyn_cast<CmpInst>(Cond)) {
1403 // The operands of the cmp have to be in this block. We don't know
1404 // how to export them from some other block. If this is the first block
1405 // of the sequence, no exporting is needed.
1406 if (CurBB == SwitchBB ||
1407 (isExportableFromCurrentBlock(BOp->getOperand(0), BB) &&
1408 isExportableFromCurrentBlock(BOp->getOperand(1), BB))) {
1409 ISD::CondCode Condition;
1410 if (const ICmpInst *IC = dyn_cast<ICmpInst>(Cond)) {
1411 Condition = getICmpCondCode(IC->getPredicate());
1412 } else if (const FCmpInst *FC = dyn_cast<FCmpInst>(Cond)) {
1413 Condition = getFCmpCondCode(FC->getPredicate());
1414 if (TM.Options.NoNaNsFPMath)
1415 Condition = getFCmpCodeWithoutNaN(Condition);
1416 } else {
1417 (void)Condition; // silence warning.
1418 llvm_unreachable("Unknown compare instruction");
1419 }
1420
1421 CaseBlock CB(Condition, BOp->getOperand(0), BOp->getOperand(1), nullptr,
1422 TBB, FBB, CurBB, TWeight, FWeight);
1423 SwitchCases.push_back(CB);
1424 return;
1425 }
1426 }
1427
1428 // Create a CaseBlock record representing this branch.
1429 CaseBlock CB(ISD::SETEQ, Cond, ConstantInt::getTrue(*DAG.getContext()),
1430 nullptr, TBB, FBB, CurBB, TWeight, FWeight);
1431 SwitchCases.push_back(CB);
1432 }
1433
1434 /// Scale down both weights to fit into uint32_t.
ScaleWeights(uint64_t & NewTrue,uint64_t & NewFalse)1435 static void ScaleWeights(uint64_t &NewTrue, uint64_t &NewFalse) {
1436 uint64_t NewMax = (NewTrue > NewFalse) ? NewTrue : NewFalse;
1437 uint32_t Scale = (NewMax / UINT32_MAX) + 1;
1438 NewTrue = NewTrue / Scale;
1439 NewFalse = NewFalse / Scale;
1440 }
1441
1442 /// FindMergedConditions - If Cond is an expression like
FindMergedConditions(const Value * Cond,MachineBasicBlock * TBB,MachineBasicBlock * FBB,MachineBasicBlock * CurBB,MachineBasicBlock * SwitchBB,unsigned Opc,uint32_t TWeight,uint32_t FWeight)1443 void SelectionDAGBuilder::FindMergedConditions(const Value *Cond,
1444 MachineBasicBlock *TBB,
1445 MachineBasicBlock *FBB,
1446 MachineBasicBlock *CurBB,
1447 MachineBasicBlock *SwitchBB,
1448 unsigned Opc, uint32_t TWeight,
1449 uint32_t FWeight) {
1450 // If this node is not part of the or/and tree, emit it as a branch.
1451 const Instruction *BOp = dyn_cast<Instruction>(Cond);
1452 if (!BOp || !(isa<BinaryOperator>(BOp) || isa<CmpInst>(BOp)) ||
1453 (unsigned)BOp->getOpcode() != Opc || !BOp->hasOneUse() ||
1454 BOp->getParent() != CurBB->getBasicBlock() ||
1455 !InBlock(BOp->getOperand(0), CurBB->getBasicBlock()) ||
1456 !InBlock(BOp->getOperand(1), CurBB->getBasicBlock())) {
1457 EmitBranchForMergedCondition(Cond, TBB, FBB, CurBB, SwitchBB,
1458 TWeight, FWeight);
1459 return;
1460 }
1461
1462 // Create TmpBB after CurBB.
1463 MachineFunction::iterator BBI = CurBB;
1464 MachineFunction &MF = DAG.getMachineFunction();
1465 MachineBasicBlock *TmpBB = MF.CreateMachineBasicBlock(CurBB->getBasicBlock());
1466 CurBB->getParent()->insert(++BBI, TmpBB);
1467
1468 if (Opc == Instruction::Or) {
1469 // Codegen X | Y as:
1470 // BB1:
1471 // jmp_if_X TBB
1472 // jmp TmpBB
1473 // TmpBB:
1474 // jmp_if_Y TBB
1475 // jmp FBB
1476 //
1477
1478 // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
1479 // The requirement is that
1480 // TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB)
1481 // = TrueProb for orignal BB.
1482 // Assuming the orignal weights are A and B, one choice is to set BB1's
1483 // weights to A and A+2B, and set TmpBB's weights to A and 2B. This choice
1484 // assumes that
1485 // TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB.
1486 // Another choice is to assume TrueProb for BB1 equals to TrueProb for
1487 // TmpBB, but the math is more complicated.
1488
1489 uint64_t NewTrueWeight = TWeight;
1490 uint64_t NewFalseWeight = (uint64_t)TWeight + 2 * (uint64_t)FWeight;
1491 ScaleWeights(NewTrueWeight, NewFalseWeight);
1492 // Emit the LHS condition.
1493 FindMergedConditions(BOp->getOperand(0), TBB, TmpBB, CurBB, SwitchBB, Opc,
1494 NewTrueWeight, NewFalseWeight);
1495
1496 NewTrueWeight = TWeight;
1497 NewFalseWeight = 2 * (uint64_t)FWeight;
1498 ScaleWeights(NewTrueWeight, NewFalseWeight);
1499 // Emit the RHS condition into TmpBB.
1500 FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, SwitchBB, Opc,
1501 NewTrueWeight, NewFalseWeight);
1502 } else {
1503 assert(Opc == Instruction::And && "Unknown merge op!");
1504 // Codegen X & Y as:
1505 // BB1:
1506 // jmp_if_X TmpBB
1507 // jmp FBB
1508 // TmpBB:
1509 // jmp_if_Y TBB
1510 // jmp FBB
1511 //
1512 // This requires creation of TmpBB after CurBB.
1513
1514 // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
1515 // The requirement is that
1516 // FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB)
1517 // = FalseProb for orignal BB.
1518 // Assuming the orignal weights are A and B, one choice is to set BB1's
1519 // weights to 2A+B and B, and set TmpBB's weights to 2A and B. This choice
1520 // assumes that
1521 // FalseProb for BB1 == TrueProb for BB1 * FalseProb for TmpBB.
1522
1523 uint64_t NewTrueWeight = 2 * (uint64_t)TWeight + (uint64_t)FWeight;
1524 uint64_t NewFalseWeight = FWeight;
1525 ScaleWeights(NewTrueWeight, NewFalseWeight);
1526 // Emit the LHS condition.
1527 FindMergedConditions(BOp->getOperand(0), TmpBB, FBB, CurBB, SwitchBB, Opc,
1528 NewTrueWeight, NewFalseWeight);
1529
1530 NewTrueWeight = 2 * (uint64_t)TWeight;
1531 NewFalseWeight = FWeight;
1532 ScaleWeights(NewTrueWeight, NewFalseWeight);
1533 // Emit the RHS condition into TmpBB.
1534 FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, SwitchBB, Opc,
1535 NewTrueWeight, NewFalseWeight);
1536 }
1537 }
1538
1539 /// If the set of cases should be emitted as a series of branches, return true.
1540 /// If we should emit this as a bunch of and/or'd together conditions, return
1541 /// false.
1542 bool
ShouldEmitAsBranches(const std::vector<CaseBlock> & Cases)1543 SelectionDAGBuilder::ShouldEmitAsBranches(const std::vector<CaseBlock> &Cases) {
1544 if (Cases.size() != 2) return true;
1545
1546 // If this is two comparisons of the same values or'd or and'd together, they
1547 // will get folded into a single comparison, so don't emit two blocks.
1548 if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
1549 Cases[0].CmpRHS == Cases[1].CmpRHS) ||
1550 (Cases[0].CmpRHS == Cases[1].CmpLHS &&
1551 Cases[0].CmpLHS == Cases[1].CmpRHS)) {
1552 return false;
1553 }
1554
1555 // Handle: (X != null) | (Y != null) --> (X|Y) != 0
1556 // Handle: (X == null) & (Y == null) --> (X|Y) == 0
1557 if (Cases[0].CmpRHS == Cases[1].CmpRHS &&
1558 Cases[0].CC == Cases[1].CC &&
1559 isa<Constant>(Cases[0].CmpRHS) &&
1560 cast<Constant>(Cases[0].CmpRHS)->isNullValue()) {
1561 if (Cases[0].CC == ISD::SETEQ && Cases[0].TrueBB == Cases[1].ThisBB)
1562 return false;
1563 if (Cases[0].CC == ISD::SETNE && Cases[0].FalseBB == Cases[1].ThisBB)
1564 return false;
1565 }
1566
1567 return true;
1568 }
1569
visitBr(const BranchInst & I)1570 void SelectionDAGBuilder::visitBr(const BranchInst &I) {
1571 MachineBasicBlock *BrMBB = FuncInfo.MBB;
1572
1573 // Update machine-CFG edges.
1574 MachineBasicBlock *Succ0MBB = FuncInfo.MBBMap[I.getSuccessor(0)];
1575
1576 // Figure out which block is immediately after the current one.
1577 MachineBasicBlock *NextBlock = nullptr;
1578 MachineFunction::iterator BBI = BrMBB;
1579 if (++BBI != FuncInfo.MF->end())
1580 NextBlock = BBI;
1581
1582 if (I.isUnconditional()) {
1583 // Update machine-CFG edges.
1584 BrMBB->addSuccessor(Succ0MBB);
1585
1586 // If this is not a fall-through branch or optimizations are switched off,
1587 // emit the branch.
1588 if (Succ0MBB != NextBlock || TM.getOptLevel() == CodeGenOpt::None)
1589 DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(),
1590 MVT::Other, getControlRoot(),
1591 DAG.getBasicBlock(Succ0MBB)));
1592
1593 return;
1594 }
1595
1596 // If this condition is one of the special cases we handle, do special stuff
1597 // now.
1598 const Value *CondVal = I.getCondition();
1599 MachineBasicBlock *Succ1MBB = FuncInfo.MBBMap[I.getSuccessor(1)];
1600
1601 // If this is a series of conditions that are or'd or and'd together, emit
1602 // this as a sequence of branches instead of setcc's with and/or operations.
1603 // As long as jumps are not expensive, this should improve performance.
1604 // For example, instead of something like:
1605 // cmp A, B
1606 // C = seteq
1607 // cmp D, E
1608 // F = setle
1609 // or C, F
1610 // jnz foo
1611 // Emit:
1612 // cmp A, B
1613 // je foo
1614 // cmp D, E
1615 // jle foo
1616 //
1617 if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(CondVal)) {
1618 if (!DAG.getTargetLoweringInfo().isJumpExpensive() &&
1619 BOp->hasOneUse() && (BOp->getOpcode() == Instruction::And ||
1620 BOp->getOpcode() == Instruction::Or)) {
1621 FindMergedConditions(BOp, Succ0MBB, Succ1MBB, BrMBB, BrMBB,
1622 BOp->getOpcode(), getEdgeWeight(BrMBB, Succ0MBB),
1623 getEdgeWeight(BrMBB, Succ1MBB));
1624 // If the compares in later blocks need to use values not currently
1625 // exported from this block, export them now. This block should always
1626 // be the first entry.
1627 assert(SwitchCases[0].ThisBB == BrMBB && "Unexpected lowering!");
1628
1629 // Allow some cases to be rejected.
1630 if (ShouldEmitAsBranches(SwitchCases)) {
1631 for (unsigned i = 1, e = SwitchCases.size(); i != e; ++i) {
1632 ExportFromCurrentBlock(SwitchCases[i].CmpLHS);
1633 ExportFromCurrentBlock(SwitchCases[i].CmpRHS);
1634 }
1635
1636 // Emit the branch for this block.
1637 visitSwitchCase(SwitchCases[0], BrMBB);
1638 SwitchCases.erase(SwitchCases.begin());
1639 return;
1640 }
1641
1642 // Okay, we decided not to do this, remove any inserted MBB's and clear
1643 // SwitchCases.
1644 for (unsigned i = 1, e = SwitchCases.size(); i != e; ++i)
1645 FuncInfo.MF->erase(SwitchCases[i].ThisBB);
1646
1647 SwitchCases.clear();
1648 }
1649 }
1650
1651 // Create a CaseBlock record representing this branch.
1652 CaseBlock CB(ISD::SETEQ, CondVal, ConstantInt::getTrue(*DAG.getContext()),
1653 nullptr, Succ0MBB, Succ1MBB, BrMBB);
1654
1655 // Use visitSwitchCase to actually insert the fast branch sequence for this
1656 // cond branch.
1657 visitSwitchCase(CB, BrMBB);
1658 }
1659
1660 /// visitSwitchCase - Emits the necessary code to represent a single node in
1661 /// the binary search tree resulting from lowering a switch instruction.
visitSwitchCase(CaseBlock & CB,MachineBasicBlock * SwitchBB)1662 void SelectionDAGBuilder::visitSwitchCase(CaseBlock &CB,
1663 MachineBasicBlock *SwitchBB) {
1664 SDValue Cond;
1665 SDValue CondLHS = getValue(CB.CmpLHS);
1666 SDLoc dl = getCurSDLoc();
1667
1668 // Build the setcc now.
1669 if (!CB.CmpMHS) {
1670 // Fold "(X == true)" to X and "(X == false)" to !X to
1671 // handle common cases produced by branch lowering.
1672 if (CB.CmpRHS == ConstantInt::getTrue(*DAG.getContext()) &&
1673 CB.CC == ISD::SETEQ)
1674 Cond = CondLHS;
1675 else if (CB.CmpRHS == ConstantInt::getFalse(*DAG.getContext()) &&
1676 CB.CC == ISD::SETEQ) {
1677 SDValue True = DAG.getConstant(1, CondLHS.getValueType());
1678 Cond = DAG.getNode(ISD::XOR, dl, CondLHS.getValueType(), CondLHS, True);
1679 } else
1680 Cond = DAG.getSetCC(dl, MVT::i1, CondLHS, getValue(CB.CmpRHS), CB.CC);
1681 } else {
1682 assert(CB.CC == ISD::SETLE && "Can handle only LE ranges now");
1683
1684 const APInt& Low = cast<ConstantInt>(CB.CmpLHS)->getValue();
1685 const APInt& High = cast<ConstantInt>(CB.CmpRHS)->getValue();
1686
1687 SDValue CmpOp = getValue(CB.CmpMHS);
1688 EVT VT = CmpOp.getValueType();
1689
1690 if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) {
1691 Cond = DAG.getSetCC(dl, MVT::i1, CmpOp, DAG.getConstant(High, VT),
1692 ISD::SETLE);
1693 } else {
1694 SDValue SUB = DAG.getNode(ISD::SUB, dl,
1695 VT, CmpOp, DAG.getConstant(Low, VT));
1696 Cond = DAG.getSetCC(dl, MVT::i1, SUB,
1697 DAG.getConstant(High-Low, VT), ISD::SETULE);
1698 }
1699 }
1700
1701 // Update successor info
1702 addSuccessorWithWeight(SwitchBB, CB.TrueBB, CB.TrueWeight);
1703 // TrueBB and FalseBB are always different unless the incoming IR is
1704 // degenerate. This only happens when running llc on weird IR.
1705 if (CB.TrueBB != CB.FalseBB)
1706 addSuccessorWithWeight(SwitchBB, CB.FalseBB, CB.FalseWeight);
1707
1708 // Set NextBlock to be the MBB immediately after the current one, if any.
1709 // This is used to avoid emitting unnecessary branches to the next block.
1710 MachineBasicBlock *NextBlock = nullptr;
1711 MachineFunction::iterator BBI = SwitchBB;
1712 if (++BBI != FuncInfo.MF->end())
1713 NextBlock = BBI;
1714
1715 // If the lhs block is the next block, invert the condition so that we can
1716 // fall through to the lhs instead of the rhs block.
1717 if (CB.TrueBB == NextBlock) {
1718 std::swap(CB.TrueBB, CB.FalseBB);
1719 SDValue True = DAG.getConstant(1, Cond.getValueType());
1720 Cond = DAG.getNode(ISD::XOR, dl, Cond.getValueType(), Cond, True);
1721 }
1722
1723 SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
1724 MVT::Other, getControlRoot(), Cond,
1725 DAG.getBasicBlock(CB.TrueBB));
1726
1727 // Insert the false branch. Do this even if it's a fall through branch,
1728 // this makes it easier to do DAG optimizations which require inverting
1729 // the branch condition.
1730 BrCond = DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
1731 DAG.getBasicBlock(CB.FalseBB));
1732
1733 DAG.setRoot(BrCond);
1734 }
1735
1736 /// visitJumpTable - Emit JumpTable node in the current MBB
visitJumpTable(JumpTable & JT)1737 void SelectionDAGBuilder::visitJumpTable(JumpTable &JT) {
1738 // Emit the code for the jump table
1739 assert(JT.Reg != -1U && "Should lower JT Header first!");
1740 EVT PTy = DAG.getTargetLoweringInfo().getPointerTy();
1741 SDValue Index = DAG.getCopyFromReg(getControlRoot(), getCurSDLoc(),
1742 JT.Reg, PTy);
1743 SDValue Table = DAG.getJumpTable(JT.JTI, PTy);
1744 SDValue BrJumpTable = DAG.getNode(ISD::BR_JT, getCurSDLoc(),
1745 MVT::Other, Index.getValue(1),
1746 Table, Index);
1747 DAG.setRoot(BrJumpTable);
1748 }
1749
1750 /// visitJumpTableHeader - This function emits necessary code to produce index
1751 /// in the JumpTable from switch case.
visitJumpTableHeader(JumpTable & JT,JumpTableHeader & JTH,MachineBasicBlock * SwitchBB)1752 void SelectionDAGBuilder::visitJumpTableHeader(JumpTable &JT,
1753 JumpTableHeader &JTH,
1754 MachineBasicBlock *SwitchBB) {
1755 // Subtract the lowest switch case value from the value being switched on and
1756 // conditional branch to default mbb if the result is greater than the
1757 // difference between smallest and largest cases.
1758 SDValue SwitchOp = getValue(JTH.SValue);
1759 EVT VT = SwitchOp.getValueType();
1760 SDValue Sub = DAG.getNode(ISD::SUB, getCurSDLoc(), VT, SwitchOp,
1761 DAG.getConstant(JTH.First, VT));
1762
1763 // The SDNode we just created, which holds the value being switched on minus
1764 // the smallest case value, needs to be copied to a virtual register so it
1765 // can be used as an index into the jump table in a subsequent basic block.
1766 // This value may be smaller or larger than the target's pointer type, and
1767 // therefore require extension or truncating.
1768 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1769 SwitchOp = DAG.getZExtOrTrunc(Sub, getCurSDLoc(), TLI.getPointerTy());
1770
1771 unsigned JumpTableReg = FuncInfo.CreateReg(TLI.getPointerTy());
1772 SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), getCurSDLoc(),
1773 JumpTableReg, SwitchOp);
1774 JT.Reg = JumpTableReg;
1775
1776 // Emit the range check for the jump table, and branch to the default block
1777 // for the switch statement if the value being switched on exceeds the largest
1778 // case in the switch.
1779 SDValue CMP =
1780 DAG.getSetCC(getCurSDLoc(), TLI.getSetCCResultType(*DAG.getContext(),
1781 Sub.getValueType()),
1782 Sub, DAG.getConstant(JTH.Last - JTH.First, VT), ISD::SETUGT);
1783
1784 // Set NextBlock to be the MBB immediately after the current one, if any.
1785 // This is used to avoid emitting unnecessary branches to the next block.
1786 MachineBasicBlock *NextBlock = nullptr;
1787 MachineFunction::iterator BBI = SwitchBB;
1788
1789 if (++BBI != FuncInfo.MF->end())
1790 NextBlock = BBI;
1791
1792 SDValue BrCond = DAG.getNode(ISD::BRCOND, getCurSDLoc(),
1793 MVT::Other, CopyTo, CMP,
1794 DAG.getBasicBlock(JT.Default));
1795
1796 if (JT.MBB != NextBlock)
1797 BrCond = DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other, BrCond,
1798 DAG.getBasicBlock(JT.MBB));
1799
1800 DAG.setRoot(BrCond);
1801 }
1802
1803 /// Codegen a new tail for a stack protector check ParentMBB which has had its
1804 /// tail spliced into a stack protector check success bb.
1805 ///
1806 /// For a high level explanation of how this fits into the stack protector
1807 /// generation see the comment on the declaration of class
1808 /// StackProtectorDescriptor.
visitSPDescriptorParent(StackProtectorDescriptor & SPD,MachineBasicBlock * ParentBB)1809 void SelectionDAGBuilder::visitSPDescriptorParent(StackProtectorDescriptor &SPD,
1810 MachineBasicBlock *ParentBB) {
1811
1812 // First create the loads to the guard/stack slot for the comparison.
1813 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1814 EVT PtrTy = TLI.getPointerTy();
1815
1816 MachineFrameInfo *MFI = ParentBB->getParent()->getFrameInfo();
1817 int FI = MFI->getStackProtectorIndex();
1818
1819 const Value *IRGuard = SPD.getGuard();
1820 SDValue GuardPtr = getValue(IRGuard);
1821 SDValue StackSlotPtr = DAG.getFrameIndex(FI, PtrTy);
1822
1823 unsigned Align =
1824 TLI.getDataLayout()->getPrefTypeAlignment(IRGuard->getType());
1825
1826 SDValue Guard;
1827
1828 // If GuardReg is set and useLoadStackGuardNode returns true, retrieve the
1829 // guard value from the virtual register holding the value. Otherwise, emit a
1830 // volatile load to retrieve the stack guard value.
1831 unsigned GuardReg = SPD.getGuardReg();
1832
1833 if (GuardReg && TLI.useLoadStackGuardNode())
1834 Guard = DAG.getCopyFromReg(DAG.getEntryNode(), getCurSDLoc(), GuardReg,
1835 PtrTy);
1836 else
1837 Guard = DAG.getLoad(PtrTy, getCurSDLoc(), DAG.getEntryNode(),
1838 GuardPtr, MachinePointerInfo(IRGuard, 0),
1839 true, false, false, Align);
1840
1841 SDValue StackSlot = DAG.getLoad(PtrTy, getCurSDLoc(), DAG.getEntryNode(),
1842 StackSlotPtr,
1843 MachinePointerInfo::getFixedStack(FI),
1844 true, false, false, Align);
1845
1846 // Perform the comparison via a subtract/getsetcc.
1847 EVT VT = Guard.getValueType();
1848 SDValue Sub = DAG.getNode(ISD::SUB, getCurSDLoc(), VT, Guard, StackSlot);
1849
1850 SDValue Cmp =
1851 DAG.getSetCC(getCurSDLoc(), TLI.getSetCCResultType(*DAG.getContext(),
1852 Sub.getValueType()),
1853 Sub, DAG.getConstant(0, VT), ISD::SETNE);
1854
1855 // If the sub is not 0, then we know the guard/stackslot do not equal, so
1856 // branch to failure MBB.
1857 SDValue BrCond = DAG.getNode(ISD::BRCOND, getCurSDLoc(),
1858 MVT::Other, StackSlot.getOperand(0),
1859 Cmp, DAG.getBasicBlock(SPD.getFailureMBB()));
1860 // Otherwise branch to success MBB.
1861 SDValue Br = DAG.getNode(ISD::BR, getCurSDLoc(),
1862 MVT::Other, BrCond,
1863 DAG.getBasicBlock(SPD.getSuccessMBB()));
1864
1865 DAG.setRoot(Br);
1866 }
1867
1868 /// Codegen the failure basic block for a stack protector check.
1869 ///
1870 /// A failure stack protector machine basic block consists simply of a call to
1871 /// __stack_chk_fail().
1872 ///
1873 /// For a high level explanation of how this fits into the stack protector
1874 /// generation see the comment on the declaration of class
1875 /// StackProtectorDescriptor.
1876 void
visitSPDescriptorFailure(StackProtectorDescriptor & SPD)1877 SelectionDAGBuilder::visitSPDescriptorFailure(StackProtectorDescriptor &SPD) {
1878 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1879 SDValue Chain =
1880 TLI.makeLibCall(DAG, RTLIB::STACKPROTECTOR_CHECK_FAIL, MVT::isVoid,
1881 nullptr, 0, false, getCurSDLoc(), false, false).second;
1882 DAG.setRoot(Chain);
1883 }
1884
1885 /// visitBitTestHeader - This function emits necessary code to produce value
1886 /// suitable for "bit tests"
visitBitTestHeader(BitTestBlock & B,MachineBasicBlock * SwitchBB)1887 void SelectionDAGBuilder::visitBitTestHeader(BitTestBlock &B,
1888 MachineBasicBlock *SwitchBB) {
1889 // Subtract the minimum value
1890 SDValue SwitchOp = getValue(B.SValue);
1891 EVT VT = SwitchOp.getValueType();
1892 SDValue Sub = DAG.getNode(ISD::SUB, getCurSDLoc(), VT, SwitchOp,
1893 DAG.getConstant(B.First, VT));
1894
1895 // Check range
1896 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1897 SDValue RangeCmp =
1898 DAG.getSetCC(getCurSDLoc(), TLI.getSetCCResultType(*DAG.getContext(),
1899 Sub.getValueType()),
1900 Sub, DAG.getConstant(B.Range, VT), ISD::SETUGT);
1901
1902 // Determine the type of the test operands.
1903 bool UsePtrType = false;
1904 if (!TLI.isTypeLegal(VT))
1905 UsePtrType = true;
1906 else {
1907 for (unsigned i = 0, e = B.Cases.size(); i != e; ++i)
1908 if (!isUIntN(VT.getSizeInBits(), B.Cases[i].Mask)) {
1909 // Switch table case range are encoded into series of masks.
1910 // Just use pointer type, it's guaranteed to fit.
1911 UsePtrType = true;
1912 break;
1913 }
1914 }
1915 if (UsePtrType) {
1916 VT = TLI.getPointerTy();
1917 Sub = DAG.getZExtOrTrunc(Sub, getCurSDLoc(), VT);
1918 }
1919
1920 B.RegVT = VT.getSimpleVT();
1921 B.Reg = FuncInfo.CreateReg(B.RegVT);
1922 SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), getCurSDLoc(),
1923 B.Reg, Sub);
1924
1925 // Set NextBlock to be the MBB immediately after the current one, if any.
1926 // This is used to avoid emitting unnecessary branches to the next block.
1927 MachineBasicBlock *NextBlock = nullptr;
1928 MachineFunction::iterator BBI = SwitchBB;
1929 if (++BBI != FuncInfo.MF->end())
1930 NextBlock = BBI;
1931
1932 MachineBasicBlock* MBB = B.Cases[0].ThisBB;
1933
1934 addSuccessorWithWeight(SwitchBB, B.Default);
1935 addSuccessorWithWeight(SwitchBB, MBB);
1936
1937 SDValue BrRange = DAG.getNode(ISD::BRCOND, getCurSDLoc(),
1938 MVT::Other, CopyTo, RangeCmp,
1939 DAG.getBasicBlock(B.Default));
1940
1941 if (MBB != NextBlock)
1942 BrRange = DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other, CopyTo,
1943 DAG.getBasicBlock(MBB));
1944
1945 DAG.setRoot(BrRange);
1946 }
1947
1948 /// visitBitTestCase - this function produces one "bit test"
visitBitTestCase(BitTestBlock & BB,MachineBasicBlock * NextMBB,uint32_t BranchWeightToNext,unsigned Reg,BitTestCase & B,MachineBasicBlock * SwitchBB)1949 void SelectionDAGBuilder::visitBitTestCase(BitTestBlock &BB,
1950 MachineBasicBlock* NextMBB,
1951 uint32_t BranchWeightToNext,
1952 unsigned Reg,
1953 BitTestCase &B,
1954 MachineBasicBlock *SwitchBB) {
1955 MVT VT = BB.RegVT;
1956 SDValue ShiftOp = DAG.getCopyFromReg(getControlRoot(), getCurSDLoc(),
1957 Reg, VT);
1958 SDValue Cmp;
1959 unsigned PopCount = CountPopulation_64(B.Mask);
1960 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1961 if (PopCount == 1) {
1962 // Testing for a single bit; just compare the shift count with what it
1963 // would need to be to shift a 1 bit in that position.
1964 Cmp = DAG.getSetCC(
1965 getCurSDLoc(), TLI.getSetCCResultType(*DAG.getContext(), VT), ShiftOp,
1966 DAG.getConstant(countTrailingZeros(B.Mask), VT), ISD::SETEQ);
1967 } else if (PopCount == BB.Range) {
1968 // There is only one zero bit in the range, test for it directly.
1969 Cmp = DAG.getSetCC(
1970 getCurSDLoc(), TLI.getSetCCResultType(*DAG.getContext(), VT), ShiftOp,
1971 DAG.getConstant(CountTrailingOnes_64(B.Mask), VT), ISD::SETNE);
1972 } else {
1973 // Make desired shift
1974 SDValue SwitchVal = DAG.getNode(ISD::SHL, getCurSDLoc(), VT,
1975 DAG.getConstant(1, VT), ShiftOp);
1976
1977 // Emit bit tests and jumps
1978 SDValue AndOp = DAG.getNode(ISD::AND, getCurSDLoc(),
1979 VT, SwitchVal, DAG.getConstant(B.Mask, VT));
1980 Cmp = DAG.getSetCC(getCurSDLoc(),
1981 TLI.getSetCCResultType(*DAG.getContext(), VT), AndOp,
1982 DAG.getConstant(0, VT), ISD::SETNE);
1983 }
1984
1985 // The branch weight from SwitchBB to B.TargetBB is B.ExtraWeight.
1986 addSuccessorWithWeight(SwitchBB, B.TargetBB, B.ExtraWeight);
1987 // The branch weight from SwitchBB to NextMBB is BranchWeightToNext.
1988 addSuccessorWithWeight(SwitchBB, NextMBB, BranchWeightToNext);
1989
1990 SDValue BrAnd = DAG.getNode(ISD::BRCOND, getCurSDLoc(),
1991 MVT::Other, getControlRoot(),
1992 Cmp, DAG.getBasicBlock(B.TargetBB));
1993
1994 // Set NextBlock to be the MBB immediately after the current one, if any.
1995 // This is used to avoid emitting unnecessary branches to the next block.
1996 MachineBasicBlock *NextBlock = nullptr;
1997 MachineFunction::iterator BBI = SwitchBB;
1998 if (++BBI != FuncInfo.MF->end())
1999 NextBlock = BBI;
2000
2001 if (NextMBB != NextBlock)
2002 BrAnd = DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other, BrAnd,
2003 DAG.getBasicBlock(NextMBB));
2004
2005 DAG.setRoot(BrAnd);
2006 }
2007
visitInvoke(const InvokeInst & I)2008 void SelectionDAGBuilder::visitInvoke(const InvokeInst &I) {
2009 MachineBasicBlock *InvokeMBB = FuncInfo.MBB;
2010
2011 // Retrieve successors.
2012 MachineBasicBlock *Return = FuncInfo.MBBMap[I.getSuccessor(0)];
2013 MachineBasicBlock *LandingPad = FuncInfo.MBBMap[I.getSuccessor(1)];
2014
2015 const Value *Callee(I.getCalledValue());
2016 const Function *Fn = dyn_cast<Function>(Callee);
2017 if (isa<InlineAsm>(Callee))
2018 visitInlineAsm(&I);
2019 else if (Fn && Fn->isIntrinsic()) {
2020 switch (Fn->getIntrinsicID()) {
2021 default:
2022 llvm_unreachable("Cannot invoke this intrinsic");
2023 case Intrinsic::donothing:
2024 // Ignore invokes to @llvm.donothing: jump directly to the next BB.
2025 break;
2026 case Intrinsic::experimental_patchpoint_void:
2027 case Intrinsic::experimental_patchpoint_i64:
2028 visitPatchpoint(&I, LandingPad);
2029 break;
2030 }
2031 } else
2032 LowerCallTo(&I, getValue(Callee), false, LandingPad);
2033
2034 // If the value of the invoke is used outside of its defining block, make it
2035 // available as a virtual register.
2036 CopyToExportRegsIfNeeded(&I);
2037
2038 // Update successor info
2039 addSuccessorWithWeight(InvokeMBB, Return);
2040 addSuccessorWithWeight(InvokeMBB, LandingPad);
2041
2042 // Drop into normal successor.
2043 DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(),
2044 MVT::Other, getControlRoot(),
2045 DAG.getBasicBlock(Return)));
2046 }
2047
visitResume(const ResumeInst & RI)2048 void SelectionDAGBuilder::visitResume(const ResumeInst &RI) {
2049 llvm_unreachable("SelectionDAGBuilder shouldn't visit resume instructions!");
2050 }
2051
visitLandingPad(const LandingPadInst & LP)2052 void SelectionDAGBuilder::visitLandingPad(const LandingPadInst &LP) {
2053 assert(FuncInfo.MBB->isLandingPad() &&
2054 "Call to landingpad not in landing pad!");
2055
2056 MachineBasicBlock *MBB = FuncInfo.MBB;
2057 MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI();
2058 AddLandingPadInfo(LP, MMI, MBB);
2059
2060 // If there aren't registers to copy the values into (e.g., during SjLj
2061 // exceptions), then don't bother to create these DAG nodes.
2062 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2063 if (TLI.getExceptionPointerRegister() == 0 &&
2064 TLI.getExceptionSelectorRegister() == 0)
2065 return;
2066
2067 SmallVector<EVT, 2> ValueVTs;
2068 ComputeValueVTs(TLI, LP.getType(), ValueVTs);
2069 assert(ValueVTs.size() == 2 && "Only two-valued landingpads are supported");
2070
2071 // Get the two live-in registers as SDValues. The physregs have already been
2072 // copied into virtual registers.
2073 SDValue Ops[2];
2074 Ops[0] = DAG.getZExtOrTrunc(
2075 DAG.getCopyFromReg(DAG.getEntryNode(), getCurSDLoc(),
2076 FuncInfo.ExceptionPointerVirtReg, TLI.getPointerTy()),
2077 getCurSDLoc(), ValueVTs[0]);
2078 Ops[1] = DAG.getZExtOrTrunc(
2079 DAG.getCopyFromReg(DAG.getEntryNode(), getCurSDLoc(),
2080 FuncInfo.ExceptionSelectorVirtReg, TLI.getPointerTy()),
2081 getCurSDLoc(), ValueVTs[1]);
2082
2083 // Merge into one.
2084 SDValue Res = DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
2085 DAG.getVTList(ValueVTs), Ops);
2086 setValue(&LP, Res);
2087 }
2088
2089 /// handleSmallSwitchCaseRange - Emit a series of specific tests (suitable for
2090 /// small case ranges).
handleSmallSwitchRange(CaseRec & CR,CaseRecVector & WorkList,const Value * SV,MachineBasicBlock * Default,MachineBasicBlock * SwitchBB)2091 bool SelectionDAGBuilder::handleSmallSwitchRange(CaseRec& CR,
2092 CaseRecVector& WorkList,
2093 const Value* SV,
2094 MachineBasicBlock *Default,
2095 MachineBasicBlock *SwitchBB) {
2096 // Size is the number of Cases represented by this range.
2097 size_t Size = CR.Range.second - CR.Range.first;
2098 if (Size > 3)
2099 return false;
2100
2101 // Get the MachineFunction which holds the current MBB. This is used when
2102 // inserting any additional MBBs necessary to represent the switch.
2103 MachineFunction *CurMF = FuncInfo.MF;
2104
2105 // Figure out which block is immediately after the current one.
2106 MachineBasicBlock *NextBlock = nullptr;
2107 MachineFunction::iterator BBI = CR.CaseBB;
2108
2109 if (++BBI != FuncInfo.MF->end())
2110 NextBlock = BBI;
2111
2112 BranchProbabilityInfo *BPI = FuncInfo.BPI;
2113 // If any two of the cases has the same destination, and if one value
2114 // is the same as the other, but has one bit unset that the other has set,
2115 // use bit manipulation to do two compares at once. For example:
2116 // "if (X == 6 || X == 4)" -> "if ((X|2) == 6)"
2117 // TODO: This could be extended to merge any 2 cases in switches with 3 cases.
2118 // TODO: Handle cases where CR.CaseBB != SwitchBB.
2119 if (Size == 2 && CR.CaseBB == SwitchBB) {
2120 Case &Small = *CR.Range.first;
2121 Case &Big = *(CR.Range.second-1);
2122
2123 if (Small.Low == Small.High && Big.Low == Big.High && Small.BB == Big.BB) {
2124 const APInt& SmallValue = cast<ConstantInt>(Small.Low)->getValue();
2125 const APInt& BigValue = cast<ConstantInt>(Big.Low)->getValue();
2126
2127 // Check that there is only one bit different.
2128 if (BigValue.countPopulation() == SmallValue.countPopulation() + 1 &&
2129 (SmallValue | BigValue) == BigValue) {
2130 // Isolate the common bit.
2131 APInt CommonBit = BigValue & ~SmallValue;
2132 assert((SmallValue | CommonBit) == BigValue &&
2133 CommonBit.countPopulation() == 1 && "Not a common bit?");
2134
2135 SDValue CondLHS = getValue(SV);
2136 EVT VT = CondLHS.getValueType();
2137 SDLoc DL = getCurSDLoc();
2138
2139 SDValue Or = DAG.getNode(ISD::OR, DL, VT, CondLHS,
2140 DAG.getConstant(CommonBit, VT));
2141 SDValue Cond = DAG.getSetCC(DL, MVT::i1,
2142 Or, DAG.getConstant(BigValue, VT),
2143 ISD::SETEQ);
2144
2145 // Update successor info.
2146 // Both Small and Big will jump to Small.BB, so we sum up the weights.
2147 addSuccessorWithWeight(SwitchBB, Small.BB,
2148 Small.ExtraWeight + Big.ExtraWeight);
2149 addSuccessorWithWeight(SwitchBB, Default,
2150 // The default destination is the first successor in IR.
2151 BPI ? BPI->getEdgeWeight(SwitchBB->getBasicBlock(), (unsigned)0) : 0);
2152
2153 // Insert the true branch.
2154 SDValue BrCond = DAG.getNode(ISD::BRCOND, DL, MVT::Other,
2155 getControlRoot(), Cond,
2156 DAG.getBasicBlock(Small.BB));
2157
2158 // Insert the false branch.
2159 BrCond = DAG.getNode(ISD::BR, DL, MVT::Other, BrCond,
2160 DAG.getBasicBlock(Default));
2161
2162 DAG.setRoot(BrCond);
2163 return true;
2164 }
2165 }
2166 }
2167
2168 // Order cases by weight so the most likely case will be checked first.
2169 uint32_t UnhandledWeights = 0;
2170 if (BPI) {
2171 for (CaseItr I = CR.Range.first, IE = CR.Range.second; I != IE; ++I) {
2172 uint32_t IWeight = I->ExtraWeight;
2173 UnhandledWeights += IWeight;
2174 for (CaseItr J = CR.Range.first; J < I; ++J) {
2175 uint32_t JWeight = J->ExtraWeight;
2176 if (IWeight > JWeight)
2177 std::swap(*I, *J);
2178 }
2179 }
2180 }
2181 // Rearrange the case blocks so that the last one falls through if possible.
2182 Case &BackCase = *(CR.Range.second-1);
2183 if (Size > 1 &&
2184 NextBlock && Default != NextBlock && BackCase.BB != NextBlock) {
2185 // The last case block won't fall through into 'NextBlock' if we emit the
2186 // branches in this order. See if rearranging a case value would help.
2187 // We start at the bottom as it's the case with the least weight.
2188 for (Case *I = &*(CR.Range.second-2), *E = &*CR.Range.first-1; I != E; --I)
2189 if (I->BB == NextBlock) {
2190 std::swap(*I, BackCase);
2191 break;
2192 }
2193 }
2194
2195 // Create a CaseBlock record representing a conditional branch to
2196 // the Case's target mbb if the value being switched on SV is equal
2197 // to C.
2198 MachineBasicBlock *CurBlock = CR.CaseBB;
2199 for (CaseItr I = CR.Range.first, E = CR.Range.second; I != E; ++I) {
2200 MachineBasicBlock *FallThrough;
2201 if (I != E-1) {
2202 FallThrough = CurMF->CreateMachineBasicBlock(CurBlock->getBasicBlock());
2203 CurMF->insert(BBI, FallThrough);
2204
2205 // Put SV in a virtual register to make it available from the new blocks.
2206 ExportFromCurrentBlock(SV);
2207 } else {
2208 // If the last case doesn't match, go to the default block.
2209 FallThrough = Default;
2210 }
2211
2212 const Value *RHS, *LHS, *MHS;
2213 ISD::CondCode CC;
2214 if (I->High == I->Low) {
2215 // This is just small small case range :) containing exactly 1 case
2216 CC = ISD::SETEQ;
2217 LHS = SV; RHS = I->High; MHS = nullptr;
2218 } else {
2219 CC = ISD::SETLE;
2220 LHS = I->Low; MHS = SV; RHS = I->High;
2221 }
2222
2223 // The false weight should be sum of all un-handled cases.
2224 UnhandledWeights -= I->ExtraWeight;
2225 CaseBlock CB(CC, LHS, RHS, MHS, /* truebb */ I->BB, /* falsebb */ FallThrough,
2226 /* me */ CurBlock,
2227 /* trueweight */ I->ExtraWeight,
2228 /* falseweight */ UnhandledWeights);
2229
2230 // If emitting the first comparison, just call visitSwitchCase to emit the
2231 // code into the current block. Otherwise, push the CaseBlock onto the
2232 // vector to be later processed by SDISel, and insert the node's MBB
2233 // before the next MBB.
2234 if (CurBlock == SwitchBB)
2235 visitSwitchCase(CB, SwitchBB);
2236 else
2237 SwitchCases.push_back(CB);
2238
2239 CurBlock = FallThrough;
2240 }
2241
2242 return true;
2243 }
2244
areJTsAllowed(const TargetLowering & TLI)2245 static inline bool areJTsAllowed(const TargetLowering &TLI) {
2246 return TLI.isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) ||
2247 TLI.isOperationLegalOrCustom(ISD::BRIND, MVT::Other);
2248 }
2249
ComputeRange(const APInt & First,const APInt & Last)2250 static APInt ComputeRange(const APInt &First, const APInt &Last) {
2251 uint32_t BitWidth = std::max(Last.getBitWidth(), First.getBitWidth()) + 1;
2252 APInt LastExt = Last.sext(BitWidth), FirstExt = First.sext(BitWidth);
2253 return (LastExt - FirstExt + 1ULL);
2254 }
2255
2256 /// handleJTSwitchCase - Emit jumptable for current switch case range
handleJTSwitchCase(CaseRec & CR,CaseRecVector & WorkList,const Value * SV,MachineBasicBlock * Default,MachineBasicBlock * SwitchBB)2257 bool SelectionDAGBuilder::handleJTSwitchCase(CaseRec &CR,
2258 CaseRecVector &WorkList,
2259 const Value *SV,
2260 MachineBasicBlock *Default,
2261 MachineBasicBlock *SwitchBB) {
2262 Case& FrontCase = *CR.Range.first;
2263 Case& BackCase = *(CR.Range.second-1);
2264
2265 const APInt &First = cast<ConstantInt>(FrontCase.Low)->getValue();
2266 const APInt &Last = cast<ConstantInt>(BackCase.High)->getValue();
2267
2268 APInt TSize(First.getBitWidth(), 0);
2269 for (CaseItr I = CR.Range.first, E = CR.Range.second; I != E; ++I)
2270 TSize += I->size();
2271
2272 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2273 if (!areJTsAllowed(TLI) || TSize.ult(TLI.getMinimumJumpTableEntries()))
2274 return false;
2275
2276 APInt Range = ComputeRange(First, Last);
2277 // The density is TSize / Range. Require at least 40%.
2278 // It should not be possible for IntTSize to saturate for sane code, but make
2279 // sure we handle Range saturation correctly.
2280 uint64_t IntRange = Range.getLimitedValue(UINT64_MAX/10);
2281 uint64_t IntTSize = TSize.getLimitedValue(UINT64_MAX/10);
2282 if (IntTSize * 10 < IntRange * 4)
2283 return false;
2284
2285 DEBUG(dbgs() << "Lowering jump table\n"
2286 << "First entry: " << First << ". Last entry: " << Last << '\n'
2287 << "Range: " << Range << ". Size: " << TSize << ".\n\n");
2288
2289 // Get the MachineFunction which holds the current MBB. This is used when
2290 // inserting any additional MBBs necessary to represent the switch.
2291 MachineFunction *CurMF = FuncInfo.MF;
2292
2293 // Figure out which block is immediately after the current one.
2294 MachineFunction::iterator BBI = CR.CaseBB;
2295 ++BBI;
2296
2297 const BasicBlock *LLVMBB = CR.CaseBB->getBasicBlock();
2298
2299 // Create a new basic block to hold the code for loading the address
2300 // of the jump table, and jumping to it. Update successor information;
2301 // we will either branch to the default case for the switch, or the jump
2302 // table.
2303 MachineBasicBlock *JumpTableBB = CurMF->CreateMachineBasicBlock(LLVMBB);
2304 CurMF->insert(BBI, JumpTableBB);
2305
2306 addSuccessorWithWeight(CR.CaseBB, Default);
2307 addSuccessorWithWeight(CR.CaseBB, JumpTableBB);
2308
2309 // Build a vector of destination BBs, corresponding to each target
2310 // of the jump table. If the value of the jump table slot corresponds to
2311 // a case statement, push the case's BB onto the vector, otherwise, push
2312 // the default BB.
2313 std::vector<MachineBasicBlock*> DestBBs;
2314 APInt TEI = First;
2315 for (CaseItr I = CR.Range.first, E = CR.Range.second; I != E; ++TEI) {
2316 const APInt &Low = cast<ConstantInt>(I->Low)->getValue();
2317 const APInt &High = cast<ConstantInt>(I->High)->getValue();
2318
2319 if (Low.sle(TEI) && TEI.sle(High)) {
2320 DestBBs.push_back(I->BB);
2321 if (TEI==High)
2322 ++I;
2323 } else {
2324 DestBBs.push_back(Default);
2325 }
2326 }
2327
2328 // Calculate weight for each unique destination in CR.
2329 DenseMap<MachineBasicBlock*, uint32_t> DestWeights;
2330 if (FuncInfo.BPI)
2331 for (CaseItr I = CR.Range.first, E = CR.Range.second; I != E; ++I) {
2332 DenseMap<MachineBasicBlock*, uint32_t>::iterator Itr =
2333 DestWeights.find(I->BB);
2334 if (Itr != DestWeights.end())
2335 Itr->second += I->ExtraWeight;
2336 else
2337 DestWeights[I->BB] = I->ExtraWeight;
2338 }
2339
2340 // Update successor info. Add one edge to each unique successor.
2341 BitVector SuccsHandled(CR.CaseBB->getParent()->getNumBlockIDs());
2342 for (std::vector<MachineBasicBlock*>::iterator I = DestBBs.begin(),
2343 E = DestBBs.end(); I != E; ++I) {
2344 if (!SuccsHandled[(*I)->getNumber()]) {
2345 SuccsHandled[(*I)->getNumber()] = true;
2346 DenseMap<MachineBasicBlock*, uint32_t>::iterator Itr =
2347 DestWeights.find(*I);
2348 addSuccessorWithWeight(JumpTableBB, *I,
2349 Itr != DestWeights.end() ? Itr->second : 0);
2350 }
2351 }
2352
2353 // Create a jump table index for this jump table.
2354 unsigned JTEncoding = TLI.getJumpTableEncoding();
2355 unsigned JTI = CurMF->getOrCreateJumpTableInfo(JTEncoding)
2356 ->createJumpTableIndex(DestBBs);
2357
2358 // Set the jump table information so that we can codegen it as a second
2359 // MachineBasicBlock
2360 JumpTable JT(-1U, JTI, JumpTableBB, Default);
2361 JumpTableHeader JTH(First, Last, SV, CR.CaseBB, (CR.CaseBB == SwitchBB));
2362 if (CR.CaseBB == SwitchBB)
2363 visitJumpTableHeader(JT, JTH, SwitchBB);
2364
2365 JTCases.push_back(JumpTableBlock(JTH, JT));
2366 return true;
2367 }
2368
2369 /// handleBTSplitSwitchCase - emit comparison and split binary search tree into
2370 /// 2 subtrees.
handleBTSplitSwitchCase(CaseRec & CR,CaseRecVector & WorkList,const Value * SV,MachineBasicBlock * SwitchBB)2371 bool SelectionDAGBuilder::handleBTSplitSwitchCase(CaseRec& CR,
2372 CaseRecVector& WorkList,
2373 const Value* SV,
2374 MachineBasicBlock* SwitchBB) {
2375 // Get the MachineFunction which holds the current MBB. This is used when
2376 // inserting any additional MBBs necessary to represent the switch.
2377 MachineFunction *CurMF = FuncInfo.MF;
2378
2379 // Figure out which block is immediately after the current one.
2380 MachineFunction::iterator BBI = CR.CaseBB;
2381 ++BBI;
2382
2383 Case& FrontCase = *CR.Range.first;
2384 Case& BackCase = *(CR.Range.second-1);
2385 const BasicBlock *LLVMBB = CR.CaseBB->getBasicBlock();
2386
2387 // Size is the number of Cases represented by this range.
2388 unsigned Size = CR.Range.second - CR.Range.first;
2389
2390 const APInt &First = cast<ConstantInt>(FrontCase.Low)->getValue();
2391 const APInt &Last = cast<ConstantInt>(BackCase.High)->getValue();
2392 double FMetric = 0;
2393 CaseItr Pivot = CR.Range.first + Size/2;
2394
2395 // Select optimal pivot, maximizing sum density of LHS and RHS. This will
2396 // (heuristically) allow us to emit JumpTable's later.
2397 APInt TSize(First.getBitWidth(), 0);
2398 for (CaseItr I = CR.Range.first, E = CR.Range.second;
2399 I!=E; ++I)
2400 TSize += I->size();
2401
2402 APInt LSize = FrontCase.size();
2403 APInt RSize = TSize-LSize;
2404 DEBUG(dbgs() << "Selecting best pivot: \n"
2405 << "First: " << First << ", Last: " << Last <<'\n'
2406 << "LSize: " << LSize << ", RSize: " << RSize << '\n');
2407 for (CaseItr I = CR.Range.first, J=I+1, E = CR.Range.second;
2408 J!=E; ++I, ++J) {
2409 const APInt &LEnd = cast<ConstantInt>(I->High)->getValue();
2410 const APInt &RBegin = cast<ConstantInt>(J->Low)->getValue();
2411 APInt Range = ComputeRange(LEnd, RBegin);
2412 assert((Range - 2ULL).isNonNegative() &&
2413 "Invalid case distance");
2414 // Use volatile double here to avoid excess precision issues on some hosts,
2415 // e.g. that use 80-bit X87 registers.
2416 volatile double LDensity =
2417 (double)LSize.roundToDouble() /
2418 (LEnd - First + 1ULL).roundToDouble();
2419 volatile double RDensity =
2420 (double)RSize.roundToDouble() /
2421 (Last - RBegin + 1ULL).roundToDouble();
2422 volatile double Metric = Range.logBase2()*(LDensity+RDensity);
2423 // Should always split in some non-trivial place
2424 DEBUG(dbgs() <<"=>Step\n"
2425 << "LEnd: " << LEnd << ", RBegin: " << RBegin << '\n'
2426 << "LDensity: " << LDensity
2427 << ", RDensity: " << RDensity << '\n'
2428 << "Metric: " << Metric << '\n');
2429 if (FMetric < Metric) {
2430 Pivot = J;
2431 FMetric = Metric;
2432 DEBUG(dbgs() << "Current metric set to: " << FMetric << '\n');
2433 }
2434
2435 LSize += J->size();
2436 RSize -= J->size();
2437 }
2438
2439 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2440 if (areJTsAllowed(TLI)) {
2441 // If our case is dense we *really* should handle it earlier!
2442 assert((FMetric > 0) && "Should handle dense range earlier!");
2443 } else {
2444 Pivot = CR.Range.first + Size/2;
2445 }
2446
2447 CaseRange LHSR(CR.Range.first, Pivot);
2448 CaseRange RHSR(Pivot, CR.Range.second);
2449 const Constant *C = Pivot->Low;
2450 MachineBasicBlock *FalseBB = nullptr, *TrueBB = nullptr;
2451
2452 // We know that we branch to the LHS if the Value being switched on is
2453 // less than the Pivot value, C. We use this to optimize our binary
2454 // tree a bit, by recognizing that if SV is greater than or equal to the
2455 // LHS's Case Value, and that Case Value is exactly one less than the
2456 // Pivot's Value, then we can branch directly to the LHS's Target,
2457 // rather than creating a leaf node for it.
2458 if ((LHSR.second - LHSR.first) == 1 &&
2459 LHSR.first->High == CR.GE &&
2460 cast<ConstantInt>(C)->getValue() ==
2461 (cast<ConstantInt>(CR.GE)->getValue() + 1LL)) {
2462 TrueBB = LHSR.first->BB;
2463 } else {
2464 TrueBB = CurMF->CreateMachineBasicBlock(LLVMBB);
2465 CurMF->insert(BBI, TrueBB);
2466 WorkList.push_back(CaseRec(TrueBB, C, CR.GE, LHSR));
2467
2468 // Put SV in a virtual register to make it available from the new blocks.
2469 ExportFromCurrentBlock(SV);
2470 }
2471
2472 // Similar to the optimization above, if the Value being switched on is
2473 // known to be less than the Constant CR.LT, and the current Case Value
2474 // is CR.LT - 1, then we can branch directly to the target block for
2475 // the current Case Value, rather than emitting a RHS leaf node for it.
2476 if ((RHSR.second - RHSR.first) == 1 && CR.LT &&
2477 cast<ConstantInt>(RHSR.first->Low)->getValue() ==
2478 (cast<ConstantInt>(CR.LT)->getValue() - 1LL)) {
2479 FalseBB = RHSR.first->BB;
2480 } else {
2481 FalseBB = CurMF->CreateMachineBasicBlock(LLVMBB);
2482 CurMF->insert(BBI, FalseBB);
2483 WorkList.push_back(CaseRec(FalseBB,CR.LT,C,RHSR));
2484
2485 // Put SV in a virtual register to make it available from the new blocks.
2486 ExportFromCurrentBlock(SV);
2487 }
2488
2489 // Create a CaseBlock record representing a conditional branch to
2490 // the LHS node if the value being switched on SV is less than C.
2491 // Otherwise, branch to LHS.
2492 CaseBlock CB(ISD::SETLT, SV, C, nullptr, TrueBB, FalseBB, CR.CaseBB);
2493
2494 if (CR.CaseBB == SwitchBB)
2495 visitSwitchCase(CB, SwitchBB);
2496 else
2497 SwitchCases.push_back(CB);
2498
2499 return true;
2500 }
2501
2502 /// handleBitTestsSwitchCase - if current case range has few destination and
2503 /// range span less, than machine word bitwidth, encode case range into series
2504 /// of masks and emit bit tests with these masks.
handleBitTestsSwitchCase(CaseRec & CR,CaseRecVector & WorkList,const Value * SV,MachineBasicBlock * Default,MachineBasicBlock * SwitchBB)2505 bool SelectionDAGBuilder::handleBitTestsSwitchCase(CaseRec& CR,
2506 CaseRecVector& WorkList,
2507 const Value* SV,
2508 MachineBasicBlock* Default,
2509 MachineBasicBlock* SwitchBB) {
2510 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2511 EVT PTy = TLI.getPointerTy();
2512 unsigned IntPtrBits = PTy.getSizeInBits();
2513
2514 Case& FrontCase = *CR.Range.first;
2515 Case& BackCase = *(CR.Range.second-1);
2516
2517 // Get the MachineFunction which holds the current MBB. This is used when
2518 // inserting any additional MBBs necessary to represent the switch.
2519 MachineFunction *CurMF = FuncInfo.MF;
2520
2521 // If target does not have legal shift left, do not emit bit tests at all.
2522 if (!TLI.isOperationLegal(ISD::SHL, PTy))
2523 return false;
2524
2525 size_t numCmps = 0;
2526 for (CaseItr I = CR.Range.first, E = CR.Range.second; I != E; ++I) {
2527 // Single case counts one, case range - two.
2528 numCmps += (I->Low == I->High ? 1 : 2);
2529 }
2530
2531 // Count unique destinations
2532 SmallSet<MachineBasicBlock*, 4> Dests;
2533 for (CaseItr I = CR.Range.first, E = CR.Range.second; I != E; ++I) {
2534 Dests.insert(I->BB);
2535 if (Dests.size() > 3)
2536 // Don't bother the code below, if there are too much unique destinations
2537 return false;
2538 }
2539 DEBUG(dbgs() << "Total number of unique destinations: "
2540 << Dests.size() << '\n'
2541 << "Total number of comparisons: " << numCmps << '\n');
2542
2543 // Compute span of values.
2544 const APInt& minValue = cast<ConstantInt>(FrontCase.Low)->getValue();
2545 const APInt& maxValue = cast<ConstantInt>(BackCase.High)->getValue();
2546 APInt cmpRange = maxValue - minValue;
2547
2548 DEBUG(dbgs() << "Compare range: " << cmpRange << '\n'
2549 << "Low bound: " << minValue << '\n'
2550 << "High bound: " << maxValue << '\n');
2551
2552 if (cmpRange.uge(IntPtrBits) ||
2553 (!(Dests.size() == 1 && numCmps >= 3) &&
2554 !(Dests.size() == 2 && numCmps >= 5) &&
2555 !(Dests.size() >= 3 && numCmps >= 6)))
2556 return false;
2557
2558 DEBUG(dbgs() << "Emitting bit tests\n");
2559 APInt lowBound = APInt::getNullValue(cmpRange.getBitWidth());
2560
2561 // Optimize the case where all the case values fit in a
2562 // word without having to subtract minValue. In this case,
2563 // we can optimize away the subtraction.
2564 if (minValue.isNonNegative() && maxValue.slt(IntPtrBits)) {
2565 cmpRange = maxValue;
2566 } else {
2567 lowBound = minValue;
2568 }
2569
2570 CaseBitsVector CasesBits;
2571 unsigned i, count = 0;
2572
2573 for (CaseItr I = CR.Range.first, E = CR.Range.second; I!=E; ++I) {
2574 MachineBasicBlock* Dest = I->BB;
2575 for (i = 0; i < count; ++i)
2576 if (Dest == CasesBits[i].BB)
2577 break;
2578
2579 if (i == count) {
2580 assert((count < 3) && "Too much destinations to test!");
2581 CasesBits.push_back(CaseBits(0, Dest, 0, 0/*Weight*/));
2582 count++;
2583 }
2584
2585 const APInt& lowValue = cast<ConstantInt>(I->Low)->getValue();
2586 const APInt& highValue = cast<ConstantInt>(I->High)->getValue();
2587
2588 uint64_t lo = (lowValue - lowBound).getZExtValue();
2589 uint64_t hi = (highValue - lowBound).getZExtValue();
2590 CasesBits[i].ExtraWeight += I->ExtraWeight;
2591
2592 for (uint64_t j = lo; j <= hi; j++) {
2593 CasesBits[i].Mask |= 1ULL << j;
2594 CasesBits[i].Bits++;
2595 }
2596
2597 }
2598 std::sort(CasesBits.begin(), CasesBits.end(), CaseBitsCmp());
2599
2600 BitTestInfo BTC;
2601
2602 // Figure out which block is immediately after the current one.
2603 MachineFunction::iterator BBI = CR.CaseBB;
2604 ++BBI;
2605
2606 const BasicBlock *LLVMBB = CR.CaseBB->getBasicBlock();
2607
2608 DEBUG(dbgs() << "Cases:\n");
2609 for (unsigned i = 0, e = CasesBits.size(); i!=e; ++i) {
2610 DEBUG(dbgs() << "Mask: " << CasesBits[i].Mask
2611 << ", Bits: " << CasesBits[i].Bits
2612 << ", BB: " << CasesBits[i].BB << '\n');
2613
2614 MachineBasicBlock *CaseBB = CurMF->CreateMachineBasicBlock(LLVMBB);
2615 CurMF->insert(BBI, CaseBB);
2616 BTC.push_back(BitTestCase(CasesBits[i].Mask,
2617 CaseBB,
2618 CasesBits[i].BB, CasesBits[i].ExtraWeight));
2619
2620 // Put SV in a virtual register to make it available from the new blocks.
2621 ExportFromCurrentBlock(SV);
2622 }
2623
2624 BitTestBlock BTB(lowBound, cmpRange, SV,
2625 -1U, MVT::Other, (CR.CaseBB == SwitchBB),
2626 CR.CaseBB, Default, std::move(BTC));
2627
2628 if (CR.CaseBB == SwitchBB)
2629 visitBitTestHeader(BTB, SwitchBB);
2630
2631 BitTestCases.push_back(std::move(BTB));
2632
2633 return true;
2634 }
2635
2636 /// Clusterify - Transform simple list of Cases into list of CaseRange's
Clusterify(CaseVector & Cases,const SwitchInst & SI)2637 void SelectionDAGBuilder::Clusterify(CaseVector& Cases,
2638 const SwitchInst& SI) {
2639 BranchProbabilityInfo *BPI = FuncInfo.BPI;
2640 // Start with "simple" cases.
2641 for (SwitchInst::ConstCaseIt i : SI.cases()) {
2642 const BasicBlock *SuccBB = i.getCaseSuccessor();
2643 MachineBasicBlock *SMBB = FuncInfo.MBBMap[SuccBB];
2644
2645 uint32_t ExtraWeight =
2646 BPI ? BPI->getEdgeWeight(SI.getParent(), i.getSuccessorIndex()) : 0;
2647
2648 Cases.push_back(Case(i.getCaseValue(), i.getCaseValue(),
2649 SMBB, ExtraWeight));
2650 }
2651 std::sort(Cases.begin(), Cases.end(), CaseCmp());
2652
2653 // Merge case into clusters
2654 if (Cases.size() >= 2)
2655 // Must recompute end() each iteration because it may be
2656 // invalidated by erase if we hold on to it
2657 for (CaseItr I = Cases.begin(), J = std::next(Cases.begin());
2658 J != Cases.end(); ) {
2659 const APInt& nextValue = cast<ConstantInt>(J->Low)->getValue();
2660 const APInt& currentValue = cast<ConstantInt>(I->High)->getValue();
2661 MachineBasicBlock* nextBB = J->BB;
2662 MachineBasicBlock* currentBB = I->BB;
2663
2664 // If the two neighboring cases go to the same destination, merge them
2665 // into a single case.
2666 if ((nextValue - currentValue == 1) && (currentBB == nextBB)) {
2667 I->High = J->High;
2668 I->ExtraWeight += J->ExtraWeight;
2669 J = Cases.erase(J);
2670 } else {
2671 I = J++;
2672 }
2673 }
2674
2675 DEBUG({
2676 size_t numCmps = 0;
2677 for (auto &I : Cases)
2678 // A range counts double, since it requires two compares.
2679 numCmps += I.Low != I.High ? 2 : 1;
2680
2681 dbgs() << "Clusterify finished. Total clusters: " << Cases.size()
2682 << ". Total compares: " << numCmps << '\n';
2683 });
2684 }
2685
UpdateSplitBlock(MachineBasicBlock * First,MachineBasicBlock * Last)2686 void SelectionDAGBuilder::UpdateSplitBlock(MachineBasicBlock *First,
2687 MachineBasicBlock *Last) {
2688 // Update JTCases.
2689 for (unsigned i = 0, e = JTCases.size(); i != e; ++i)
2690 if (JTCases[i].first.HeaderBB == First)
2691 JTCases[i].first.HeaderBB = Last;
2692
2693 // Update BitTestCases.
2694 for (unsigned i = 0, e = BitTestCases.size(); i != e; ++i)
2695 if (BitTestCases[i].Parent == First)
2696 BitTestCases[i].Parent = Last;
2697 }
2698
visitSwitch(const SwitchInst & SI)2699 void SelectionDAGBuilder::visitSwitch(const SwitchInst &SI) {
2700 MachineBasicBlock *SwitchMBB = FuncInfo.MBB;
2701
2702 // Figure out which block is immediately after the current one.
2703 MachineBasicBlock *NextBlock = nullptr;
2704 if (SwitchMBB + 1 != FuncInfo.MF->end())
2705 NextBlock = SwitchMBB + 1;
2706
2707
2708 // Create a vector of Cases, sorted so that we can efficiently create a binary
2709 // search tree from them.
2710 CaseVector Cases;
2711 Clusterify(Cases, SI);
2712
2713 // Get the default destination MBB.
2714 MachineBasicBlock *Default = FuncInfo.MBBMap[SI.getDefaultDest()];
2715
2716 if (isa<UnreachableInst>(SI.getDefaultDest()->getFirstNonPHIOrDbg()) &&
2717 !Cases.empty()) {
2718 // Replace an unreachable default destination with the most popular case
2719 // destination.
2720 DenseMap<const BasicBlock *, unsigned> Popularity;
2721 unsigned MaxPop = 0;
2722 const BasicBlock *MaxBB = nullptr;
2723 for (auto I : SI.cases()) {
2724 const BasicBlock *BB = I.getCaseSuccessor();
2725 if (++Popularity[BB] > MaxPop) {
2726 MaxPop = Popularity[BB];
2727 MaxBB = BB;
2728 }
2729 }
2730
2731 // Set new default.
2732 assert(MaxPop > 0);
2733 assert(MaxBB);
2734 Default = FuncInfo.MBBMap[MaxBB];
2735
2736 // Remove cases that were pointing to the destination that is now the default.
2737 Cases.erase(std::remove_if(Cases.begin(), Cases.end(),
2738 [&](const Case &C) { return C.BB == Default; }),
2739 Cases.end());
2740 }
2741
2742 // If there is only the default destination, go there directly.
2743 if (Cases.empty()) {
2744 // Update machine-CFG edges.
2745 SwitchMBB->addSuccessor(Default);
2746
2747 // If this is not a fall-through branch, emit the branch.
2748 if (Default != NextBlock) {
2749 DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other,
2750 getControlRoot(), DAG.getBasicBlock(Default)));
2751 }
2752 return;
2753 }
2754
2755 // Get the Value to be switched on.
2756 const Value *SV = SI.getCondition();
2757
2758 // Push the initial CaseRec onto the worklist
2759 CaseRecVector WorkList;
2760 WorkList.push_back(CaseRec(SwitchMBB,nullptr,nullptr,
2761 CaseRange(Cases.begin(),Cases.end())));
2762
2763 while (!WorkList.empty()) {
2764 // Grab a record representing a case range to process off the worklist
2765 CaseRec CR = WorkList.back();
2766 WorkList.pop_back();
2767
2768 if (handleBitTestsSwitchCase(CR, WorkList, SV, Default, SwitchMBB))
2769 continue;
2770
2771 // If the range has few cases (two or less) emit a series of specific
2772 // tests.
2773 if (handleSmallSwitchRange(CR, WorkList, SV, Default, SwitchMBB))
2774 continue;
2775
2776 // If the switch has more than N blocks, and is at least 40% dense, and the
2777 // target supports indirect branches, then emit a jump table rather than
2778 // lowering the switch to a binary tree of conditional branches.
2779 // N defaults to 4 and is controlled via TLS.getMinimumJumpTableEntries().
2780 if (handleJTSwitchCase(CR, WorkList, SV, Default, SwitchMBB))
2781 continue;
2782
2783 // Emit binary tree. We need to pick a pivot, and push left and right ranges
2784 // onto the worklist. Leafs are handled via handleSmallSwitchRange() call.
2785 handleBTSplitSwitchCase(CR, WorkList, SV, SwitchMBB);
2786 }
2787 }
2788
visitIndirectBr(const IndirectBrInst & I)2789 void SelectionDAGBuilder::visitIndirectBr(const IndirectBrInst &I) {
2790 MachineBasicBlock *IndirectBrMBB = FuncInfo.MBB;
2791
2792 // Update machine-CFG edges with unique successors.
2793 SmallSet<BasicBlock*, 32> Done;
2794 for (unsigned i = 0, e = I.getNumSuccessors(); i != e; ++i) {
2795 BasicBlock *BB = I.getSuccessor(i);
2796 bool Inserted = Done.insert(BB).second;
2797 if (!Inserted)
2798 continue;
2799
2800 MachineBasicBlock *Succ = FuncInfo.MBBMap[BB];
2801 addSuccessorWithWeight(IndirectBrMBB, Succ);
2802 }
2803
2804 DAG.setRoot(DAG.getNode(ISD::BRIND, getCurSDLoc(),
2805 MVT::Other, getControlRoot(),
2806 getValue(I.getAddress())));
2807 }
2808
visitUnreachable(const UnreachableInst & I)2809 void SelectionDAGBuilder::visitUnreachable(const UnreachableInst &I) {
2810 if (DAG.getTarget().Options.TrapUnreachable)
2811 DAG.setRoot(DAG.getNode(ISD::TRAP, getCurSDLoc(), MVT::Other, DAG.getRoot()));
2812 }
2813
visitFSub(const User & I)2814 void SelectionDAGBuilder::visitFSub(const User &I) {
2815 // -0.0 - X --> fneg
2816 Type *Ty = I.getType();
2817 if (isa<Constant>(I.getOperand(0)) &&
2818 I.getOperand(0) == ConstantFP::getZeroValueForNegation(Ty)) {
2819 SDValue Op2 = getValue(I.getOperand(1));
2820 setValue(&I, DAG.getNode(ISD::FNEG, getCurSDLoc(),
2821 Op2.getValueType(), Op2));
2822 return;
2823 }
2824
2825 visitBinary(I, ISD::FSUB);
2826 }
2827
visitBinary(const User & I,unsigned OpCode)2828 void SelectionDAGBuilder::visitBinary(const User &I, unsigned OpCode) {
2829 SDValue Op1 = getValue(I.getOperand(0));
2830 SDValue Op2 = getValue(I.getOperand(1));
2831
2832 bool nuw = false;
2833 bool nsw = false;
2834 bool exact = false;
2835 if (const OverflowingBinaryOperator *OFBinOp =
2836 dyn_cast<const OverflowingBinaryOperator>(&I)) {
2837 nuw = OFBinOp->hasNoUnsignedWrap();
2838 nsw = OFBinOp->hasNoSignedWrap();
2839 }
2840 if (const PossiblyExactOperator *ExactOp =
2841 dyn_cast<const PossiblyExactOperator>(&I))
2842 exact = ExactOp->isExact();
2843
2844 SDValue BinNodeValue = DAG.getNode(OpCode, getCurSDLoc(), Op1.getValueType(),
2845 Op1, Op2, nuw, nsw, exact);
2846 setValue(&I, BinNodeValue);
2847 }
2848
visitShift(const User & I,unsigned Opcode)2849 void SelectionDAGBuilder::visitShift(const User &I, unsigned Opcode) {
2850 SDValue Op1 = getValue(I.getOperand(0));
2851 SDValue Op2 = getValue(I.getOperand(1));
2852
2853 EVT ShiftTy =
2854 DAG.getTargetLoweringInfo().getShiftAmountTy(Op2.getValueType());
2855
2856 // Coerce the shift amount to the right type if we can.
2857 if (!I.getType()->isVectorTy() && Op2.getValueType() != ShiftTy) {
2858 unsigned ShiftSize = ShiftTy.getSizeInBits();
2859 unsigned Op2Size = Op2.getValueType().getSizeInBits();
2860 SDLoc DL = getCurSDLoc();
2861
2862 // If the operand is smaller than the shift count type, promote it.
2863 if (ShiftSize > Op2Size)
2864 Op2 = DAG.getNode(ISD::ZERO_EXTEND, DL, ShiftTy, Op2);
2865
2866 // If the operand is larger than the shift count type but the shift
2867 // count type has enough bits to represent any shift value, truncate
2868 // it now. This is a common case and it exposes the truncate to
2869 // optimization early.
2870 else if (ShiftSize >= Log2_32_Ceil(Op2.getValueType().getSizeInBits()))
2871 Op2 = DAG.getNode(ISD::TRUNCATE, DL, ShiftTy, Op2);
2872 // Otherwise we'll need to temporarily settle for some other convenient
2873 // type. Type legalization will make adjustments once the shiftee is split.
2874 else
2875 Op2 = DAG.getZExtOrTrunc(Op2, DL, MVT::i32);
2876 }
2877
2878 bool nuw = false;
2879 bool nsw = false;
2880 bool exact = false;
2881
2882 if (Opcode == ISD::SRL || Opcode == ISD::SRA || Opcode == ISD::SHL) {
2883
2884 if (const OverflowingBinaryOperator *OFBinOp =
2885 dyn_cast<const OverflowingBinaryOperator>(&I)) {
2886 nuw = OFBinOp->hasNoUnsignedWrap();
2887 nsw = OFBinOp->hasNoSignedWrap();
2888 }
2889 if (const PossiblyExactOperator *ExactOp =
2890 dyn_cast<const PossiblyExactOperator>(&I))
2891 exact = ExactOp->isExact();
2892 }
2893
2894 SDValue Res = DAG.getNode(Opcode, getCurSDLoc(), Op1.getValueType(), Op1, Op2,
2895 nuw, nsw, exact);
2896 setValue(&I, Res);
2897 }
2898
visitSDiv(const User & I)2899 void SelectionDAGBuilder::visitSDiv(const User &I) {
2900 SDValue Op1 = getValue(I.getOperand(0));
2901 SDValue Op2 = getValue(I.getOperand(1));
2902
2903 // Turn exact SDivs into multiplications.
2904 // FIXME: This should be in DAGCombiner, but it doesn't have access to the
2905 // exact bit.
2906 if (isa<BinaryOperator>(&I) && cast<BinaryOperator>(&I)->isExact() &&
2907 !isa<ConstantSDNode>(Op1) &&
2908 isa<ConstantSDNode>(Op2) && !cast<ConstantSDNode>(Op2)->isNullValue())
2909 setValue(&I, DAG.getTargetLoweringInfo()
2910 .BuildExactSDIV(Op1, Op2, getCurSDLoc(), DAG));
2911 else
2912 setValue(&I, DAG.getNode(ISD::SDIV, getCurSDLoc(), Op1.getValueType(),
2913 Op1, Op2));
2914 }
2915
visitICmp(const User & I)2916 void SelectionDAGBuilder::visitICmp(const User &I) {
2917 ICmpInst::Predicate predicate = ICmpInst::BAD_ICMP_PREDICATE;
2918 if (const ICmpInst *IC = dyn_cast<ICmpInst>(&I))
2919 predicate = IC->getPredicate();
2920 else if (const ConstantExpr *IC = dyn_cast<ConstantExpr>(&I))
2921 predicate = ICmpInst::Predicate(IC->getPredicate());
2922 SDValue Op1 = getValue(I.getOperand(0));
2923 SDValue Op2 = getValue(I.getOperand(1));
2924 ISD::CondCode Opcode = getICmpCondCode(predicate);
2925
2926 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(I.getType());
2927 setValue(&I, DAG.getSetCC(getCurSDLoc(), DestVT, Op1, Op2, Opcode));
2928 }
2929
visitFCmp(const User & I)2930 void SelectionDAGBuilder::visitFCmp(const User &I) {
2931 FCmpInst::Predicate predicate = FCmpInst::BAD_FCMP_PREDICATE;
2932 if (const FCmpInst *FC = dyn_cast<FCmpInst>(&I))
2933 predicate = FC->getPredicate();
2934 else if (const ConstantExpr *FC = dyn_cast<ConstantExpr>(&I))
2935 predicate = FCmpInst::Predicate(FC->getPredicate());
2936 SDValue Op1 = getValue(I.getOperand(0));
2937 SDValue Op2 = getValue(I.getOperand(1));
2938 ISD::CondCode Condition = getFCmpCondCode(predicate);
2939 if (TM.Options.NoNaNsFPMath)
2940 Condition = getFCmpCodeWithoutNaN(Condition);
2941 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(I.getType());
2942 setValue(&I, DAG.getSetCC(getCurSDLoc(), DestVT, Op1, Op2, Condition));
2943 }
2944
visitSelect(const User & I)2945 void SelectionDAGBuilder::visitSelect(const User &I) {
2946 SmallVector<EVT, 4> ValueVTs;
2947 ComputeValueVTs(DAG.getTargetLoweringInfo(), I.getType(), ValueVTs);
2948 unsigned NumValues = ValueVTs.size();
2949 if (NumValues == 0) return;
2950
2951 SmallVector<SDValue, 4> Values(NumValues);
2952 SDValue Cond = getValue(I.getOperand(0));
2953 SDValue TrueVal = getValue(I.getOperand(1));
2954 SDValue FalseVal = getValue(I.getOperand(2));
2955 ISD::NodeType OpCode = Cond.getValueType().isVector() ?
2956 ISD::VSELECT : ISD::SELECT;
2957
2958 for (unsigned i = 0; i != NumValues; ++i)
2959 Values[i] = DAG.getNode(OpCode, getCurSDLoc(),
2960 TrueVal.getNode()->getValueType(TrueVal.getResNo()+i),
2961 Cond,
2962 SDValue(TrueVal.getNode(),
2963 TrueVal.getResNo() + i),
2964 SDValue(FalseVal.getNode(),
2965 FalseVal.getResNo() + i));
2966
2967 setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
2968 DAG.getVTList(ValueVTs), Values));
2969 }
2970
visitTrunc(const User & I)2971 void SelectionDAGBuilder::visitTrunc(const User &I) {
2972 // TruncInst cannot be a no-op cast because sizeof(src) > sizeof(dest).
2973 SDValue N = getValue(I.getOperand(0));
2974 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(I.getType());
2975 setValue(&I, DAG.getNode(ISD::TRUNCATE, getCurSDLoc(), DestVT, N));
2976 }
2977
visitZExt(const User & I)2978 void SelectionDAGBuilder::visitZExt(const User &I) {
2979 // ZExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
2980 // ZExt also can't be a cast to bool for same reason. So, nothing much to do
2981 SDValue N = getValue(I.getOperand(0));
2982 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(I.getType());
2983 setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, getCurSDLoc(), DestVT, N));
2984 }
2985
visitSExt(const User & I)2986 void SelectionDAGBuilder::visitSExt(const User &I) {
2987 // SExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
2988 // SExt also can't be a cast to bool for same reason. So, nothing much to do
2989 SDValue N = getValue(I.getOperand(0));
2990 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(I.getType());
2991 setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, getCurSDLoc(), DestVT, N));
2992 }
2993
visitFPTrunc(const User & I)2994 void SelectionDAGBuilder::visitFPTrunc(const User &I) {
2995 // FPTrunc is never a no-op cast, no need to check
2996 SDValue N = getValue(I.getOperand(0));
2997 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2998 EVT DestVT = TLI.getValueType(I.getType());
2999 setValue(&I, DAG.getNode(ISD::FP_ROUND, getCurSDLoc(), DestVT, N,
3000 DAG.getTargetConstant(0, TLI.getPointerTy())));
3001 }
3002
visitFPExt(const User & I)3003 void SelectionDAGBuilder::visitFPExt(const User &I) {
3004 // FPExt is never a no-op cast, no need to check
3005 SDValue N = getValue(I.getOperand(0));
3006 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(I.getType());
3007 setValue(&I, DAG.getNode(ISD::FP_EXTEND, getCurSDLoc(), DestVT, N));
3008 }
3009
visitFPToUI(const User & I)3010 void SelectionDAGBuilder::visitFPToUI(const User &I) {
3011 // FPToUI is never a no-op cast, no need to check
3012 SDValue N = getValue(I.getOperand(0));
3013 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(I.getType());
3014 setValue(&I, DAG.getNode(ISD::FP_TO_UINT, getCurSDLoc(), DestVT, N));
3015 }
3016
visitFPToSI(const User & I)3017 void SelectionDAGBuilder::visitFPToSI(const User &I) {
3018 // FPToSI is never a no-op cast, no need to check
3019 SDValue N = getValue(I.getOperand(0));
3020 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(I.getType());
3021 setValue(&I, DAG.getNode(ISD::FP_TO_SINT, getCurSDLoc(), DestVT, N));
3022 }
3023
visitUIToFP(const User & I)3024 void SelectionDAGBuilder::visitUIToFP(const User &I) {
3025 // UIToFP is never a no-op cast, no need to check
3026 SDValue N = getValue(I.getOperand(0));
3027 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(I.getType());
3028 setValue(&I, DAG.getNode(ISD::UINT_TO_FP, getCurSDLoc(), DestVT, N));
3029 }
3030
visitSIToFP(const User & I)3031 void SelectionDAGBuilder::visitSIToFP(const User &I) {
3032 // SIToFP is never a no-op cast, no need to check
3033 SDValue N = getValue(I.getOperand(0));
3034 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(I.getType());
3035 setValue(&I, DAG.getNode(ISD::SINT_TO_FP, getCurSDLoc(), DestVT, N));
3036 }
3037
visitPtrToInt(const User & I)3038 void SelectionDAGBuilder::visitPtrToInt(const User &I) {
3039 // What to do depends on the size of the integer and the size of the pointer.
3040 // We can either truncate, zero extend, or no-op, accordingly.
3041 SDValue N = getValue(I.getOperand(0));
3042 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(I.getType());
3043 setValue(&I, DAG.getZExtOrTrunc(N, getCurSDLoc(), DestVT));
3044 }
3045
visitIntToPtr(const User & I)3046 void SelectionDAGBuilder::visitIntToPtr(const User &I) {
3047 // What to do depends on the size of the integer and the size of the pointer.
3048 // We can either truncate, zero extend, or no-op, accordingly.
3049 SDValue N = getValue(I.getOperand(0));
3050 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(I.getType());
3051 setValue(&I, DAG.getZExtOrTrunc(N, getCurSDLoc(), DestVT));
3052 }
3053
visitBitCast(const User & I)3054 void SelectionDAGBuilder::visitBitCast(const User &I) {
3055 SDValue N = getValue(I.getOperand(0));
3056 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(I.getType());
3057
3058 // BitCast assures us that source and destination are the same size so this is
3059 // either a BITCAST or a no-op.
3060 if (DestVT != N.getValueType())
3061 setValue(&I, DAG.getNode(ISD::BITCAST, getCurSDLoc(),
3062 DestVT, N)); // convert types.
3063 // Check if the original LLVM IR Operand was a ConstantInt, because getValue()
3064 // might fold any kind of constant expression to an integer constant and that
3065 // is not what we are looking for. Only regcognize a bitcast of a genuine
3066 // constant integer as an opaque constant.
3067 else if(ConstantInt *C = dyn_cast<ConstantInt>(I.getOperand(0)))
3068 setValue(&I, DAG.getConstant(C->getValue(), DestVT, /*isTarget=*/false,
3069 /*isOpaque*/true));
3070 else
3071 setValue(&I, N); // noop cast.
3072 }
3073
visitAddrSpaceCast(const User & I)3074 void SelectionDAGBuilder::visitAddrSpaceCast(const User &I) {
3075 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3076 const Value *SV = I.getOperand(0);
3077 SDValue N = getValue(SV);
3078 EVT DestVT = TLI.getValueType(I.getType());
3079
3080 unsigned SrcAS = SV->getType()->getPointerAddressSpace();
3081 unsigned DestAS = I.getType()->getPointerAddressSpace();
3082
3083 if (!TLI.isNoopAddrSpaceCast(SrcAS, DestAS))
3084 N = DAG.getAddrSpaceCast(getCurSDLoc(), DestVT, N, SrcAS, DestAS);
3085
3086 setValue(&I, N);
3087 }
3088
visitInsertElement(const User & I)3089 void SelectionDAGBuilder::visitInsertElement(const User &I) {
3090 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3091 SDValue InVec = getValue(I.getOperand(0));
3092 SDValue InVal = getValue(I.getOperand(1));
3093 SDValue InIdx = DAG.getSExtOrTrunc(getValue(I.getOperand(2)),
3094 getCurSDLoc(), TLI.getVectorIdxTy());
3095 setValue(&I, DAG.getNode(ISD::INSERT_VECTOR_ELT, getCurSDLoc(),
3096 TLI.getValueType(I.getType()), InVec, InVal, InIdx));
3097 }
3098
visitExtractElement(const User & I)3099 void SelectionDAGBuilder::visitExtractElement(const User &I) {
3100 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3101 SDValue InVec = getValue(I.getOperand(0));
3102 SDValue InIdx = DAG.getSExtOrTrunc(getValue(I.getOperand(1)),
3103 getCurSDLoc(), TLI.getVectorIdxTy());
3104 setValue(&I, DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurSDLoc(),
3105 TLI.getValueType(I.getType()), InVec, InIdx));
3106 }
3107
3108 // Utility for visitShuffleVector - Return true if every element in Mask,
3109 // beginning from position Pos and ending in Pos+Size, falls within the
3110 // specified sequential range [L, L+Pos). or is undef.
isSequentialInRange(const SmallVectorImpl<int> & Mask,unsigned Pos,unsigned Size,int Low)3111 static bool isSequentialInRange(const SmallVectorImpl<int> &Mask,
3112 unsigned Pos, unsigned Size, int Low) {
3113 for (unsigned i = Pos, e = Pos+Size; i != e; ++i, ++Low)
3114 if (Mask[i] >= 0 && Mask[i] != Low)
3115 return false;
3116 return true;
3117 }
3118
visitShuffleVector(const User & I)3119 void SelectionDAGBuilder::visitShuffleVector(const User &I) {
3120 SDValue Src1 = getValue(I.getOperand(0));
3121 SDValue Src2 = getValue(I.getOperand(1));
3122
3123 SmallVector<int, 8> Mask;
3124 ShuffleVectorInst::getShuffleMask(cast<Constant>(I.getOperand(2)), Mask);
3125 unsigned MaskNumElts = Mask.size();
3126
3127 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3128 EVT VT = TLI.getValueType(I.getType());
3129 EVT SrcVT = Src1.getValueType();
3130 unsigned SrcNumElts = SrcVT.getVectorNumElements();
3131
3132 if (SrcNumElts == MaskNumElts) {
3133 setValue(&I, DAG.getVectorShuffle(VT, getCurSDLoc(), Src1, Src2,
3134 &Mask[0]));
3135 return;
3136 }
3137
3138 // Normalize the shuffle vector since mask and vector length don't match.
3139 if (SrcNumElts < MaskNumElts && MaskNumElts % SrcNumElts == 0) {
3140 // Mask is longer than the source vectors and is a multiple of the source
3141 // vectors. We can use concatenate vector to make the mask and vectors
3142 // lengths match.
3143 if (SrcNumElts*2 == MaskNumElts) {
3144 // First check for Src1 in low and Src2 in high
3145 if (isSequentialInRange(Mask, 0, SrcNumElts, 0) &&
3146 isSequentialInRange(Mask, SrcNumElts, SrcNumElts, SrcNumElts)) {
3147 // The shuffle is concatenating two vectors together.
3148 setValue(&I, DAG.getNode(ISD::CONCAT_VECTORS, getCurSDLoc(),
3149 VT, Src1, Src2));
3150 return;
3151 }
3152 // Then check for Src2 in low and Src1 in high
3153 if (isSequentialInRange(Mask, 0, SrcNumElts, SrcNumElts) &&
3154 isSequentialInRange(Mask, SrcNumElts, SrcNumElts, 0)) {
3155 // The shuffle is concatenating two vectors together.
3156 setValue(&I, DAG.getNode(ISD::CONCAT_VECTORS, getCurSDLoc(),
3157 VT, Src2, Src1));
3158 return;
3159 }
3160 }
3161
3162 // Pad both vectors with undefs to make them the same length as the mask.
3163 unsigned NumConcat = MaskNumElts / SrcNumElts;
3164 bool Src1U = Src1.getOpcode() == ISD::UNDEF;
3165 bool Src2U = Src2.getOpcode() == ISD::UNDEF;
3166 SDValue UndefVal = DAG.getUNDEF(SrcVT);
3167
3168 SmallVector<SDValue, 8> MOps1(NumConcat, UndefVal);
3169 SmallVector<SDValue, 8> MOps2(NumConcat, UndefVal);
3170 MOps1[0] = Src1;
3171 MOps2[0] = Src2;
3172
3173 Src1 = Src1U ? DAG.getUNDEF(VT) : DAG.getNode(ISD::CONCAT_VECTORS,
3174 getCurSDLoc(), VT, MOps1);
3175 Src2 = Src2U ? DAG.getUNDEF(VT) : DAG.getNode(ISD::CONCAT_VECTORS,
3176 getCurSDLoc(), VT, MOps2);
3177
3178 // Readjust mask for new input vector length.
3179 SmallVector<int, 8> MappedOps;
3180 for (unsigned i = 0; i != MaskNumElts; ++i) {
3181 int Idx = Mask[i];
3182 if (Idx >= (int)SrcNumElts)
3183 Idx -= SrcNumElts - MaskNumElts;
3184 MappedOps.push_back(Idx);
3185 }
3186
3187 setValue(&I, DAG.getVectorShuffle(VT, getCurSDLoc(), Src1, Src2,
3188 &MappedOps[0]));
3189 return;
3190 }
3191
3192 if (SrcNumElts > MaskNumElts) {
3193 // Analyze the access pattern of the vector to see if we can extract
3194 // two subvectors and do the shuffle. The analysis is done by calculating
3195 // the range of elements the mask access on both vectors.
3196 int MinRange[2] = { static_cast<int>(SrcNumElts),
3197 static_cast<int>(SrcNumElts)};
3198 int MaxRange[2] = {-1, -1};
3199
3200 for (unsigned i = 0; i != MaskNumElts; ++i) {
3201 int Idx = Mask[i];
3202 unsigned Input = 0;
3203 if (Idx < 0)
3204 continue;
3205
3206 if (Idx >= (int)SrcNumElts) {
3207 Input = 1;
3208 Idx -= SrcNumElts;
3209 }
3210 if (Idx > MaxRange[Input])
3211 MaxRange[Input] = Idx;
3212 if (Idx < MinRange[Input])
3213 MinRange[Input] = Idx;
3214 }
3215
3216 // Check if the access is smaller than the vector size and can we find
3217 // a reasonable extract index.
3218 int RangeUse[2] = { -1, -1 }; // 0 = Unused, 1 = Extract, -1 = Can not
3219 // Extract.
3220 int StartIdx[2]; // StartIdx to extract from
3221 for (unsigned Input = 0; Input < 2; ++Input) {
3222 if (MinRange[Input] >= (int)SrcNumElts && MaxRange[Input] < 0) {
3223 RangeUse[Input] = 0; // Unused
3224 StartIdx[Input] = 0;
3225 continue;
3226 }
3227
3228 // Find a good start index that is a multiple of the mask length. Then
3229 // see if the rest of the elements are in range.
3230 StartIdx[Input] = (MinRange[Input]/MaskNumElts)*MaskNumElts;
3231 if (MaxRange[Input] - StartIdx[Input] < (int)MaskNumElts &&
3232 StartIdx[Input] + MaskNumElts <= SrcNumElts)
3233 RangeUse[Input] = 1; // Extract from a multiple of the mask length.
3234 }
3235
3236 if (RangeUse[0] == 0 && RangeUse[1] == 0) {
3237 setValue(&I, DAG.getUNDEF(VT)); // Vectors are not used.
3238 return;
3239 }
3240 if (RangeUse[0] >= 0 && RangeUse[1] >= 0) {
3241 // Extract appropriate subvector and generate a vector shuffle
3242 for (unsigned Input = 0; Input < 2; ++Input) {
3243 SDValue &Src = Input == 0 ? Src1 : Src2;
3244 if (RangeUse[Input] == 0)
3245 Src = DAG.getUNDEF(VT);
3246 else
3247 Src = DAG.getNode(
3248 ISD::EXTRACT_SUBVECTOR, getCurSDLoc(), VT, Src,
3249 DAG.getConstant(StartIdx[Input], TLI.getVectorIdxTy()));
3250 }
3251
3252 // Calculate new mask.
3253 SmallVector<int, 8> MappedOps;
3254 for (unsigned i = 0; i != MaskNumElts; ++i) {
3255 int Idx = Mask[i];
3256 if (Idx >= 0) {
3257 if (Idx < (int)SrcNumElts)
3258 Idx -= StartIdx[0];
3259 else
3260 Idx -= SrcNumElts + StartIdx[1] - MaskNumElts;
3261 }
3262 MappedOps.push_back(Idx);
3263 }
3264
3265 setValue(&I, DAG.getVectorShuffle(VT, getCurSDLoc(), Src1, Src2,
3266 &MappedOps[0]));
3267 return;
3268 }
3269 }
3270
3271 // We can't use either concat vectors or extract subvectors so fall back to
3272 // replacing the shuffle with extract and build vector.
3273 // to insert and build vector.
3274 EVT EltVT = VT.getVectorElementType();
3275 EVT IdxVT = TLI.getVectorIdxTy();
3276 SmallVector<SDValue,8> Ops;
3277 for (unsigned i = 0; i != MaskNumElts; ++i) {
3278 int Idx = Mask[i];
3279 SDValue Res;
3280
3281 if (Idx < 0) {
3282 Res = DAG.getUNDEF(EltVT);
3283 } else {
3284 SDValue &Src = Idx < (int)SrcNumElts ? Src1 : Src2;
3285 if (Idx >= (int)SrcNumElts) Idx -= SrcNumElts;
3286
3287 Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurSDLoc(),
3288 EltVT, Src, DAG.getConstant(Idx, IdxVT));
3289 }
3290
3291 Ops.push_back(Res);
3292 }
3293
3294 setValue(&I, DAG.getNode(ISD::BUILD_VECTOR, getCurSDLoc(), VT, Ops));
3295 }
3296
visitInsertValue(const InsertValueInst & I)3297 void SelectionDAGBuilder::visitInsertValue(const InsertValueInst &I) {
3298 const Value *Op0 = I.getOperand(0);
3299 const Value *Op1 = I.getOperand(1);
3300 Type *AggTy = I.getType();
3301 Type *ValTy = Op1->getType();
3302 bool IntoUndef = isa<UndefValue>(Op0);
3303 bool FromUndef = isa<UndefValue>(Op1);
3304
3305 unsigned LinearIndex = ComputeLinearIndex(AggTy, I.getIndices());
3306
3307 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3308 SmallVector<EVT, 4> AggValueVTs;
3309 ComputeValueVTs(TLI, AggTy, AggValueVTs);
3310 SmallVector<EVT, 4> ValValueVTs;
3311 ComputeValueVTs(TLI, ValTy, ValValueVTs);
3312
3313 unsigned NumAggValues = AggValueVTs.size();
3314 unsigned NumValValues = ValValueVTs.size();
3315 SmallVector<SDValue, 4> Values(NumAggValues);
3316
3317 // Ignore an insertvalue that produces an empty object
3318 if (!NumAggValues) {
3319 setValue(&I, DAG.getUNDEF(MVT(MVT::Other)));
3320 return;
3321 }
3322
3323 SDValue Agg = getValue(Op0);
3324 unsigned i = 0;
3325 // Copy the beginning value(s) from the original aggregate.
3326 for (; i != LinearIndex; ++i)
3327 Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
3328 SDValue(Agg.getNode(), Agg.getResNo() + i);
3329 // Copy values from the inserted value(s).
3330 if (NumValValues) {
3331 SDValue Val = getValue(Op1);
3332 for (; i != LinearIndex + NumValValues; ++i)
3333 Values[i] = FromUndef ? DAG.getUNDEF(AggValueVTs[i]) :
3334 SDValue(Val.getNode(), Val.getResNo() + i - LinearIndex);
3335 }
3336 // Copy remaining value(s) from the original aggregate.
3337 for (; i != NumAggValues; ++i)
3338 Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
3339 SDValue(Agg.getNode(), Agg.getResNo() + i);
3340
3341 setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
3342 DAG.getVTList(AggValueVTs), Values));
3343 }
3344
visitExtractValue(const ExtractValueInst & I)3345 void SelectionDAGBuilder::visitExtractValue(const ExtractValueInst &I) {
3346 const Value *Op0 = I.getOperand(0);
3347 Type *AggTy = Op0->getType();
3348 Type *ValTy = I.getType();
3349 bool OutOfUndef = isa<UndefValue>(Op0);
3350
3351 unsigned LinearIndex = ComputeLinearIndex(AggTy, I.getIndices());
3352
3353 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3354 SmallVector<EVT, 4> ValValueVTs;
3355 ComputeValueVTs(TLI, ValTy, ValValueVTs);
3356
3357 unsigned NumValValues = ValValueVTs.size();
3358
3359 // Ignore a extractvalue that produces an empty object
3360 if (!NumValValues) {
3361 setValue(&I, DAG.getUNDEF(MVT(MVT::Other)));
3362 return;
3363 }
3364
3365 SmallVector<SDValue, 4> Values(NumValValues);
3366
3367 SDValue Agg = getValue(Op0);
3368 // Copy out the selected value(s).
3369 for (unsigned i = LinearIndex; i != LinearIndex + NumValValues; ++i)
3370 Values[i - LinearIndex] =
3371 OutOfUndef ?
3372 DAG.getUNDEF(Agg.getNode()->getValueType(Agg.getResNo() + i)) :
3373 SDValue(Agg.getNode(), Agg.getResNo() + i);
3374
3375 setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
3376 DAG.getVTList(ValValueVTs), Values));
3377 }
3378
visitGetElementPtr(const User & I)3379 void SelectionDAGBuilder::visitGetElementPtr(const User &I) {
3380 Value *Op0 = I.getOperand(0);
3381 // Note that the pointer operand may be a vector of pointers. Take the scalar
3382 // element which holds a pointer.
3383 Type *Ty = Op0->getType()->getScalarType();
3384 unsigned AS = Ty->getPointerAddressSpace();
3385 SDValue N = getValue(Op0);
3386
3387 for (GetElementPtrInst::const_op_iterator OI = I.op_begin()+1, E = I.op_end();
3388 OI != E; ++OI) {
3389 const Value *Idx = *OI;
3390 if (StructType *StTy = dyn_cast<StructType>(Ty)) {
3391 unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
3392 if (Field) {
3393 // N = N + Offset
3394 uint64_t Offset = DL->getStructLayout(StTy)->getElementOffset(Field);
3395 N = DAG.getNode(ISD::ADD, getCurSDLoc(), N.getValueType(), N,
3396 DAG.getConstant(Offset, N.getValueType()));
3397 }
3398
3399 Ty = StTy->getElementType(Field);
3400 } else {
3401 Ty = cast<SequentialType>(Ty)->getElementType();
3402 MVT PtrTy = DAG.getTargetLoweringInfo().getPointerTy(AS);
3403 unsigned PtrSize = PtrTy.getSizeInBits();
3404 APInt ElementSize(PtrSize, DL->getTypeAllocSize(Ty));
3405
3406 // If this is a constant subscript, handle it quickly.
3407 if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
3408 if (CI->isZero())
3409 continue;
3410 APInt Offs = ElementSize * CI->getValue().sextOrTrunc(PtrSize);
3411 SDValue OffsVal = DAG.getConstant(Offs, PtrTy);
3412 N = DAG.getNode(ISD::ADD, getCurSDLoc(), N.getValueType(), N, OffsVal);
3413 continue;
3414 }
3415
3416 // N = N + Idx * ElementSize;
3417 SDValue IdxN = getValue(Idx);
3418
3419 // If the index is smaller or larger than intptr_t, truncate or extend
3420 // it.
3421 IdxN = DAG.getSExtOrTrunc(IdxN, getCurSDLoc(), N.getValueType());
3422
3423 // If this is a multiply by a power of two, turn it into a shl
3424 // immediately. This is a very common case.
3425 if (ElementSize != 1) {
3426 if (ElementSize.isPowerOf2()) {
3427 unsigned Amt = ElementSize.logBase2();
3428 IdxN = DAG.getNode(ISD::SHL, getCurSDLoc(),
3429 N.getValueType(), IdxN,
3430 DAG.getConstant(Amt, IdxN.getValueType()));
3431 } else {
3432 SDValue Scale = DAG.getConstant(ElementSize, IdxN.getValueType());
3433 IdxN = DAG.getNode(ISD::MUL, getCurSDLoc(),
3434 N.getValueType(), IdxN, Scale);
3435 }
3436 }
3437
3438 N = DAG.getNode(ISD::ADD, getCurSDLoc(),
3439 N.getValueType(), N, IdxN);
3440 }
3441 }
3442
3443 setValue(&I, N);
3444 }
3445
visitAlloca(const AllocaInst & I)3446 void SelectionDAGBuilder::visitAlloca(const AllocaInst &I) {
3447 // If this is a fixed sized alloca in the entry block of the function,
3448 // allocate it statically on the stack.
3449 if (FuncInfo.StaticAllocaMap.count(&I))
3450 return; // getValue will auto-populate this.
3451
3452 Type *Ty = I.getAllocatedType();
3453 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3454 uint64_t TySize = TLI.getDataLayout()->getTypeAllocSize(Ty);
3455 unsigned Align =
3456 std::max((unsigned)TLI.getDataLayout()->getPrefTypeAlignment(Ty),
3457 I.getAlignment());
3458
3459 SDValue AllocSize = getValue(I.getArraySize());
3460
3461 EVT IntPtr = TLI.getPointerTy();
3462 if (AllocSize.getValueType() != IntPtr)
3463 AllocSize = DAG.getZExtOrTrunc(AllocSize, getCurSDLoc(), IntPtr);
3464
3465 AllocSize = DAG.getNode(ISD::MUL, getCurSDLoc(), IntPtr,
3466 AllocSize,
3467 DAG.getConstant(TySize, IntPtr));
3468
3469 // Handle alignment. If the requested alignment is less than or equal to
3470 // the stack alignment, ignore it. If the size is greater than or equal to
3471 // the stack alignment, we note this in the DYNAMIC_STACKALLOC node.
3472 unsigned StackAlign =
3473 DAG.getSubtarget().getFrameLowering()->getStackAlignment();
3474 if (Align <= StackAlign)
3475 Align = 0;
3476
3477 // Round the size of the allocation up to the stack alignment size
3478 // by add SA-1 to the size.
3479 AllocSize = DAG.getNode(ISD::ADD, getCurSDLoc(),
3480 AllocSize.getValueType(), AllocSize,
3481 DAG.getIntPtrConstant(StackAlign-1));
3482
3483 // Mask out the low bits for alignment purposes.
3484 AllocSize = DAG.getNode(ISD::AND, getCurSDLoc(),
3485 AllocSize.getValueType(), AllocSize,
3486 DAG.getIntPtrConstant(~(uint64_t)(StackAlign-1)));
3487
3488 SDValue Ops[] = { getRoot(), AllocSize, DAG.getIntPtrConstant(Align) };
3489 SDVTList VTs = DAG.getVTList(AllocSize.getValueType(), MVT::Other);
3490 SDValue DSA = DAG.getNode(ISD::DYNAMIC_STACKALLOC, getCurSDLoc(), VTs, Ops);
3491 setValue(&I, DSA);
3492 DAG.setRoot(DSA.getValue(1));
3493
3494 assert(FuncInfo.MF->getFrameInfo()->hasVarSizedObjects());
3495 }
3496
visitLoad(const LoadInst & I)3497 void SelectionDAGBuilder::visitLoad(const LoadInst &I) {
3498 if (I.isAtomic())
3499 return visitAtomicLoad(I);
3500
3501 const Value *SV = I.getOperand(0);
3502 SDValue Ptr = getValue(SV);
3503
3504 Type *Ty = I.getType();
3505
3506 bool isVolatile = I.isVolatile();
3507 bool isNonTemporal = I.getMetadata(LLVMContext::MD_nontemporal) != nullptr;
3508 bool isInvariant = I.getMetadata(LLVMContext::MD_invariant_load) != nullptr;
3509 unsigned Alignment = I.getAlignment();
3510
3511 AAMDNodes AAInfo;
3512 I.getAAMetadata(AAInfo);
3513 const MDNode *Ranges = I.getMetadata(LLVMContext::MD_range);
3514
3515 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3516 SmallVector<EVT, 4> ValueVTs;
3517 SmallVector<uint64_t, 4> Offsets;
3518 ComputeValueVTs(TLI, Ty, ValueVTs, &Offsets);
3519 unsigned NumValues = ValueVTs.size();
3520 if (NumValues == 0)
3521 return;
3522
3523 SDValue Root;
3524 bool ConstantMemory = false;
3525 if (isVolatile || NumValues > MaxParallelChains)
3526 // Serialize volatile loads with other side effects.
3527 Root = getRoot();
3528 else if (AA->pointsToConstantMemory(
3529 AliasAnalysis::Location(SV, AA->getTypeStoreSize(Ty), AAInfo))) {
3530 // Do not serialize (non-volatile) loads of constant memory with anything.
3531 Root = DAG.getEntryNode();
3532 ConstantMemory = true;
3533 } else {
3534 // Do not serialize non-volatile loads against each other.
3535 Root = DAG.getRoot();
3536 }
3537
3538 if (isVolatile)
3539 Root = TLI.prepareVolatileOrAtomicLoad(Root, getCurSDLoc(), DAG);
3540
3541 SmallVector<SDValue, 4> Values(NumValues);
3542 SmallVector<SDValue, 4> Chains(std::min(unsigned(MaxParallelChains),
3543 NumValues));
3544 EVT PtrVT = Ptr.getValueType();
3545 unsigned ChainI = 0;
3546 for (unsigned i = 0; i != NumValues; ++i, ++ChainI) {
3547 // Serializing loads here may result in excessive register pressure, and
3548 // TokenFactor places arbitrary choke points on the scheduler. SD scheduling
3549 // could recover a bit by hoisting nodes upward in the chain by recognizing
3550 // they are side-effect free or do not alias. The optimizer should really
3551 // avoid this case by converting large object/array copies to llvm.memcpy
3552 // (MaxParallelChains should always remain as failsafe).
3553 if (ChainI == MaxParallelChains) {
3554 assert(PendingLoads.empty() && "PendingLoads must be serialized first");
3555 SDValue Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), MVT::Other,
3556 makeArrayRef(Chains.data(), ChainI));
3557 Root = Chain;
3558 ChainI = 0;
3559 }
3560 SDValue A = DAG.getNode(ISD::ADD, getCurSDLoc(),
3561 PtrVT, Ptr,
3562 DAG.getConstant(Offsets[i], PtrVT));
3563 SDValue L = DAG.getLoad(ValueVTs[i], getCurSDLoc(), Root,
3564 A, MachinePointerInfo(SV, Offsets[i]), isVolatile,
3565 isNonTemporal, isInvariant, Alignment, AAInfo,
3566 Ranges);
3567
3568 Values[i] = L;
3569 Chains[ChainI] = L.getValue(1);
3570 }
3571
3572 if (!ConstantMemory) {
3573 SDValue Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), MVT::Other,
3574 makeArrayRef(Chains.data(), ChainI));
3575 if (isVolatile)
3576 DAG.setRoot(Chain);
3577 else
3578 PendingLoads.push_back(Chain);
3579 }
3580
3581 setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
3582 DAG.getVTList(ValueVTs), Values));
3583 }
3584
visitStore(const StoreInst & I)3585 void SelectionDAGBuilder::visitStore(const StoreInst &I) {
3586 if (I.isAtomic())
3587 return visitAtomicStore(I);
3588
3589 const Value *SrcV = I.getOperand(0);
3590 const Value *PtrV = I.getOperand(1);
3591
3592 SmallVector<EVT, 4> ValueVTs;
3593 SmallVector<uint64_t, 4> Offsets;
3594 ComputeValueVTs(DAG.getTargetLoweringInfo(), SrcV->getType(),
3595 ValueVTs, &Offsets);
3596 unsigned NumValues = ValueVTs.size();
3597 if (NumValues == 0)
3598 return;
3599
3600 // Get the lowered operands. Note that we do this after
3601 // checking if NumResults is zero, because with zero results
3602 // the operands won't have values in the map.
3603 SDValue Src = getValue(SrcV);
3604 SDValue Ptr = getValue(PtrV);
3605
3606 SDValue Root = getRoot();
3607 SmallVector<SDValue, 4> Chains(std::min(unsigned(MaxParallelChains),
3608 NumValues));
3609 EVT PtrVT = Ptr.getValueType();
3610 bool isVolatile = I.isVolatile();
3611 bool isNonTemporal = I.getMetadata(LLVMContext::MD_nontemporal) != nullptr;
3612 unsigned Alignment = I.getAlignment();
3613
3614 AAMDNodes AAInfo;
3615 I.getAAMetadata(AAInfo);
3616
3617 unsigned ChainI = 0;
3618 for (unsigned i = 0; i != NumValues; ++i, ++ChainI) {
3619 // See visitLoad comments.
3620 if (ChainI == MaxParallelChains) {
3621 SDValue Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), MVT::Other,
3622 makeArrayRef(Chains.data(), ChainI));
3623 Root = Chain;
3624 ChainI = 0;
3625 }
3626 SDValue Add = DAG.getNode(ISD::ADD, getCurSDLoc(), PtrVT, Ptr,
3627 DAG.getConstant(Offsets[i], PtrVT));
3628 SDValue St = DAG.getStore(Root, getCurSDLoc(),
3629 SDValue(Src.getNode(), Src.getResNo() + i),
3630 Add, MachinePointerInfo(PtrV, Offsets[i]),
3631 isVolatile, isNonTemporal, Alignment, AAInfo);
3632 Chains[ChainI] = St;
3633 }
3634
3635 SDValue StoreNode = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), MVT::Other,
3636 makeArrayRef(Chains.data(), ChainI));
3637 DAG.setRoot(StoreNode);
3638 }
3639
visitMaskedStore(const CallInst & I)3640 void SelectionDAGBuilder::visitMaskedStore(const CallInst &I) {
3641 SDLoc sdl = getCurSDLoc();
3642
3643 // llvm.masked.store.*(Src0, Ptr, alignemt, Mask)
3644 Value *PtrOperand = I.getArgOperand(1);
3645 SDValue Ptr = getValue(PtrOperand);
3646 SDValue Src0 = getValue(I.getArgOperand(0));
3647 SDValue Mask = getValue(I.getArgOperand(3));
3648 EVT VT = Src0.getValueType();
3649 unsigned Alignment = (cast<ConstantInt>(I.getArgOperand(2)))->getZExtValue();
3650 if (!Alignment)
3651 Alignment = DAG.getEVTAlignment(VT);
3652
3653 AAMDNodes AAInfo;
3654 I.getAAMetadata(AAInfo);
3655
3656 MachineMemOperand *MMO =
3657 DAG.getMachineFunction().
3658 getMachineMemOperand(MachinePointerInfo(PtrOperand),
3659 MachineMemOperand::MOStore, VT.getStoreSize(),
3660 Alignment, AAInfo);
3661 SDValue StoreNode = DAG.getMaskedStore(getRoot(), sdl, Src0, Ptr, Mask, VT,
3662 MMO, false);
3663 DAG.setRoot(StoreNode);
3664 setValue(&I, StoreNode);
3665 }
3666
visitMaskedLoad(const CallInst & I)3667 void SelectionDAGBuilder::visitMaskedLoad(const CallInst &I) {
3668 SDLoc sdl = getCurSDLoc();
3669
3670 // @llvm.masked.load.*(Ptr, alignment, Mask, Src0)
3671 Value *PtrOperand = I.getArgOperand(0);
3672 SDValue Ptr = getValue(PtrOperand);
3673 SDValue Src0 = getValue(I.getArgOperand(3));
3674 SDValue Mask = getValue(I.getArgOperand(2));
3675
3676 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3677 EVT VT = TLI.getValueType(I.getType());
3678 unsigned Alignment = (cast<ConstantInt>(I.getArgOperand(1)))->getZExtValue();
3679 if (!Alignment)
3680 Alignment = DAG.getEVTAlignment(VT);
3681
3682 AAMDNodes AAInfo;
3683 I.getAAMetadata(AAInfo);
3684 const MDNode *Ranges = I.getMetadata(LLVMContext::MD_range);
3685
3686 SDValue InChain = DAG.getRoot();
3687 if (AA->pointsToConstantMemory(
3688 AliasAnalysis::Location(PtrOperand,
3689 AA->getTypeStoreSize(I.getType()),
3690 AAInfo))) {
3691 // Do not serialize (non-volatile) loads of constant memory with anything.
3692 InChain = DAG.getEntryNode();
3693 }
3694
3695 MachineMemOperand *MMO =
3696 DAG.getMachineFunction().
3697 getMachineMemOperand(MachinePointerInfo(PtrOperand),
3698 MachineMemOperand::MOLoad, VT.getStoreSize(),
3699 Alignment, AAInfo, Ranges);
3700
3701 SDValue Load = DAG.getMaskedLoad(VT, sdl, InChain, Ptr, Mask, Src0, VT, MMO,
3702 ISD::NON_EXTLOAD);
3703 SDValue OutChain = Load.getValue(1);
3704 DAG.setRoot(OutChain);
3705 setValue(&I, Load);
3706 }
3707
visitAtomicCmpXchg(const AtomicCmpXchgInst & I)3708 void SelectionDAGBuilder::visitAtomicCmpXchg(const AtomicCmpXchgInst &I) {
3709 SDLoc dl = getCurSDLoc();
3710 AtomicOrdering SuccessOrder = I.getSuccessOrdering();
3711 AtomicOrdering FailureOrder = I.getFailureOrdering();
3712 SynchronizationScope Scope = I.getSynchScope();
3713
3714 SDValue InChain = getRoot();
3715
3716 MVT MemVT = getValue(I.getCompareOperand()).getSimpleValueType();
3717 SDVTList VTs = DAG.getVTList(MemVT, MVT::i1, MVT::Other);
3718 SDValue L = DAG.getAtomicCmpSwap(
3719 ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, dl, MemVT, VTs, InChain,
3720 getValue(I.getPointerOperand()), getValue(I.getCompareOperand()),
3721 getValue(I.getNewValOperand()), MachinePointerInfo(I.getPointerOperand()),
3722 /*Alignment=*/ 0, SuccessOrder, FailureOrder, Scope);
3723
3724 SDValue OutChain = L.getValue(2);
3725
3726 setValue(&I, L);
3727 DAG.setRoot(OutChain);
3728 }
3729
visitAtomicRMW(const AtomicRMWInst & I)3730 void SelectionDAGBuilder::visitAtomicRMW(const AtomicRMWInst &I) {
3731 SDLoc dl = getCurSDLoc();
3732 ISD::NodeType NT;
3733 switch (I.getOperation()) {
3734 default: llvm_unreachable("Unknown atomicrmw operation");
3735 case AtomicRMWInst::Xchg: NT = ISD::ATOMIC_SWAP; break;
3736 case AtomicRMWInst::Add: NT = ISD::ATOMIC_LOAD_ADD; break;
3737 case AtomicRMWInst::Sub: NT = ISD::ATOMIC_LOAD_SUB; break;
3738 case AtomicRMWInst::And: NT = ISD::ATOMIC_LOAD_AND; break;
3739 case AtomicRMWInst::Nand: NT = ISD::ATOMIC_LOAD_NAND; break;
3740 case AtomicRMWInst::Or: NT = ISD::ATOMIC_LOAD_OR; break;
3741 case AtomicRMWInst::Xor: NT = ISD::ATOMIC_LOAD_XOR; break;
3742 case AtomicRMWInst::Max: NT = ISD::ATOMIC_LOAD_MAX; break;
3743 case AtomicRMWInst::Min: NT = ISD::ATOMIC_LOAD_MIN; break;
3744 case AtomicRMWInst::UMax: NT = ISD::ATOMIC_LOAD_UMAX; break;
3745 case AtomicRMWInst::UMin: NT = ISD::ATOMIC_LOAD_UMIN; break;
3746 }
3747 AtomicOrdering Order = I.getOrdering();
3748 SynchronizationScope Scope = I.getSynchScope();
3749
3750 SDValue InChain = getRoot();
3751
3752 SDValue L =
3753 DAG.getAtomic(NT, dl,
3754 getValue(I.getValOperand()).getSimpleValueType(),
3755 InChain,
3756 getValue(I.getPointerOperand()),
3757 getValue(I.getValOperand()),
3758 I.getPointerOperand(),
3759 /* Alignment=*/ 0, Order, Scope);
3760
3761 SDValue OutChain = L.getValue(1);
3762
3763 setValue(&I, L);
3764 DAG.setRoot(OutChain);
3765 }
3766
visitFence(const FenceInst & I)3767 void SelectionDAGBuilder::visitFence(const FenceInst &I) {
3768 SDLoc dl = getCurSDLoc();
3769 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3770 SDValue Ops[3];
3771 Ops[0] = getRoot();
3772 Ops[1] = DAG.getConstant(I.getOrdering(), TLI.getPointerTy());
3773 Ops[2] = DAG.getConstant(I.getSynchScope(), TLI.getPointerTy());
3774 DAG.setRoot(DAG.getNode(ISD::ATOMIC_FENCE, dl, MVT::Other, Ops));
3775 }
3776
visitAtomicLoad(const LoadInst & I)3777 void SelectionDAGBuilder::visitAtomicLoad(const LoadInst &I) {
3778 SDLoc dl = getCurSDLoc();
3779 AtomicOrdering Order = I.getOrdering();
3780 SynchronizationScope Scope = I.getSynchScope();
3781
3782 SDValue InChain = getRoot();
3783
3784 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3785 EVT VT = TLI.getValueType(I.getType());
3786
3787 if (I.getAlignment() < VT.getSizeInBits() / 8)
3788 report_fatal_error("Cannot generate unaligned atomic load");
3789
3790 MachineMemOperand *MMO =
3791 DAG.getMachineFunction().
3792 getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),
3793 MachineMemOperand::MOVolatile |
3794 MachineMemOperand::MOLoad,
3795 VT.getStoreSize(),
3796 I.getAlignment() ? I.getAlignment() :
3797 DAG.getEVTAlignment(VT));
3798
3799 InChain = TLI.prepareVolatileOrAtomicLoad(InChain, dl, DAG);
3800 SDValue L =
3801 DAG.getAtomic(ISD::ATOMIC_LOAD, dl, VT, VT, InChain,
3802 getValue(I.getPointerOperand()), MMO,
3803 Order, Scope);
3804
3805 SDValue OutChain = L.getValue(1);
3806
3807 setValue(&I, L);
3808 DAG.setRoot(OutChain);
3809 }
3810
visitAtomicStore(const StoreInst & I)3811 void SelectionDAGBuilder::visitAtomicStore(const StoreInst &I) {
3812 SDLoc dl = getCurSDLoc();
3813
3814 AtomicOrdering Order = I.getOrdering();
3815 SynchronizationScope Scope = I.getSynchScope();
3816
3817 SDValue InChain = getRoot();
3818
3819 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3820 EVT VT = TLI.getValueType(I.getValueOperand()->getType());
3821
3822 if (I.getAlignment() < VT.getSizeInBits() / 8)
3823 report_fatal_error("Cannot generate unaligned atomic store");
3824
3825 SDValue OutChain =
3826 DAG.getAtomic(ISD::ATOMIC_STORE, dl, VT,
3827 InChain,
3828 getValue(I.getPointerOperand()),
3829 getValue(I.getValueOperand()),
3830 I.getPointerOperand(), I.getAlignment(),
3831 Order, Scope);
3832
3833 DAG.setRoot(OutChain);
3834 }
3835
3836 /// visitTargetIntrinsic - Lower a call of a target intrinsic to an INTRINSIC
3837 /// node.
visitTargetIntrinsic(const CallInst & I,unsigned Intrinsic)3838 void SelectionDAGBuilder::visitTargetIntrinsic(const CallInst &I,
3839 unsigned Intrinsic) {
3840 bool HasChain = !I.doesNotAccessMemory();
3841 bool OnlyLoad = HasChain && I.onlyReadsMemory();
3842
3843 // Build the operand list.
3844 SmallVector<SDValue, 8> Ops;
3845 if (HasChain) { // If this intrinsic has side-effects, chainify it.
3846 if (OnlyLoad) {
3847 // We don't need to serialize loads against other loads.
3848 Ops.push_back(DAG.getRoot());
3849 } else {
3850 Ops.push_back(getRoot());
3851 }
3852 }
3853
3854 // Info is set by getTgtMemInstrinsic
3855 TargetLowering::IntrinsicInfo Info;
3856 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3857 bool IsTgtIntrinsic = TLI.getTgtMemIntrinsic(Info, I, Intrinsic);
3858
3859 // Add the intrinsic ID as an integer operand if it's not a target intrinsic.
3860 if (!IsTgtIntrinsic || Info.opc == ISD::INTRINSIC_VOID ||
3861 Info.opc == ISD::INTRINSIC_W_CHAIN)
3862 Ops.push_back(DAG.getTargetConstant(Intrinsic, TLI.getPointerTy()));
3863
3864 // Add all operands of the call to the operand list.
3865 for (unsigned i = 0, e = I.getNumArgOperands(); i != e; ++i) {
3866 SDValue Op = getValue(I.getArgOperand(i));
3867 Ops.push_back(Op);
3868 }
3869
3870 SmallVector<EVT, 4> ValueVTs;
3871 ComputeValueVTs(TLI, I.getType(), ValueVTs);
3872
3873 if (HasChain)
3874 ValueVTs.push_back(MVT::Other);
3875
3876 SDVTList VTs = DAG.getVTList(ValueVTs);
3877
3878 // Create the node.
3879 SDValue Result;
3880 if (IsTgtIntrinsic) {
3881 // This is target intrinsic that touches memory
3882 Result = DAG.getMemIntrinsicNode(Info.opc, getCurSDLoc(),
3883 VTs, Ops, Info.memVT,
3884 MachinePointerInfo(Info.ptrVal, Info.offset),
3885 Info.align, Info.vol,
3886 Info.readMem, Info.writeMem, Info.size);
3887 } else if (!HasChain) {
3888 Result = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, getCurSDLoc(), VTs, Ops);
3889 } else if (!I.getType()->isVoidTy()) {
3890 Result = DAG.getNode(ISD::INTRINSIC_W_CHAIN, getCurSDLoc(), VTs, Ops);
3891 } else {
3892 Result = DAG.getNode(ISD::INTRINSIC_VOID, getCurSDLoc(), VTs, Ops);
3893 }
3894
3895 if (HasChain) {
3896 SDValue Chain = Result.getValue(Result.getNode()->getNumValues()-1);
3897 if (OnlyLoad)
3898 PendingLoads.push_back(Chain);
3899 else
3900 DAG.setRoot(Chain);
3901 }
3902
3903 if (!I.getType()->isVoidTy()) {
3904 if (VectorType *PTy = dyn_cast<VectorType>(I.getType())) {
3905 EVT VT = TLI.getValueType(PTy);
3906 Result = DAG.getNode(ISD::BITCAST, getCurSDLoc(), VT, Result);
3907 }
3908
3909 setValue(&I, Result);
3910 }
3911 }
3912
3913 /// GetSignificand - Get the significand and build it into a floating-point
3914 /// number with exponent of 1:
3915 ///
3916 /// Op = (Op & 0x007fffff) | 0x3f800000;
3917 ///
3918 /// where Op is the hexadecimal representation of floating point value.
3919 static SDValue
GetSignificand(SelectionDAG & DAG,SDValue Op,SDLoc dl)3920 GetSignificand(SelectionDAG &DAG, SDValue Op, SDLoc dl) {
3921 SDValue t1 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
3922 DAG.getConstant(0x007fffff, MVT::i32));
3923 SDValue t2 = DAG.getNode(ISD::OR, dl, MVT::i32, t1,
3924 DAG.getConstant(0x3f800000, MVT::i32));
3925 return DAG.getNode(ISD::BITCAST, dl, MVT::f32, t2);
3926 }
3927
3928 /// GetExponent - Get the exponent:
3929 ///
3930 /// (float)(int)(((Op & 0x7f800000) >> 23) - 127);
3931 ///
3932 /// where Op is the hexadecimal representation of floating point value.
3933 static SDValue
GetExponent(SelectionDAG & DAG,SDValue Op,const TargetLowering & TLI,SDLoc dl)3934 GetExponent(SelectionDAG &DAG, SDValue Op, const TargetLowering &TLI,
3935 SDLoc dl) {
3936 SDValue t0 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
3937 DAG.getConstant(0x7f800000, MVT::i32));
3938 SDValue t1 = DAG.getNode(ISD::SRL, dl, MVT::i32, t0,
3939 DAG.getConstant(23, TLI.getPointerTy()));
3940 SDValue t2 = DAG.getNode(ISD::SUB, dl, MVT::i32, t1,
3941 DAG.getConstant(127, MVT::i32));
3942 return DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, t2);
3943 }
3944
3945 /// getF32Constant - Get 32-bit floating point constant.
3946 static SDValue
getF32Constant(SelectionDAG & DAG,unsigned Flt)3947 getF32Constant(SelectionDAG &DAG, unsigned Flt) {
3948 return DAG.getConstantFP(APFloat(APFloat::IEEEsingle, APInt(32, Flt)),
3949 MVT::f32);
3950 }
3951
3952 /// expandExp - Lower an exp intrinsic. Handles the special sequences for
3953 /// limited-precision mode.
expandExp(SDLoc dl,SDValue Op,SelectionDAG & DAG,const TargetLowering & TLI)3954 static SDValue expandExp(SDLoc dl, SDValue Op, SelectionDAG &DAG,
3955 const TargetLowering &TLI) {
3956 if (Op.getValueType() == MVT::f32 &&
3957 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
3958
3959 // Put the exponent in the right bit position for later addition to the
3960 // final result:
3961 //
3962 // #define LOG2OFe 1.4426950f
3963 // IntegerPartOfX = ((int32_t)(X * LOG2OFe));
3964 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, Op,
3965 getF32Constant(DAG, 0x3fb8aa3b));
3966 SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, t0);
3967
3968 // FractionalPartOfX = (X * LOG2OFe) - (float)IntegerPartOfX;
3969 SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, IntegerPartOfX);
3970 SDValue X = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0, t1);
3971
3972 // IntegerPartOfX <<= 23;
3973 IntegerPartOfX = DAG.getNode(ISD::SHL, dl, MVT::i32, IntegerPartOfX,
3974 DAG.getConstant(23, TLI.getPointerTy()));
3975
3976 SDValue TwoToFracPartOfX;
3977 if (LimitFloatPrecision <= 6) {
3978 // For floating-point precision of 6:
3979 //
3980 // TwoToFractionalPartOfX =
3981 // 0.997535578f +
3982 // (0.735607626f + 0.252464424f * x) * x;
3983 //
3984 // error 0.0144103317, which is 6 bits
3985 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3986 getF32Constant(DAG, 0x3e814304));
3987 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3988 getF32Constant(DAG, 0x3f3c50c8));
3989 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3990 TwoToFracPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3991 getF32Constant(DAG, 0x3f7f5e7e));
3992 } else if (LimitFloatPrecision <= 12) {
3993 // For floating-point precision of 12:
3994 //
3995 // TwoToFractionalPartOfX =
3996 // 0.999892986f +
3997 // (0.696457318f +
3998 // (0.224338339f + 0.792043434e-1f * x) * x) * x;
3999 //
4000 // 0.000107046256 error, which is 13 to 14 bits
4001 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4002 getF32Constant(DAG, 0x3da235e3));
4003 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
4004 getF32Constant(DAG, 0x3e65b8f3));
4005 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4006 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4007 getF32Constant(DAG, 0x3f324b07));
4008 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4009 TwoToFracPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
4010 getF32Constant(DAG, 0x3f7ff8fd));
4011 } else { // LimitFloatPrecision <= 18
4012 // For floating-point precision of 18:
4013 //
4014 // TwoToFractionalPartOfX =
4015 // 0.999999982f +
4016 // (0.693148872f +
4017 // (0.240227044f +
4018 // (0.554906021e-1f +
4019 // (0.961591928e-2f +
4020 // (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x;
4021 //
4022 // error 2.47208000*10^(-7), which is better than 18 bits
4023 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4024 getF32Constant(DAG, 0x3924b03e));
4025 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
4026 getF32Constant(DAG, 0x3ab24b87));
4027 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4028 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4029 getF32Constant(DAG, 0x3c1d8c17));
4030 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4031 SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
4032 getF32Constant(DAG, 0x3d634a1d));
4033 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
4034 SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
4035 getF32Constant(DAG, 0x3e75fe14));
4036 SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
4037 SDValue t11 = DAG.getNode(ISD::FADD, dl, MVT::f32, t10,
4038 getF32Constant(DAG, 0x3f317234));
4039 SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X);
4040 TwoToFracPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t12,
4041 getF32Constant(DAG, 0x3f800000));
4042 }
4043
4044 // Add the exponent into the result in integer domain.
4045 SDValue t13 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, TwoToFracPartOfX);
4046 return DAG.getNode(ISD::BITCAST, dl, MVT::f32,
4047 DAG.getNode(ISD::ADD, dl, MVT::i32,
4048 t13, IntegerPartOfX));
4049 }
4050
4051 // No special expansion.
4052 return DAG.getNode(ISD::FEXP, dl, Op.getValueType(), Op);
4053 }
4054
4055 /// expandLog - Lower a log intrinsic. Handles the special sequences for
4056 /// limited-precision mode.
expandLog(SDLoc dl,SDValue Op,SelectionDAG & DAG,const TargetLowering & TLI)4057 static SDValue expandLog(SDLoc dl, SDValue Op, SelectionDAG &DAG,
4058 const TargetLowering &TLI) {
4059 if (Op.getValueType() == MVT::f32 &&
4060 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
4061 SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
4062
4063 // Scale the exponent by log(2) [0.69314718f].
4064 SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
4065 SDValue LogOfExponent = DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
4066 getF32Constant(DAG, 0x3f317218));
4067
4068 // Get the significand and build it into a floating-point number with
4069 // exponent of 1.
4070 SDValue X = GetSignificand(DAG, Op1, dl);
4071
4072 SDValue LogOfMantissa;
4073 if (LimitFloatPrecision <= 6) {
4074 // For floating-point precision of 6:
4075 //
4076 // LogofMantissa =
4077 // -1.1609546f +
4078 // (1.4034025f - 0.23903021f * x) * x;
4079 //
4080 // error 0.0034276066, which is better than 8 bits
4081 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4082 getF32Constant(DAG, 0xbe74c456));
4083 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
4084 getF32Constant(DAG, 0x3fb3a2b1));
4085 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4086 LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
4087 getF32Constant(DAG, 0x3f949a29));
4088 } else if (LimitFloatPrecision <= 12) {
4089 // For floating-point precision of 12:
4090 //
4091 // LogOfMantissa =
4092 // -1.7417939f +
4093 // (2.8212026f +
4094 // (-1.4699568f +
4095 // (0.44717955f - 0.56570851e-1f * x) * x) * x) * x;
4096 //
4097 // error 0.000061011436, which is 14 bits
4098 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4099 getF32Constant(DAG, 0xbd67b6d6));
4100 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
4101 getF32Constant(DAG, 0x3ee4f4b8));
4102 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4103 SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
4104 getF32Constant(DAG, 0x3fbc278b));
4105 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4106 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4107 getF32Constant(DAG, 0x40348e95));
4108 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4109 LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
4110 getF32Constant(DAG, 0x3fdef31a));
4111 } else { // LimitFloatPrecision <= 18
4112 // For floating-point precision of 18:
4113 //
4114 // LogOfMantissa =
4115 // -2.1072184f +
4116 // (4.2372794f +
4117 // (-3.7029485f +
4118 // (2.2781945f +
4119 // (-0.87823314f +
4120 // (0.19073739f - 0.17809712e-1f * x) * x) * x) * x) * x)*x;
4121 //
4122 // error 0.0000023660568, which is better than 18 bits
4123 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4124 getF32Constant(DAG, 0xbc91e5ac));
4125 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
4126 getF32Constant(DAG, 0x3e4350aa));
4127 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4128 SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
4129 getF32Constant(DAG, 0x3f60d3e3));
4130 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4131 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4132 getF32Constant(DAG, 0x4011cdf0));
4133 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4134 SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
4135 getF32Constant(DAG, 0x406cfd1c));
4136 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
4137 SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
4138 getF32Constant(DAG, 0x408797cb));
4139 SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
4140 LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
4141 getF32Constant(DAG, 0x4006dcab));
4142 }
4143
4144 return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, LogOfMantissa);
4145 }
4146
4147 // No special expansion.
4148 return DAG.getNode(ISD::FLOG, dl, Op.getValueType(), Op);
4149 }
4150
4151 /// expandLog2 - Lower a log2 intrinsic. Handles the special sequences for
4152 /// limited-precision mode.
expandLog2(SDLoc dl,SDValue Op,SelectionDAG & DAG,const TargetLowering & TLI)4153 static SDValue expandLog2(SDLoc dl, SDValue Op, SelectionDAG &DAG,
4154 const TargetLowering &TLI) {
4155 if (Op.getValueType() == MVT::f32 &&
4156 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
4157 SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
4158
4159 // Get the exponent.
4160 SDValue LogOfExponent = GetExponent(DAG, Op1, TLI, dl);
4161
4162 // Get the significand and build it into a floating-point number with
4163 // exponent of 1.
4164 SDValue X = GetSignificand(DAG, Op1, dl);
4165
4166 // Different possible minimax approximations of significand in
4167 // floating-point for various degrees of accuracy over [1,2].
4168 SDValue Log2ofMantissa;
4169 if (LimitFloatPrecision <= 6) {
4170 // For floating-point precision of 6:
4171 //
4172 // Log2ofMantissa = -1.6749035f + (2.0246817f - .34484768f * x) * x;
4173 //
4174 // error 0.0049451742, which is more than 7 bits
4175 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4176 getF32Constant(DAG, 0xbeb08fe0));
4177 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
4178 getF32Constant(DAG, 0x40019463));
4179 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4180 Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
4181 getF32Constant(DAG, 0x3fd6633d));
4182 } else if (LimitFloatPrecision <= 12) {
4183 // For floating-point precision of 12:
4184 //
4185 // Log2ofMantissa =
4186 // -2.51285454f +
4187 // (4.07009056f +
4188 // (-2.12067489f +
4189 // (.645142248f - 0.816157886e-1f * x) * x) * x) * x;
4190 //
4191 // error 0.0000876136000, which is better than 13 bits
4192 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4193 getF32Constant(DAG, 0xbda7262e));
4194 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
4195 getF32Constant(DAG, 0x3f25280b));
4196 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4197 SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
4198 getF32Constant(DAG, 0x4007b923));
4199 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4200 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4201 getF32Constant(DAG, 0x40823e2f));
4202 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4203 Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
4204 getF32Constant(DAG, 0x4020d29c));
4205 } else { // LimitFloatPrecision <= 18
4206 // For floating-point precision of 18:
4207 //
4208 // Log2ofMantissa =
4209 // -3.0400495f +
4210 // (6.1129976f +
4211 // (-5.3420409f +
4212 // (3.2865683f +
4213 // (-1.2669343f +
4214 // (0.27515199f -
4215 // 0.25691327e-1f * x) * x) * x) * x) * x) * x;
4216 //
4217 // error 0.0000018516, which is better than 18 bits
4218 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4219 getF32Constant(DAG, 0xbcd2769e));
4220 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
4221 getF32Constant(DAG, 0x3e8ce0b9));
4222 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4223 SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
4224 getF32Constant(DAG, 0x3fa22ae7));
4225 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4226 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4227 getF32Constant(DAG, 0x40525723));
4228 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4229 SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
4230 getF32Constant(DAG, 0x40aaf200));
4231 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
4232 SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
4233 getF32Constant(DAG, 0x40c39dad));
4234 SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
4235 Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
4236 getF32Constant(DAG, 0x4042902c));
4237 }
4238
4239 return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, Log2ofMantissa);
4240 }
4241
4242 // No special expansion.
4243 return DAG.getNode(ISD::FLOG2, dl, Op.getValueType(), Op);
4244 }
4245
4246 /// expandLog10 - Lower a log10 intrinsic. Handles the special sequences for
4247 /// limited-precision mode.
expandLog10(SDLoc dl,SDValue Op,SelectionDAG & DAG,const TargetLowering & TLI)4248 static SDValue expandLog10(SDLoc dl, SDValue Op, SelectionDAG &DAG,
4249 const TargetLowering &TLI) {
4250 if (Op.getValueType() == MVT::f32 &&
4251 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
4252 SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
4253
4254 // Scale the exponent by log10(2) [0.30102999f].
4255 SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
4256 SDValue LogOfExponent = DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
4257 getF32Constant(DAG, 0x3e9a209a));
4258
4259 // Get the significand and build it into a floating-point number with
4260 // exponent of 1.
4261 SDValue X = GetSignificand(DAG, Op1, dl);
4262
4263 SDValue Log10ofMantissa;
4264 if (LimitFloatPrecision <= 6) {
4265 // For floating-point precision of 6:
4266 //
4267 // Log10ofMantissa =
4268 // -0.50419619f +
4269 // (0.60948995f - 0.10380950f * x) * x;
4270 //
4271 // error 0.0014886165, which is 6 bits
4272 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4273 getF32Constant(DAG, 0xbdd49a13));
4274 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
4275 getF32Constant(DAG, 0x3f1c0789));
4276 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4277 Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
4278 getF32Constant(DAG, 0x3f011300));
4279 } else if (LimitFloatPrecision <= 12) {
4280 // For floating-point precision of 12:
4281 //
4282 // Log10ofMantissa =
4283 // -0.64831180f +
4284 // (0.91751397f +
4285 // (-0.31664806f + 0.47637168e-1f * x) * x) * x;
4286 //
4287 // error 0.00019228036, which is better than 12 bits
4288 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4289 getF32Constant(DAG, 0x3d431f31));
4290 SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0,
4291 getF32Constant(DAG, 0x3ea21fb2));
4292 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4293 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
4294 getF32Constant(DAG, 0x3f6ae232));
4295 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4296 Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
4297 getF32Constant(DAG, 0x3f25f7c3));
4298 } else { // LimitFloatPrecision <= 18
4299 // For floating-point precision of 18:
4300 //
4301 // Log10ofMantissa =
4302 // -0.84299375f +
4303 // (1.5327582f +
4304 // (-1.0688956f +
4305 // (0.49102474f +
4306 // (-0.12539807f + 0.13508273e-1f * x) * x) * x) * x) * x;
4307 //
4308 // error 0.0000037995730, which is better than 18 bits
4309 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4310 getF32Constant(DAG, 0x3c5d51ce));
4311 SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0,
4312 getF32Constant(DAG, 0x3e00685a));
4313 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4314 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
4315 getF32Constant(DAG, 0x3efb6798));
4316 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4317 SDValue t5 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
4318 getF32Constant(DAG, 0x3f88d192));
4319 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4320 SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
4321 getF32Constant(DAG, 0x3fc4316c));
4322 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
4323 Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t8,
4324 getF32Constant(DAG, 0x3f57ce70));
4325 }
4326
4327 return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, Log10ofMantissa);
4328 }
4329
4330 // No special expansion.
4331 return DAG.getNode(ISD::FLOG10, dl, Op.getValueType(), Op);
4332 }
4333
4334 /// expandExp2 - Lower an exp2 intrinsic. Handles the special sequences for
4335 /// limited-precision mode.
expandExp2(SDLoc dl,SDValue Op,SelectionDAG & DAG,const TargetLowering & TLI)4336 static SDValue expandExp2(SDLoc dl, SDValue Op, SelectionDAG &DAG,
4337 const TargetLowering &TLI) {
4338 if (Op.getValueType() == MVT::f32 &&
4339 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
4340 SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, Op);
4341
4342 // FractionalPartOfX = x - (float)IntegerPartOfX;
4343 SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, IntegerPartOfX);
4344 SDValue X = DAG.getNode(ISD::FSUB, dl, MVT::f32, Op, t1);
4345
4346 // IntegerPartOfX <<= 23;
4347 IntegerPartOfX = DAG.getNode(ISD::SHL, dl, MVT::i32, IntegerPartOfX,
4348 DAG.getConstant(23, TLI.getPointerTy()));
4349
4350 SDValue TwoToFractionalPartOfX;
4351 if (LimitFloatPrecision <= 6) {
4352 // For floating-point precision of 6:
4353 //
4354 // TwoToFractionalPartOfX =
4355 // 0.997535578f +
4356 // (0.735607626f + 0.252464424f * x) * x;
4357 //
4358 // error 0.0144103317, which is 6 bits
4359 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4360 getF32Constant(DAG, 0x3e814304));
4361 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
4362 getF32Constant(DAG, 0x3f3c50c8));
4363 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4364 TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4365 getF32Constant(DAG, 0x3f7f5e7e));
4366 } else if (LimitFloatPrecision <= 12) {
4367 // For floating-point precision of 12:
4368 //
4369 // TwoToFractionalPartOfX =
4370 // 0.999892986f +
4371 // (0.696457318f +
4372 // (0.224338339f + 0.792043434e-1f * x) * x) * x;
4373 //
4374 // error 0.000107046256, which is 13 to 14 bits
4375 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4376 getF32Constant(DAG, 0x3da235e3));
4377 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
4378 getF32Constant(DAG, 0x3e65b8f3));
4379 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4380 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4381 getF32Constant(DAG, 0x3f324b07));
4382 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4383 TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
4384 getF32Constant(DAG, 0x3f7ff8fd));
4385 } else { // LimitFloatPrecision <= 18
4386 // For floating-point precision of 18:
4387 //
4388 // TwoToFractionalPartOfX =
4389 // 0.999999982f +
4390 // (0.693148872f +
4391 // (0.240227044f +
4392 // (0.554906021e-1f +
4393 // (0.961591928e-2f +
4394 // (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x;
4395 // error 2.47208000*10^(-7), which is better than 18 bits
4396 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4397 getF32Constant(DAG, 0x3924b03e));
4398 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
4399 getF32Constant(DAG, 0x3ab24b87));
4400 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4401 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4402 getF32Constant(DAG, 0x3c1d8c17));
4403 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4404 SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
4405 getF32Constant(DAG, 0x3d634a1d));
4406 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
4407 SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
4408 getF32Constant(DAG, 0x3e75fe14));
4409 SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
4410 SDValue t11 = DAG.getNode(ISD::FADD, dl, MVT::f32, t10,
4411 getF32Constant(DAG, 0x3f317234));
4412 SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X);
4413 TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t12,
4414 getF32Constant(DAG, 0x3f800000));
4415 }
4416
4417 // Add the exponent into the result in integer domain.
4418 SDValue t13 = DAG.getNode(ISD::BITCAST, dl, MVT::i32,
4419 TwoToFractionalPartOfX);
4420 return DAG.getNode(ISD::BITCAST, dl, MVT::f32,
4421 DAG.getNode(ISD::ADD, dl, MVT::i32,
4422 t13, IntegerPartOfX));
4423 }
4424
4425 // No special expansion.
4426 return DAG.getNode(ISD::FEXP2, dl, Op.getValueType(), Op);
4427 }
4428
4429 /// visitPow - Lower a pow intrinsic. Handles the special sequences for
4430 /// limited-precision mode with x == 10.0f.
expandPow(SDLoc dl,SDValue LHS,SDValue RHS,SelectionDAG & DAG,const TargetLowering & TLI)4431 static SDValue expandPow(SDLoc dl, SDValue LHS, SDValue RHS,
4432 SelectionDAG &DAG, const TargetLowering &TLI) {
4433 bool IsExp10 = false;
4434 if (LHS.getValueType() == MVT::f32 && RHS.getValueType() == MVT::f32 &&
4435 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
4436 if (ConstantFPSDNode *LHSC = dyn_cast<ConstantFPSDNode>(LHS)) {
4437 APFloat Ten(10.0f);
4438 IsExp10 = LHSC->isExactlyValue(Ten);
4439 }
4440 }
4441
4442 if (IsExp10) {
4443 // Put the exponent in the right bit position for later addition to the
4444 // final result:
4445 //
4446 // #define LOG2OF10 3.3219281f
4447 // IntegerPartOfX = (int32_t)(x * LOG2OF10);
4448 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, RHS,
4449 getF32Constant(DAG, 0x40549a78));
4450 SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, t0);
4451
4452 // FractionalPartOfX = x - (float)IntegerPartOfX;
4453 SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, IntegerPartOfX);
4454 SDValue X = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0, t1);
4455
4456 // IntegerPartOfX <<= 23;
4457 IntegerPartOfX = DAG.getNode(ISD::SHL, dl, MVT::i32, IntegerPartOfX,
4458 DAG.getConstant(23, TLI.getPointerTy()));
4459
4460 SDValue TwoToFractionalPartOfX;
4461 if (LimitFloatPrecision <= 6) {
4462 // For floating-point precision of 6:
4463 //
4464 // twoToFractionalPartOfX =
4465 // 0.997535578f +
4466 // (0.735607626f + 0.252464424f * x) * x;
4467 //
4468 // error 0.0144103317, which is 6 bits
4469 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4470 getF32Constant(DAG, 0x3e814304));
4471 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
4472 getF32Constant(DAG, 0x3f3c50c8));
4473 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4474 TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4475 getF32Constant(DAG, 0x3f7f5e7e));
4476 } else if (LimitFloatPrecision <= 12) {
4477 // For floating-point precision of 12:
4478 //
4479 // TwoToFractionalPartOfX =
4480 // 0.999892986f +
4481 // (0.696457318f +
4482 // (0.224338339f + 0.792043434e-1f * x) * x) * x;
4483 //
4484 // error 0.000107046256, which is 13 to 14 bits
4485 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4486 getF32Constant(DAG, 0x3da235e3));
4487 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
4488 getF32Constant(DAG, 0x3e65b8f3));
4489 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4490 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4491 getF32Constant(DAG, 0x3f324b07));
4492 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4493 TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
4494 getF32Constant(DAG, 0x3f7ff8fd));
4495 } else { // LimitFloatPrecision <= 18
4496 // For floating-point precision of 18:
4497 //
4498 // TwoToFractionalPartOfX =
4499 // 0.999999982f +
4500 // (0.693148872f +
4501 // (0.240227044f +
4502 // (0.554906021e-1f +
4503 // (0.961591928e-2f +
4504 // (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x;
4505 // error 2.47208000*10^(-7), which is better than 18 bits
4506 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4507 getF32Constant(DAG, 0x3924b03e));
4508 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
4509 getF32Constant(DAG, 0x3ab24b87));
4510 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4511 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4512 getF32Constant(DAG, 0x3c1d8c17));
4513 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4514 SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
4515 getF32Constant(DAG, 0x3d634a1d));
4516 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
4517 SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
4518 getF32Constant(DAG, 0x3e75fe14));
4519 SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
4520 SDValue t11 = DAG.getNode(ISD::FADD, dl, MVT::f32, t10,
4521 getF32Constant(DAG, 0x3f317234));
4522 SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X);
4523 TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t12,
4524 getF32Constant(DAG, 0x3f800000));
4525 }
4526
4527 SDValue t13 = DAG.getNode(ISD::BITCAST, dl,MVT::i32,TwoToFractionalPartOfX);
4528 return DAG.getNode(ISD::BITCAST, dl, MVT::f32,
4529 DAG.getNode(ISD::ADD, dl, MVT::i32,
4530 t13, IntegerPartOfX));
4531 }
4532
4533 // No special expansion.
4534 return DAG.getNode(ISD::FPOW, dl, LHS.getValueType(), LHS, RHS);
4535 }
4536
4537
4538 /// ExpandPowI - Expand a llvm.powi intrinsic.
ExpandPowI(SDLoc DL,SDValue LHS,SDValue RHS,SelectionDAG & DAG)4539 static SDValue ExpandPowI(SDLoc DL, SDValue LHS, SDValue RHS,
4540 SelectionDAG &DAG) {
4541 // If RHS is a constant, we can expand this out to a multiplication tree,
4542 // otherwise we end up lowering to a call to __powidf2 (for example). When
4543 // optimizing for size, we only want to do this if the expansion would produce
4544 // a small number of multiplies, otherwise we do the full expansion.
4545 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
4546 // Get the exponent as a positive value.
4547 unsigned Val = RHSC->getSExtValue();
4548 if ((int)Val < 0) Val = -Val;
4549
4550 // powi(x, 0) -> 1.0
4551 if (Val == 0)
4552 return DAG.getConstantFP(1.0, LHS.getValueType());
4553
4554 const Function *F = DAG.getMachineFunction().getFunction();
4555 if (!F->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
4556 Attribute::OptimizeForSize) ||
4557 // If optimizing for size, don't insert too many multiplies. This
4558 // inserts up to 5 multiplies.
4559 CountPopulation_32(Val)+Log2_32(Val) < 7) {
4560 // We use the simple binary decomposition method to generate the multiply
4561 // sequence. There are more optimal ways to do this (for example,
4562 // powi(x,15) generates one more multiply than it should), but this has
4563 // the benefit of being both really simple and much better than a libcall.
4564 SDValue Res; // Logically starts equal to 1.0
4565 SDValue CurSquare = LHS;
4566 while (Val) {
4567 if (Val & 1) {
4568 if (Res.getNode())
4569 Res = DAG.getNode(ISD::FMUL, DL,Res.getValueType(), Res, CurSquare);
4570 else
4571 Res = CurSquare; // 1.0*CurSquare.
4572 }
4573
4574 CurSquare = DAG.getNode(ISD::FMUL, DL, CurSquare.getValueType(),
4575 CurSquare, CurSquare);
4576 Val >>= 1;
4577 }
4578
4579 // If the original was negative, invert the result, producing 1/(x*x*x).
4580 if (RHSC->getSExtValue() < 0)
4581 Res = DAG.getNode(ISD::FDIV, DL, LHS.getValueType(),
4582 DAG.getConstantFP(1.0, LHS.getValueType()), Res);
4583 return Res;
4584 }
4585 }
4586
4587 // Otherwise, expand to a libcall.
4588 return DAG.getNode(ISD::FPOWI, DL, LHS.getValueType(), LHS, RHS);
4589 }
4590
4591 // getTruncatedArgReg - Find underlying register used for an truncated
4592 // argument.
getTruncatedArgReg(const SDValue & N)4593 static unsigned getTruncatedArgReg(const SDValue &N) {
4594 if (N.getOpcode() != ISD::TRUNCATE)
4595 return 0;
4596
4597 const SDValue &Ext = N.getOperand(0);
4598 if (Ext.getOpcode() == ISD::AssertZext ||
4599 Ext.getOpcode() == ISD::AssertSext) {
4600 const SDValue &CFR = Ext.getOperand(0);
4601 if (CFR.getOpcode() == ISD::CopyFromReg)
4602 return cast<RegisterSDNode>(CFR.getOperand(1))->getReg();
4603 if (CFR.getOpcode() == ISD::TRUNCATE)
4604 return getTruncatedArgReg(CFR);
4605 }
4606 return 0;
4607 }
4608
4609 /// EmitFuncArgumentDbgValue - If the DbgValueInst is a dbg_value of a function
4610 /// argument, create the corresponding DBG_VALUE machine instruction for it now.
4611 /// At the end of instruction selection, they will be inserted to the entry BB.
EmitFuncArgumentDbgValue(const Value * V,MDNode * Variable,MDNode * Expr,int64_t Offset,bool IsIndirect,const SDValue & N)4612 bool SelectionDAGBuilder::EmitFuncArgumentDbgValue(const Value *V,
4613 MDNode *Variable,
4614 MDNode *Expr, int64_t Offset,
4615 bool IsIndirect,
4616 const SDValue &N) {
4617 const Argument *Arg = dyn_cast<Argument>(V);
4618 if (!Arg)
4619 return false;
4620
4621 MachineFunction &MF = DAG.getMachineFunction();
4622 const TargetInstrInfo *TII = DAG.getSubtarget().getInstrInfo();
4623
4624 // Ignore inlined function arguments here.
4625 DIVariable DV(Variable);
4626 if (DV.isInlinedFnArgument(MF.getFunction()))
4627 return false;
4628
4629 Optional<MachineOperand> Op;
4630 // Some arguments' frame index is recorded during argument lowering.
4631 if (int FI = FuncInfo.getArgumentFrameIndex(Arg))
4632 Op = MachineOperand::CreateFI(FI);
4633
4634 if (!Op && N.getNode()) {
4635 unsigned Reg;
4636 if (N.getOpcode() == ISD::CopyFromReg)
4637 Reg = cast<RegisterSDNode>(N.getOperand(1))->getReg();
4638 else
4639 Reg = getTruncatedArgReg(N);
4640 if (Reg && TargetRegisterInfo::isVirtualRegister(Reg)) {
4641 MachineRegisterInfo &RegInfo = MF.getRegInfo();
4642 unsigned PR = RegInfo.getLiveInPhysReg(Reg);
4643 if (PR)
4644 Reg = PR;
4645 }
4646 if (Reg)
4647 Op = MachineOperand::CreateReg(Reg, false);
4648 }
4649
4650 if (!Op) {
4651 // Check if ValueMap has reg number.
4652 DenseMap<const Value *, unsigned>::iterator VMI = FuncInfo.ValueMap.find(V);
4653 if (VMI != FuncInfo.ValueMap.end())
4654 Op = MachineOperand::CreateReg(VMI->second, false);
4655 }
4656
4657 if (!Op && N.getNode())
4658 // Check if frame index is available.
4659 if (LoadSDNode *LNode = dyn_cast<LoadSDNode>(N.getNode()))
4660 if (FrameIndexSDNode *FINode =
4661 dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode()))
4662 Op = MachineOperand::CreateFI(FINode->getIndex());
4663
4664 if (!Op)
4665 return false;
4666
4667 if (Op->isReg())
4668 FuncInfo.ArgDbgValues.push_back(
4669 BuildMI(MF, getCurDebugLoc(), TII->get(TargetOpcode::DBG_VALUE),
4670 IsIndirect, Op->getReg(), Offset, Variable, Expr));
4671 else
4672 FuncInfo.ArgDbgValues.push_back(
4673 BuildMI(MF, getCurDebugLoc(), TII->get(TargetOpcode::DBG_VALUE))
4674 .addOperand(*Op)
4675 .addImm(Offset)
4676 .addMetadata(Variable)
4677 .addMetadata(Expr));
4678
4679 return true;
4680 }
4681
4682 // VisualStudio defines setjmp as _setjmp
4683 #if defined(_MSC_VER) && defined(setjmp) && \
4684 !defined(setjmp_undefined_for_msvc)
4685 # pragma push_macro("setjmp")
4686 # undef setjmp
4687 # define setjmp_undefined_for_msvc
4688 #endif
4689
4690 /// visitIntrinsicCall - Lower the call to the specified intrinsic function. If
4691 /// we want to emit this as a call to a named external function, return the name
4692 /// otherwise lower it and return null.
4693 const char *
visitIntrinsicCall(const CallInst & I,unsigned Intrinsic)4694 SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
4695 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4696 SDLoc sdl = getCurSDLoc();
4697 DebugLoc dl = getCurDebugLoc();
4698 SDValue Res;
4699
4700 switch (Intrinsic) {
4701 default:
4702 // By default, turn this into a target intrinsic node.
4703 visitTargetIntrinsic(I, Intrinsic);
4704 return nullptr;
4705 case Intrinsic::vastart: visitVAStart(I); return nullptr;
4706 case Intrinsic::vaend: visitVAEnd(I); return nullptr;
4707 case Intrinsic::vacopy: visitVACopy(I); return nullptr;
4708 case Intrinsic::returnaddress:
4709 setValue(&I, DAG.getNode(ISD::RETURNADDR, sdl, TLI.getPointerTy(),
4710 getValue(I.getArgOperand(0))));
4711 return nullptr;
4712 case Intrinsic::frameaddress:
4713 setValue(&I, DAG.getNode(ISD::FRAMEADDR, sdl, TLI.getPointerTy(),
4714 getValue(I.getArgOperand(0))));
4715 return nullptr;
4716 case Intrinsic::read_register: {
4717 Value *Reg = I.getArgOperand(0);
4718 SDValue RegName =
4719 DAG.getMDNode(cast<MDNode>(cast<MetadataAsValue>(Reg)->getMetadata()));
4720 EVT VT = TLI.getValueType(I.getType());
4721 setValue(&I, DAG.getNode(ISD::READ_REGISTER, sdl, VT, RegName));
4722 return nullptr;
4723 }
4724 case Intrinsic::write_register: {
4725 Value *Reg = I.getArgOperand(0);
4726 Value *RegValue = I.getArgOperand(1);
4727 SDValue Chain = getValue(RegValue).getOperand(0);
4728 SDValue RegName =
4729 DAG.getMDNode(cast<MDNode>(cast<MetadataAsValue>(Reg)->getMetadata()));
4730 DAG.setRoot(DAG.getNode(ISD::WRITE_REGISTER, sdl, MVT::Other, Chain,
4731 RegName, getValue(RegValue)));
4732 return nullptr;
4733 }
4734 case Intrinsic::setjmp:
4735 return &"_setjmp"[!TLI.usesUnderscoreSetJmp()];
4736 case Intrinsic::longjmp:
4737 return &"_longjmp"[!TLI.usesUnderscoreLongJmp()];
4738 case Intrinsic::memcpy: {
4739 // Assert for address < 256 since we support only user defined address
4740 // spaces.
4741 assert(cast<PointerType>(I.getArgOperand(0)->getType())->getAddressSpace()
4742 < 256 &&
4743 cast<PointerType>(I.getArgOperand(1)->getType())->getAddressSpace()
4744 < 256 &&
4745 "Unknown address space");
4746 SDValue Op1 = getValue(I.getArgOperand(0));
4747 SDValue Op2 = getValue(I.getArgOperand(1));
4748 SDValue Op3 = getValue(I.getArgOperand(2));
4749 unsigned Align = cast<ConstantInt>(I.getArgOperand(3))->getZExtValue();
4750 if (!Align)
4751 Align = 1; // @llvm.memcpy defines 0 and 1 to both mean no alignment.
4752 bool isVol = cast<ConstantInt>(I.getArgOperand(4))->getZExtValue();
4753 DAG.setRoot(DAG.getMemcpy(getRoot(), sdl, Op1, Op2, Op3, Align, isVol, false,
4754 MachinePointerInfo(I.getArgOperand(0)),
4755 MachinePointerInfo(I.getArgOperand(1))));
4756 return nullptr;
4757 }
4758 case Intrinsic::memset: {
4759 // Assert for address < 256 since we support only user defined address
4760 // spaces.
4761 assert(cast<PointerType>(I.getArgOperand(0)->getType())->getAddressSpace()
4762 < 256 &&
4763 "Unknown address space");
4764 SDValue Op1 = getValue(I.getArgOperand(0));
4765 SDValue Op2 = getValue(I.getArgOperand(1));
4766 SDValue Op3 = getValue(I.getArgOperand(2));
4767 unsigned Align = cast<ConstantInt>(I.getArgOperand(3))->getZExtValue();
4768 if (!Align)
4769 Align = 1; // @llvm.memset defines 0 and 1 to both mean no alignment.
4770 bool isVol = cast<ConstantInt>(I.getArgOperand(4))->getZExtValue();
4771 DAG.setRoot(DAG.getMemset(getRoot(), sdl, Op1, Op2, Op3, Align, isVol,
4772 MachinePointerInfo(I.getArgOperand(0))));
4773 return nullptr;
4774 }
4775 case Intrinsic::memmove: {
4776 // Assert for address < 256 since we support only user defined address
4777 // spaces.
4778 assert(cast<PointerType>(I.getArgOperand(0)->getType())->getAddressSpace()
4779 < 256 &&
4780 cast<PointerType>(I.getArgOperand(1)->getType())->getAddressSpace()
4781 < 256 &&
4782 "Unknown address space");
4783 SDValue Op1 = getValue(I.getArgOperand(0));
4784 SDValue Op2 = getValue(I.getArgOperand(1));
4785 SDValue Op3 = getValue(I.getArgOperand(2));
4786 unsigned Align = cast<ConstantInt>(I.getArgOperand(3))->getZExtValue();
4787 if (!Align)
4788 Align = 1; // @llvm.memmove defines 0 and 1 to both mean no alignment.
4789 bool isVol = cast<ConstantInt>(I.getArgOperand(4))->getZExtValue();
4790 DAG.setRoot(DAG.getMemmove(getRoot(), sdl, Op1, Op2, Op3, Align, isVol,
4791 MachinePointerInfo(I.getArgOperand(0)),
4792 MachinePointerInfo(I.getArgOperand(1))));
4793 return nullptr;
4794 }
4795 case Intrinsic::dbg_declare: {
4796 const DbgDeclareInst &DI = cast<DbgDeclareInst>(I);
4797 MDNode *Variable = DI.getVariable();
4798 MDNode *Expression = DI.getExpression();
4799 const Value *Address = DI.getAddress();
4800 DIVariable DIVar(Variable);
4801 assert((!DIVar || DIVar.isVariable()) &&
4802 "Variable in DbgDeclareInst should be either null or a DIVariable.");
4803 if (!Address || !DIVar) {
4804 DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
4805 return nullptr;
4806 }
4807
4808 // Check if address has undef value.
4809 if (isa<UndefValue>(Address) ||
4810 (Address->use_empty() && !isa<Argument>(Address))) {
4811 DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
4812 return nullptr;
4813 }
4814
4815 SDValue &N = NodeMap[Address];
4816 if (!N.getNode() && isa<Argument>(Address))
4817 // Check unused arguments map.
4818 N = UnusedArgNodeMap[Address];
4819 SDDbgValue *SDV;
4820 if (N.getNode()) {
4821 if (const BitCastInst *BCI = dyn_cast<BitCastInst>(Address))
4822 Address = BCI->getOperand(0);
4823 // Parameters are handled specially.
4824 bool isParameter =
4825 (DIVariable(Variable).getTag() == dwarf::DW_TAG_arg_variable ||
4826 isa<Argument>(Address));
4827
4828 const AllocaInst *AI = dyn_cast<AllocaInst>(Address);
4829
4830 if (isParameter && !AI) {
4831 FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(N.getNode());
4832 if (FINode)
4833 // Byval parameter. We have a frame index at this point.
4834 SDV = DAG.getFrameIndexDbgValue(
4835 Variable, Expression, FINode->getIndex(), 0, dl, SDNodeOrder);
4836 else {
4837 // Address is an argument, so try to emit its dbg value using
4838 // virtual register info from the FuncInfo.ValueMap.
4839 EmitFuncArgumentDbgValue(Address, Variable, Expression, 0, false, N);
4840 return nullptr;
4841 }
4842 } else if (AI)
4843 SDV = DAG.getDbgValue(Variable, Expression, N.getNode(), N.getResNo(),
4844 true, 0, dl, SDNodeOrder);
4845 else {
4846 // Can't do anything with other non-AI cases yet.
4847 DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
4848 DEBUG(dbgs() << "non-AllocaInst issue for Address: \n\t");
4849 DEBUG(Address->dump());
4850 return nullptr;
4851 }
4852 DAG.AddDbgValue(SDV, N.getNode(), isParameter);
4853 } else {
4854 // If Address is an argument then try to emit its dbg value using
4855 // virtual register info from the FuncInfo.ValueMap.
4856 if (!EmitFuncArgumentDbgValue(Address, Variable, Expression, 0, false,
4857 N)) {
4858 // If variable is pinned by a alloca in dominating bb then
4859 // use StaticAllocaMap.
4860 if (const AllocaInst *AI = dyn_cast<AllocaInst>(Address)) {
4861 if (AI->getParent() != DI.getParent()) {
4862 DenseMap<const AllocaInst*, int>::iterator SI =
4863 FuncInfo.StaticAllocaMap.find(AI);
4864 if (SI != FuncInfo.StaticAllocaMap.end()) {
4865 SDV = DAG.getFrameIndexDbgValue(Variable, Expression, SI->second,
4866 0, dl, SDNodeOrder);
4867 DAG.AddDbgValue(SDV, nullptr, false);
4868 return nullptr;
4869 }
4870 }
4871 }
4872 DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
4873 }
4874 }
4875 return nullptr;
4876 }
4877 case Intrinsic::dbg_value: {
4878 const DbgValueInst &DI = cast<DbgValueInst>(I);
4879 DIVariable DIVar(DI.getVariable());
4880 assert((!DIVar || DIVar.isVariable()) &&
4881 "Variable in DbgValueInst should be either null or a DIVariable.");
4882 if (!DIVar)
4883 return nullptr;
4884
4885 MDNode *Variable = DI.getVariable();
4886 MDNode *Expression = DI.getExpression();
4887 uint64_t Offset = DI.getOffset();
4888 const Value *V = DI.getValue();
4889 if (!V)
4890 return nullptr;
4891
4892 SDDbgValue *SDV;
4893 if (isa<ConstantInt>(V) || isa<ConstantFP>(V) || isa<UndefValue>(V)) {
4894 SDV = DAG.getConstantDbgValue(Variable, Expression, V, Offset, dl,
4895 SDNodeOrder);
4896 DAG.AddDbgValue(SDV, nullptr, false);
4897 } else {
4898 // Do not use getValue() in here; we don't want to generate code at
4899 // this point if it hasn't been done yet.
4900 SDValue N = NodeMap[V];
4901 if (!N.getNode() && isa<Argument>(V))
4902 // Check unused arguments map.
4903 N = UnusedArgNodeMap[V];
4904 if (N.getNode()) {
4905 // A dbg.value for an alloca is always indirect.
4906 bool IsIndirect = isa<AllocaInst>(V) || Offset != 0;
4907 if (!EmitFuncArgumentDbgValue(V, Variable, Expression, Offset,
4908 IsIndirect, N)) {
4909 SDV = DAG.getDbgValue(Variable, Expression, N.getNode(), N.getResNo(),
4910 IsIndirect, Offset, dl, SDNodeOrder);
4911 DAG.AddDbgValue(SDV, N.getNode(), false);
4912 }
4913 } else if (!V->use_empty() ) {
4914 // Do not call getValue(V) yet, as we don't want to generate code.
4915 // Remember it for later.
4916 DanglingDebugInfo DDI(&DI, dl, SDNodeOrder);
4917 DanglingDebugInfoMap[V] = DDI;
4918 } else {
4919 // We may expand this to cover more cases. One case where we have no
4920 // data available is an unreferenced parameter.
4921 DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
4922 }
4923 }
4924
4925 // Build a debug info table entry.
4926 if (const BitCastInst *BCI = dyn_cast<BitCastInst>(V))
4927 V = BCI->getOperand(0);
4928 const AllocaInst *AI = dyn_cast<AllocaInst>(V);
4929 // Don't handle byval struct arguments or VLAs, for example.
4930 if (!AI) {
4931 DEBUG(dbgs() << "Dropping debug location info for:\n " << DI << "\n");
4932 DEBUG(dbgs() << " Last seen at:\n " << *V << "\n");
4933 return nullptr;
4934 }
4935 DenseMap<const AllocaInst*, int>::iterator SI =
4936 FuncInfo.StaticAllocaMap.find(AI);
4937 if (SI == FuncInfo.StaticAllocaMap.end())
4938 return nullptr; // VLAs.
4939 return nullptr;
4940 }
4941
4942 case Intrinsic::eh_typeid_for: {
4943 // Find the type id for the given typeinfo.
4944 GlobalValue *GV = ExtractTypeInfo(I.getArgOperand(0));
4945 unsigned TypeID = DAG.getMachineFunction().getMMI().getTypeIDFor(GV);
4946 Res = DAG.getConstant(TypeID, MVT::i32);
4947 setValue(&I, Res);
4948 return nullptr;
4949 }
4950
4951 case Intrinsic::eh_return_i32:
4952 case Intrinsic::eh_return_i64:
4953 DAG.getMachineFunction().getMMI().setCallsEHReturn(true);
4954 DAG.setRoot(DAG.getNode(ISD::EH_RETURN, sdl,
4955 MVT::Other,
4956 getControlRoot(),
4957 getValue(I.getArgOperand(0)),
4958 getValue(I.getArgOperand(1))));
4959 return nullptr;
4960 case Intrinsic::eh_unwind_init:
4961 DAG.getMachineFunction().getMMI().setCallsUnwindInit(true);
4962 return nullptr;
4963 case Intrinsic::eh_dwarf_cfa: {
4964 SDValue CfaArg = DAG.getSExtOrTrunc(getValue(I.getArgOperand(0)), sdl,
4965 TLI.getPointerTy());
4966 SDValue Offset = DAG.getNode(ISD::ADD, sdl,
4967 CfaArg.getValueType(),
4968 DAG.getNode(ISD::FRAME_TO_ARGS_OFFSET, sdl,
4969 CfaArg.getValueType()),
4970 CfaArg);
4971 SDValue FA = DAG.getNode(ISD::FRAMEADDR, sdl, TLI.getPointerTy(),
4972 DAG.getConstant(0, TLI.getPointerTy()));
4973 setValue(&I, DAG.getNode(ISD::ADD, sdl, FA.getValueType(),
4974 FA, Offset));
4975 return nullptr;
4976 }
4977 case Intrinsic::eh_sjlj_callsite: {
4978 MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI();
4979 ConstantInt *CI = dyn_cast<ConstantInt>(I.getArgOperand(0));
4980 assert(CI && "Non-constant call site value in eh.sjlj.callsite!");
4981 assert(MMI.getCurrentCallSite() == 0 && "Overlapping call sites!");
4982
4983 MMI.setCurrentCallSite(CI->getZExtValue());
4984 return nullptr;
4985 }
4986 case Intrinsic::eh_sjlj_functioncontext: {
4987 // Get and store the index of the function context.
4988 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
4989 AllocaInst *FnCtx =
4990 cast<AllocaInst>(I.getArgOperand(0)->stripPointerCasts());
4991 int FI = FuncInfo.StaticAllocaMap[FnCtx];
4992 MFI->setFunctionContextIndex(FI);
4993 return nullptr;
4994 }
4995 case Intrinsic::eh_sjlj_setjmp: {
4996 SDValue Ops[2];
4997 Ops[0] = getRoot();
4998 Ops[1] = getValue(I.getArgOperand(0));
4999 SDValue Op = DAG.getNode(ISD::EH_SJLJ_SETJMP, sdl,
5000 DAG.getVTList(MVT::i32, MVT::Other), Ops);
5001 setValue(&I, Op.getValue(0));
5002 DAG.setRoot(Op.getValue(1));
5003 return nullptr;
5004 }
5005 case Intrinsic::eh_sjlj_longjmp: {
5006 DAG.setRoot(DAG.getNode(ISD::EH_SJLJ_LONGJMP, sdl, MVT::Other,
5007 getRoot(), getValue(I.getArgOperand(0))));
5008 return nullptr;
5009 }
5010
5011 case Intrinsic::masked_load:
5012 visitMaskedLoad(I);
5013 return nullptr;
5014 case Intrinsic::masked_store:
5015 visitMaskedStore(I);
5016 return nullptr;
5017 case Intrinsic::x86_mmx_pslli_w:
5018 case Intrinsic::x86_mmx_pslli_d:
5019 case Intrinsic::x86_mmx_pslli_q:
5020 case Intrinsic::x86_mmx_psrli_w:
5021 case Intrinsic::x86_mmx_psrli_d:
5022 case Intrinsic::x86_mmx_psrli_q:
5023 case Intrinsic::x86_mmx_psrai_w:
5024 case Intrinsic::x86_mmx_psrai_d: {
5025 SDValue ShAmt = getValue(I.getArgOperand(1));
5026 if (isa<ConstantSDNode>(ShAmt)) {
5027 visitTargetIntrinsic(I, Intrinsic);
5028 return nullptr;
5029 }
5030 unsigned NewIntrinsic = 0;
5031 EVT ShAmtVT = MVT::v2i32;
5032 switch (Intrinsic) {
5033 case Intrinsic::x86_mmx_pslli_w:
5034 NewIntrinsic = Intrinsic::x86_mmx_psll_w;
5035 break;
5036 case Intrinsic::x86_mmx_pslli_d:
5037 NewIntrinsic = Intrinsic::x86_mmx_psll_d;
5038 break;
5039 case Intrinsic::x86_mmx_pslli_q:
5040 NewIntrinsic = Intrinsic::x86_mmx_psll_q;
5041 break;
5042 case Intrinsic::x86_mmx_psrli_w:
5043 NewIntrinsic = Intrinsic::x86_mmx_psrl_w;
5044 break;
5045 case Intrinsic::x86_mmx_psrli_d:
5046 NewIntrinsic = Intrinsic::x86_mmx_psrl_d;
5047 break;
5048 case Intrinsic::x86_mmx_psrli_q:
5049 NewIntrinsic = Intrinsic::x86_mmx_psrl_q;
5050 break;
5051 case Intrinsic::x86_mmx_psrai_w:
5052 NewIntrinsic = Intrinsic::x86_mmx_psra_w;
5053 break;
5054 case Intrinsic::x86_mmx_psrai_d:
5055 NewIntrinsic = Intrinsic::x86_mmx_psra_d;
5056 break;
5057 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
5058 }
5059
5060 // The vector shift intrinsics with scalars uses 32b shift amounts but
5061 // the sse2/mmx shift instructions reads 64 bits. Set the upper 32 bits
5062 // to be zero.
5063 // We must do this early because v2i32 is not a legal type.
5064 SDValue ShOps[2];
5065 ShOps[0] = ShAmt;
5066 ShOps[1] = DAG.getConstant(0, MVT::i32);
5067 ShAmt = DAG.getNode(ISD::BUILD_VECTOR, sdl, ShAmtVT, ShOps);
5068 EVT DestVT = TLI.getValueType(I.getType());
5069 ShAmt = DAG.getNode(ISD::BITCAST, sdl, DestVT, ShAmt);
5070 Res = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, sdl, DestVT,
5071 DAG.getConstant(NewIntrinsic, MVT::i32),
5072 getValue(I.getArgOperand(0)), ShAmt);
5073 setValue(&I, Res);
5074 return nullptr;
5075 }
5076 case Intrinsic::x86_avx_vinsertf128_pd_256:
5077 case Intrinsic::x86_avx_vinsertf128_ps_256:
5078 case Intrinsic::x86_avx_vinsertf128_si_256:
5079 case Intrinsic::x86_avx2_vinserti128: {
5080 EVT DestVT = TLI.getValueType(I.getType());
5081 EVT ElVT = TLI.getValueType(I.getArgOperand(1)->getType());
5082 uint64_t Idx = (cast<ConstantInt>(I.getArgOperand(2))->getZExtValue() & 1) *
5083 ElVT.getVectorNumElements();
5084 Res =
5085 DAG.getNode(ISD::INSERT_SUBVECTOR, sdl, DestVT,
5086 getValue(I.getArgOperand(0)), getValue(I.getArgOperand(1)),
5087 DAG.getConstant(Idx, TLI.getVectorIdxTy()));
5088 setValue(&I, Res);
5089 return nullptr;
5090 }
5091 case Intrinsic::x86_avx_vextractf128_pd_256:
5092 case Intrinsic::x86_avx_vextractf128_ps_256:
5093 case Intrinsic::x86_avx_vextractf128_si_256:
5094 case Intrinsic::x86_avx2_vextracti128: {
5095 EVT DestVT = TLI.getValueType(I.getType());
5096 uint64_t Idx = (cast<ConstantInt>(I.getArgOperand(1))->getZExtValue() & 1) *
5097 DestVT.getVectorNumElements();
5098 Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, sdl, DestVT,
5099 getValue(I.getArgOperand(0)),
5100 DAG.getConstant(Idx, TLI.getVectorIdxTy()));
5101 setValue(&I, Res);
5102 return nullptr;
5103 }
5104 case Intrinsic::convertff:
5105 case Intrinsic::convertfsi:
5106 case Intrinsic::convertfui:
5107 case Intrinsic::convertsif:
5108 case Intrinsic::convertuif:
5109 case Intrinsic::convertss:
5110 case Intrinsic::convertsu:
5111 case Intrinsic::convertus:
5112 case Intrinsic::convertuu: {
5113 ISD::CvtCode Code = ISD::CVT_INVALID;
5114 switch (Intrinsic) {
5115 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
5116 case Intrinsic::convertff: Code = ISD::CVT_FF; break;
5117 case Intrinsic::convertfsi: Code = ISD::CVT_FS; break;
5118 case Intrinsic::convertfui: Code = ISD::CVT_FU; break;
5119 case Intrinsic::convertsif: Code = ISD::CVT_SF; break;
5120 case Intrinsic::convertuif: Code = ISD::CVT_UF; break;
5121 case Intrinsic::convertss: Code = ISD::CVT_SS; break;
5122 case Intrinsic::convertsu: Code = ISD::CVT_SU; break;
5123 case Intrinsic::convertus: Code = ISD::CVT_US; break;
5124 case Intrinsic::convertuu: Code = ISD::CVT_UU; break;
5125 }
5126 EVT DestVT = TLI.getValueType(I.getType());
5127 const Value *Op1 = I.getArgOperand(0);
5128 Res = DAG.getConvertRndSat(DestVT, sdl, getValue(Op1),
5129 DAG.getValueType(DestVT),
5130 DAG.getValueType(getValue(Op1).getValueType()),
5131 getValue(I.getArgOperand(1)),
5132 getValue(I.getArgOperand(2)),
5133 Code);
5134 setValue(&I, Res);
5135 return nullptr;
5136 }
5137 case Intrinsic::powi:
5138 setValue(&I, ExpandPowI(sdl, getValue(I.getArgOperand(0)),
5139 getValue(I.getArgOperand(1)), DAG));
5140 return nullptr;
5141 case Intrinsic::log:
5142 setValue(&I, expandLog(sdl, getValue(I.getArgOperand(0)), DAG, TLI));
5143 return nullptr;
5144 case Intrinsic::log2:
5145 setValue(&I, expandLog2(sdl, getValue(I.getArgOperand(0)), DAG, TLI));
5146 return nullptr;
5147 case Intrinsic::log10:
5148 setValue(&I, expandLog10(sdl, getValue(I.getArgOperand(0)), DAG, TLI));
5149 return nullptr;
5150 case Intrinsic::exp:
5151 setValue(&I, expandExp(sdl, getValue(I.getArgOperand(0)), DAG, TLI));
5152 return nullptr;
5153 case Intrinsic::exp2:
5154 setValue(&I, expandExp2(sdl, getValue(I.getArgOperand(0)), DAG, TLI));
5155 return nullptr;
5156 case Intrinsic::pow:
5157 setValue(&I, expandPow(sdl, getValue(I.getArgOperand(0)),
5158 getValue(I.getArgOperand(1)), DAG, TLI));
5159 return nullptr;
5160 case Intrinsic::sqrt:
5161 case Intrinsic::fabs:
5162 case Intrinsic::sin:
5163 case Intrinsic::cos:
5164 case Intrinsic::floor:
5165 case Intrinsic::ceil:
5166 case Intrinsic::trunc:
5167 case Intrinsic::rint:
5168 case Intrinsic::nearbyint:
5169 case Intrinsic::round: {
5170 unsigned Opcode;
5171 switch (Intrinsic) {
5172 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
5173 case Intrinsic::sqrt: Opcode = ISD::FSQRT; break;
5174 case Intrinsic::fabs: Opcode = ISD::FABS; break;
5175 case Intrinsic::sin: Opcode = ISD::FSIN; break;
5176 case Intrinsic::cos: Opcode = ISD::FCOS; break;
5177 case Intrinsic::floor: Opcode = ISD::FFLOOR; break;
5178 case Intrinsic::ceil: Opcode = ISD::FCEIL; break;
5179 case Intrinsic::trunc: Opcode = ISD::FTRUNC; break;
5180 case Intrinsic::rint: Opcode = ISD::FRINT; break;
5181 case Intrinsic::nearbyint: Opcode = ISD::FNEARBYINT; break;
5182 case Intrinsic::round: Opcode = ISD::FROUND; break;
5183 }
5184
5185 setValue(&I, DAG.getNode(Opcode, sdl,
5186 getValue(I.getArgOperand(0)).getValueType(),
5187 getValue(I.getArgOperand(0))));
5188 return nullptr;
5189 }
5190 case Intrinsic::minnum:
5191 setValue(&I, DAG.getNode(ISD::FMINNUM, sdl,
5192 getValue(I.getArgOperand(0)).getValueType(),
5193 getValue(I.getArgOperand(0)),
5194 getValue(I.getArgOperand(1))));
5195 return nullptr;
5196 case Intrinsic::maxnum:
5197 setValue(&I, DAG.getNode(ISD::FMAXNUM, sdl,
5198 getValue(I.getArgOperand(0)).getValueType(),
5199 getValue(I.getArgOperand(0)),
5200 getValue(I.getArgOperand(1))));
5201 return nullptr;
5202 case Intrinsic::copysign:
5203 setValue(&I, DAG.getNode(ISD::FCOPYSIGN, sdl,
5204 getValue(I.getArgOperand(0)).getValueType(),
5205 getValue(I.getArgOperand(0)),
5206 getValue(I.getArgOperand(1))));
5207 return nullptr;
5208 case Intrinsic::fma:
5209 setValue(&I, DAG.getNode(ISD::FMA, sdl,
5210 getValue(I.getArgOperand(0)).getValueType(),
5211 getValue(I.getArgOperand(0)),
5212 getValue(I.getArgOperand(1)),
5213 getValue(I.getArgOperand(2))));
5214 return nullptr;
5215 case Intrinsic::fmuladd: {
5216 EVT VT = TLI.getValueType(I.getType());
5217 if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
5218 TLI.isFMAFasterThanFMulAndFAdd(VT)) {
5219 setValue(&I, DAG.getNode(ISD::FMA, sdl,
5220 getValue(I.getArgOperand(0)).getValueType(),
5221 getValue(I.getArgOperand(0)),
5222 getValue(I.getArgOperand(1)),
5223 getValue(I.getArgOperand(2))));
5224 } else {
5225 SDValue Mul = DAG.getNode(ISD::FMUL, sdl,
5226 getValue(I.getArgOperand(0)).getValueType(),
5227 getValue(I.getArgOperand(0)),
5228 getValue(I.getArgOperand(1)));
5229 SDValue Add = DAG.getNode(ISD::FADD, sdl,
5230 getValue(I.getArgOperand(0)).getValueType(),
5231 Mul,
5232 getValue(I.getArgOperand(2)));
5233 setValue(&I, Add);
5234 }
5235 return nullptr;
5236 }
5237 case Intrinsic::convert_to_fp16:
5238 setValue(&I, DAG.getNode(ISD::BITCAST, sdl, MVT::i16,
5239 DAG.getNode(ISD::FP_ROUND, sdl, MVT::f16,
5240 getValue(I.getArgOperand(0)),
5241 DAG.getTargetConstant(0, MVT::i32))));
5242 return nullptr;
5243 case Intrinsic::convert_from_fp16:
5244 setValue(&I,
5245 DAG.getNode(ISD::FP_EXTEND, sdl, TLI.getValueType(I.getType()),
5246 DAG.getNode(ISD::BITCAST, sdl, MVT::f16,
5247 getValue(I.getArgOperand(0)))));
5248 return nullptr;
5249 case Intrinsic::pcmarker: {
5250 SDValue Tmp = getValue(I.getArgOperand(0));
5251 DAG.setRoot(DAG.getNode(ISD::PCMARKER, sdl, MVT::Other, getRoot(), Tmp));
5252 return nullptr;
5253 }
5254 case Intrinsic::readcyclecounter: {
5255 SDValue Op = getRoot();
5256 Res = DAG.getNode(ISD::READCYCLECOUNTER, sdl,
5257 DAG.getVTList(MVT::i64, MVT::Other), Op);
5258 setValue(&I, Res);
5259 DAG.setRoot(Res.getValue(1));
5260 return nullptr;
5261 }
5262 case Intrinsic::bswap:
5263 setValue(&I, DAG.getNode(ISD::BSWAP, sdl,
5264 getValue(I.getArgOperand(0)).getValueType(),
5265 getValue(I.getArgOperand(0))));
5266 return nullptr;
5267 case Intrinsic::cttz: {
5268 SDValue Arg = getValue(I.getArgOperand(0));
5269 ConstantInt *CI = cast<ConstantInt>(I.getArgOperand(1));
5270 EVT Ty = Arg.getValueType();
5271 setValue(&I, DAG.getNode(CI->isZero() ? ISD::CTTZ : ISD::CTTZ_ZERO_UNDEF,
5272 sdl, Ty, Arg));
5273 return nullptr;
5274 }
5275 case Intrinsic::ctlz: {
5276 SDValue Arg = getValue(I.getArgOperand(0));
5277 ConstantInt *CI = cast<ConstantInt>(I.getArgOperand(1));
5278 EVT Ty = Arg.getValueType();
5279 setValue(&I, DAG.getNode(CI->isZero() ? ISD::CTLZ : ISD::CTLZ_ZERO_UNDEF,
5280 sdl, Ty, Arg));
5281 return nullptr;
5282 }
5283 case Intrinsic::ctpop: {
5284 SDValue Arg = getValue(I.getArgOperand(0));
5285 EVT Ty = Arg.getValueType();
5286 setValue(&I, DAG.getNode(ISD::CTPOP, sdl, Ty, Arg));
5287 return nullptr;
5288 }
5289 case Intrinsic::stacksave: {
5290 SDValue Op = getRoot();
5291 Res = DAG.getNode(ISD::STACKSAVE, sdl,
5292 DAG.getVTList(TLI.getPointerTy(), MVT::Other), Op);
5293 setValue(&I, Res);
5294 DAG.setRoot(Res.getValue(1));
5295 return nullptr;
5296 }
5297 case Intrinsic::stackrestore: {
5298 Res = getValue(I.getArgOperand(0));
5299 DAG.setRoot(DAG.getNode(ISD::STACKRESTORE, sdl, MVT::Other, getRoot(), Res));
5300 return nullptr;
5301 }
5302 case Intrinsic::stackprotector: {
5303 // Emit code into the DAG to store the stack guard onto the stack.
5304 MachineFunction &MF = DAG.getMachineFunction();
5305 MachineFrameInfo *MFI = MF.getFrameInfo();
5306 EVT PtrTy = TLI.getPointerTy();
5307 SDValue Src, Chain = getRoot();
5308 const Value *Ptr = cast<LoadInst>(I.getArgOperand(0))->getPointerOperand();
5309 const GlobalVariable *GV = dyn_cast<GlobalVariable>(Ptr);
5310
5311 // See if Ptr is a bitcast. If it is, look through it and see if we can get
5312 // global variable __stack_chk_guard.
5313 if (!GV)
5314 if (const Operator *BC = dyn_cast<Operator>(Ptr))
5315 if (BC->getOpcode() == Instruction::BitCast)
5316 GV = dyn_cast<GlobalVariable>(BC->getOperand(0));
5317
5318 if (GV && TLI.useLoadStackGuardNode()) {
5319 // Emit a LOAD_STACK_GUARD node.
5320 MachineSDNode *Node = DAG.getMachineNode(TargetOpcode::LOAD_STACK_GUARD,
5321 sdl, PtrTy, Chain);
5322 MachinePointerInfo MPInfo(GV);
5323 MachineInstr::mmo_iterator MemRefs = MF.allocateMemRefsArray(1);
5324 unsigned Flags = MachineMemOperand::MOLoad |
5325 MachineMemOperand::MOInvariant;
5326 *MemRefs = MF.getMachineMemOperand(MPInfo, Flags,
5327 PtrTy.getSizeInBits() / 8,
5328 DAG.getEVTAlignment(PtrTy));
5329 Node->setMemRefs(MemRefs, MemRefs + 1);
5330
5331 // Copy the guard value to a virtual register so that it can be
5332 // retrieved in the epilogue.
5333 Src = SDValue(Node, 0);
5334 const TargetRegisterClass *RC =
5335 TLI.getRegClassFor(Src.getSimpleValueType());
5336 unsigned Reg = MF.getRegInfo().createVirtualRegister(RC);
5337
5338 SPDescriptor.setGuardReg(Reg);
5339 Chain = DAG.getCopyToReg(Chain, sdl, Reg, Src);
5340 } else {
5341 Src = getValue(I.getArgOperand(0)); // The guard's value.
5342 }
5343
5344 AllocaInst *Slot = cast<AllocaInst>(I.getArgOperand(1));
5345
5346 int FI = FuncInfo.StaticAllocaMap[Slot];
5347 MFI->setStackProtectorIndex(FI);
5348
5349 SDValue FIN = DAG.getFrameIndex(FI, PtrTy);
5350
5351 // Store the stack protector onto the stack.
5352 Res = DAG.getStore(Chain, sdl, Src, FIN,
5353 MachinePointerInfo::getFixedStack(FI),
5354 true, false, 0);
5355 setValue(&I, Res);
5356 DAG.setRoot(Res);
5357 return nullptr;
5358 }
5359 case Intrinsic::objectsize: {
5360 // If we don't know by now, we're never going to know.
5361 ConstantInt *CI = dyn_cast<ConstantInt>(I.getArgOperand(1));
5362
5363 assert(CI && "Non-constant type in __builtin_object_size?");
5364
5365 SDValue Arg = getValue(I.getCalledValue());
5366 EVT Ty = Arg.getValueType();
5367
5368 if (CI->isZero())
5369 Res = DAG.getConstant(-1ULL, Ty);
5370 else
5371 Res = DAG.getConstant(0, Ty);
5372
5373 setValue(&I, Res);
5374 return nullptr;
5375 }
5376 case Intrinsic::annotation:
5377 case Intrinsic::ptr_annotation:
5378 // Drop the intrinsic, but forward the value
5379 setValue(&I, getValue(I.getOperand(0)));
5380 return nullptr;
5381 case Intrinsic::assume:
5382 case Intrinsic::var_annotation:
5383 // Discard annotate attributes and assumptions
5384 return nullptr;
5385
5386 case Intrinsic::init_trampoline: {
5387 const Function *F = cast<Function>(I.getArgOperand(1)->stripPointerCasts());
5388
5389 SDValue Ops[6];
5390 Ops[0] = getRoot();
5391 Ops[1] = getValue(I.getArgOperand(0));
5392 Ops[2] = getValue(I.getArgOperand(1));
5393 Ops[3] = getValue(I.getArgOperand(2));
5394 Ops[4] = DAG.getSrcValue(I.getArgOperand(0));
5395 Ops[5] = DAG.getSrcValue(F);
5396
5397 Res = DAG.getNode(ISD::INIT_TRAMPOLINE, sdl, MVT::Other, Ops);
5398
5399 DAG.setRoot(Res);
5400 return nullptr;
5401 }
5402 case Intrinsic::adjust_trampoline: {
5403 setValue(&I, DAG.getNode(ISD::ADJUST_TRAMPOLINE, sdl,
5404 TLI.getPointerTy(),
5405 getValue(I.getArgOperand(0))));
5406 return nullptr;
5407 }
5408 case Intrinsic::gcroot:
5409 if (GFI) {
5410 const Value *Alloca = I.getArgOperand(0)->stripPointerCasts();
5411 const Constant *TypeMap = cast<Constant>(I.getArgOperand(1));
5412
5413 FrameIndexSDNode *FI = cast<FrameIndexSDNode>(getValue(Alloca).getNode());
5414 GFI->addStackRoot(FI->getIndex(), TypeMap);
5415 }
5416 return nullptr;
5417 case Intrinsic::gcread:
5418 case Intrinsic::gcwrite:
5419 llvm_unreachable("GC failed to lower gcread/gcwrite intrinsics!");
5420 case Intrinsic::flt_rounds:
5421 setValue(&I, DAG.getNode(ISD::FLT_ROUNDS_, sdl, MVT::i32));
5422 return nullptr;
5423
5424 case Intrinsic::expect: {
5425 // Just replace __builtin_expect(exp, c) with EXP.
5426 setValue(&I, getValue(I.getArgOperand(0)));
5427 return nullptr;
5428 }
5429
5430 case Intrinsic::debugtrap:
5431 case Intrinsic::trap: {
5432 StringRef TrapFuncName = TM.Options.getTrapFunctionName();
5433 if (TrapFuncName.empty()) {
5434 ISD::NodeType Op = (Intrinsic == Intrinsic::trap) ?
5435 ISD::TRAP : ISD::DEBUGTRAP;
5436 DAG.setRoot(DAG.getNode(Op, sdl,MVT::Other, getRoot()));
5437 return nullptr;
5438 }
5439 TargetLowering::ArgListTy Args;
5440
5441 TargetLowering::CallLoweringInfo CLI(DAG);
5442 CLI.setDebugLoc(sdl).setChain(getRoot())
5443 .setCallee(CallingConv::C, I.getType(),
5444 DAG.getExternalSymbol(TrapFuncName.data(), TLI.getPointerTy()),
5445 std::move(Args), 0);
5446
5447 std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
5448 DAG.setRoot(Result.second);
5449 return nullptr;
5450 }
5451
5452 case Intrinsic::uadd_with_overflow:
5453 case Intrinsic::sadd_with_overflow:
5454 case Intrinsic::usub_with_overflow:
5455 case Intrinsic::ssub_with_overflow:
5456 case Intrinsic::umul_with_overflow:
5457 case Intrinsic::smul_with_overflow: {
5458 ISD::NodeType Op;
5459 switch (Intrinsic) {
5460 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
5461 case Intrinsic::uadd_with_overflow: Op = ISD::UADDO; break;
5462 case Intrinsic::sadd_with_overflow: Op = ISD::SADDO; break;
5463 case Intrinsic::usub_with_overflow: Op = ISD::USUBO; break;
5464 case Intrinsic::ssub_with_overflow: Op = ISD::SSUBO; break;
5465 case Intrinsic::umul_with_overflow: Op = ISD::UMULO; break;
5466 case Intrinsic::smul_with_overflow: Op = ISD::SMULO; break;
5467 }
5468 SDValue Op1 = getValue(I.getArgOperand(0));
5469 SDValue Op2 = getValue(I.getArgOperand(1));
5470
5471 SDVTList VTs = DAG.getVTList(Op1.getValueType(), MVT::i1);
5472 setValue(&I, DAG.getNode(Op, sdl, VTs, Op1, Op2));
5473 return nullptr;
5474 }
5475 case Intrinsic::prefetch: {
5476 SDValue Ops[5];
5477 unsigned rw = cast<ConstantInt>(I.getArgOperand(1))->getZExtValue();
5478 Ops[0] = getRoot();
5479 Ops[1] = getValue(I.getArgOperand(0));
5480 Ops[2] = getValue(I.getArgOperand(1));
5481 Ops[3] = getValue(I.getArgOperand(2));
5482 Ops[4] = getValue(I.getArgOperand(3));
5483 DAG.setRoot(DAG.getMemIntrinsicNode(ISD::PREFETCH, sdl,
5484 DAG.getVTList(MVT::Other), Ops,
5485 EVT::getIntegerVT(*Context, 8),
5486 MachinePointerInfo(I.getArgOperand(0)),
5487 0, /* align */
5488 false, /* volatile */
5489 rw==0, /* read */
5490 rw==1)); /* write */
5491 return nullptr;
5492 }
5493 case Intrinsic::lifetime_start:
5494 case Intrinsic::lifetime_end: {
5495 bool IsStart = (Intrinsic == Intrinsic::lifetime_start);
5496 // Stack coloring is not enabled in O0, discard region information.
5497 if (TM.getOptLevel() == CodeGenOpt::None)
5498 return nullptr;
5499
5500 SmallVector<Value *, 4> Allocas;
5501 GetUnderlyingObjects(I.getArgOperand(1), Allocas, DL);
5502
5503 for (SmallVectorImpl<Value*>::iterator Object = Allocas.begin(),
5504 E = Allocas.end(); Object != E; ++Object) {
5505 AllocaInst *LifetimeObject = dyn_cast_or_null<AllocaInst>(*Object);
5506
5507 // Could not find an Alloca.
5508 if (!LifetimeObject)
5509 continue;
5510
5511 // First check that the Alloca is static, otherwise it won't have a
5512 // valid frame index.
5513 auto SI = FuncInfo.StaticAllocaMap.find(LifetimeObject);
5514 if (SI == FuncInfo.StaticAllocaMap.end())
5515 return nullptr;
5516
5517 int FI = SI->second;
5518
5519 SDValue Ops[2];
5520 Ops[0] = getRoot();
5521 Ops[1] = DAG.getFrameIndex(FI, TLI.getPointerTy(), true);
5522 unsigned Opcode = (IsStart ? ISD::LIFETIME_START : ISD::LIFETIME_END);
5523
5524 Res = DAG.getNode(Opcode, sdl, MVT::Other, Ops);
5525 DAG.setRoot(Res);
5526 }
5527 return nullptr;
5528 }
5529 case Intrinsic::invariant_start:
5530 // Discard region information.
5531 setValue(&I, DAG.getUNDEF(TLI.getPointerTy()));
5532 return nullptr;
5533 case Intrinsic::invariant_end:
5534 // Discard region information.
5535 return nullptr;
5536 case Intrinsic::stackprotectorcheck: {
5537 // Do not actually emit anything for this basic block. Instead we initialize
5538 // the stack protector descriptor and export the guard variable so we can
5539 // access it in FinishBasicBlock.
5540 const BasicBlock *BB = I.getParent();
5541 SPDescriptor.initialize(BB, FuncInfo.MBBMap[BB], I);
5542 ExportFromCurrentBlock(SPDescriptor.getGuard());
5543
5544 // Flush our exports since we are going to process a terminator.
5545 (void)getControlRoot();
5546 return nullptr;
5547 }
5548 case Intrinsic::clear_cache:
5549 return TLI.getClearCacheBuiltinName();
5550 case Intrinsic::donothing:
5551 // ignore
5552 return nullptr;
5553 case Intrinsic::experimental_stackmap: {
5554 visitStackmap(I);
5555 return nullptr;
5556 }
5557 case Intrinsic::experimental_patchpoint_void:
5558 case Intrinsic::experimental_patchpoint_i64: {
5559 visitPatchpoint(&I);
5560 return nullptr;
5561 }
5562 case Intrinsic::experimental_gc_statepoint: {
5563 visitStatepoint(I);
5564 return nullptr;
5565 }
5566 case Intrinsic::experimental_gc_result_int:
5567 case Intrinsic::experimental_gc_result_float:
5568 case Intrinsic::experimental_gc_result_ptr: {
5569 visitGCResult(I);
5570 return nullptr;
5571 }
5572 case Intrinsic::experimental_gc_relocate: {
5573 visitGCRelocate(I);
5574 return nullptr;
5575 }
5576 case Intrinsic::instrprof_increment:
5577 llvm_unreachable("instrprof failed to lower an increment");
5578
5579 case Intrinsic::frameallocate: {
5580 MachineFunction &MF = DAG.getMachineFunction();
5581 const TargetInstrInfo *TII = DAG.getSubtarget().getInstrInfo();
5582
5583 // Do the allocation and map it as a normal value.
5584 // FIXME: Maybe we should add this to the alloca map so that we don't have
5585 // to register allocate it?
5586 uint64_t Size = cast<ConstantInt>(I.getArgOperand(0))->getZExtValue();
5587 int Alloc = MF.getFrameInfo()->CreateFrameAllocation(Size);
5588 MVT PtrVT = TLI.getPointerTy(0);
5589 SDValue FIVal = DAG.getFrameIndex(Alloc, PtrVT);
5590 setValue(&I, FIVal);
5591
5592 // Directly emit a FRAME_ALLOC machine instr. Label assignment emission is
5593 // the same on all targets.
5594 MCSymbol *FrameAllocSym =
5595 MF.getMMI().getContext().getOrCreateFrameAllocSymbol(MF.getName());
5596 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, dl,
5597 TII->get(TargetOpcode::FRAME_ALLOC))
5598 .addSym(FrameAllocSym)
5599 .addFrameIndex(Alloc);
5600
5601 return nullptr;
5602 }
5603
5604 case Intrinsic::framerecover: {
5605 // i8* @llvm.framerecover(i8* %fn, i8* %fp)
5606 MachineFunction &MF = DAG.getMachineFunction();
5607 MVT PtrVT = TLI.getPointerTy(0);
5608
5609 // Get the symbol that defines the frame offset.
5610 Function *Fn = cast<Function>(I.getArgOperand(0)->stripPointerCasts());
5611 MCSymbol *FrameAllocSym =
5612 MF.getMMI().getContext().getOrCreateFrameAllocSymbol(Fn->getName());
5613
5614 // Create a TargetExternalSymbol for the label to avoid any target lowering
5615 // that would make this PC relative.
5616 StringRef Name = FrameAllocSym->getName();
5617 assert(Name.size() == strlen(Name.data()) && "not null terminated");
5618 SDValue OffsetSym = DAG.getTargetExternalSymbol(Name.data(), PtrVT);
5619 SDValue OffsetVal =
5620 DAG.getNode(ISD::FRAME_ALLOC_RECOVER, sdl, PtrVT, OffsetSym);
5621
5622 // Add the offset to the FP.
5623 Value *FP = I.getArgOperand(1);
5624 SDValue FPVal = getValue(FP);
5625 SDValue Add = DAG.getNode(ISD::ADD, sdl, PtrVT, FPVal, OffsetVal);
5626 setValue(&I, Add);
5627
5628 return nullptr;
5629 }
5630 }
5631 }
5632
5633 std::pair<SDValue, SDValue>
lowerInvokable(TargetLowering::CallLoweringInfo & CLI,MachineBasicBlock * LandingPad)5634 SelectionDAGBuilder::lowerInvokable(TargetLowering::CallLoweringInfo &CLI,
5635 MachineBasicBlock *LandingPad) {
5636 MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI();
5637 MCSymbol *BeginLabel = nullptr;
5638
5639 if (LandingPad) {
5640 // Insert a label before the invoke call to mark the try range. This can be
5641 // used to detect deletion of the invoke via the MachineModuleInfo.
5642 BeginLabel = MMI.getContext().CreateTempSymbol();
5643
5644 // For SjLj, keep track of which landing pads go with which invokes
5645 // so as to maintain the ordering of pads in the LSDA.
5646 unsigned CallSiteIndex = MMI.getCurrentCallSite();
5647 if (CallSiteIndex) {
5648 MMI.setCallSiteBeginLabel(BeginLabel, CallSiteIndex);
5649 LPadToCallSiteMap[LandingPad].push_back(CallSiteIndex);
5650
5651 // Now that the call site is handled, stop tracking it.
5652 MMI.setCurrentCallSite(0);
5653 }
5654
5655 // Both PendingLoads and PendingExports must be flushed here;
5656 // this call might not return.
5657 (void)getRoot();
5658 DAG.setRoot(DAG.getEHLabel(getCurSDLoc(), getControlRoot(), BeginLabel));
5659
5660 CLI.setChain(getRoot());
5661 }
5662
5663 const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();
5664 std::pair<SDValue, SDValue> Result = TLI->LowerCallTo(CLI);
5665
5666 assert((CLI.IsTailCall || Result.second.getNode()) &&
5667 "Non-null chain expected with non-tail call!");
5668 assert((Result.second.getNode() || !Result.first.getNode()) &&
5669 "Null value expected with tail call!");
5670
5671 if (!Result.second.getNode()) {
5672 // As a special case, a null chain means that a tail call has been emitted
5673 // and the DAG root is already updated.
5674 HasTailCall = true;
5675
5676 // Since there's no actual continuation from this block, nothing can be
5677 // relying on us setting vregs for them.
5678 PendingExports.clear();
5679 } else {
5680 DAG.setRoot(Result.second);
5681 }
5682
5683 if (LandingPad) {
5684 // Insert a label at the end of the invoke call to mark the try range. This
5685 // can be used to detect deletion of the invoke via the MachineModuleInfo.
5686 MCSymbol *EndLabel = MMI.getContext().CreateTempSymbol();
5687 DAG.setRoot(DAG.getEHLabel(getCurSDLoc(), getRoot(), EndLabel));
5688
5689 // Inform MachineModuleInfo of range.
5690 MMI.addInvoke(LandingPad, BeginLabel, EndLabel);
5691 }
5692
5693 return Result;
5694 }
5695
LowerCallTo(ImmutableCallSite CS,SDValue Callee,bool isTailCall,MachineBasicBlock * LandingPad)5696 void SelectionDAGBuilder::LowerCallTo(ImmutableCallSite CS, SDValue Callee,
5697 bool isTailCall,
5698 MachineBasicBlock *LandingPad) {
5699 PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
5700 FunctionType *FTy = cast<FunctionType>(PT->getElementType());
5701 Type *RetTy = FTy->getReturnType();
5702
5703 TargetLowering::ArgListTy Args;
5704 TargetLowering::ArgListEntry Entry;
5705 Args.reserve(CS.arg_size());
5706
5707 for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
5708 i != e; ++i) {
5709 const Value *V = *i;
5710
5711 // Skip empty types
5712 if (V->getType()->isEmptyTy())
5713 continue;
5714
5715 SDValue ArgNode = getValue(V);
5716 Entry.Node = ArgNode; Entry.Ty = V->getType();
5717
5718 // Skip the first return-type Attribute to get to params.
5719 Entry.setAttributes(&CS, i - CS.arg_begin() + 1);
5720 Args.push_back(Entry);
5721
5722 // If we have an explicit sret argument that is an Instruction, (i.e., it
5723 // might point to function-local memory), we can't meaningfully tail-call.
5724 if (Entry.isSRet && isa<Instruction>(V))
5725 isTailCall = false;
5726 }
5727
5728 // Check if target-independent constraints permit a tail call here.
5729 // Target-dependent constraints are checked within TLI->LowerCallTo.
5730 if (isTailCall && !isInTailCallPosition(CS, DAG.getTarget()))
5731 isTailCall = false;
5732
5733 TargetLowering::CallLoweringInfo CLI(DAG);
5734 CLI.setDebugLoc(getCurSDLoc()).setChain(getRoot())
5735 .setCallee(RetTy, FTy, Callee, std::move(Args), CS)
5736 .setTailCall(isTailCall);
5737 std::pair<SDValue,SDValue> Result = lowerInvokable(CLI, LandingPad);
5738
5739 if (Result.first.getNode())
5740 setValue(CS.getInstruction(), Result.first);
5741 }
5742
5743 /// IsOnlyUsedInZeroEqualityComparison - Return true if it only matters that the
5744 /// value is equal or not-equal to zero.
IsOnlyUsedInZeroEqualityComparison(const Value * V)5745 static bool IsOnlyUsedInZeroEqualityComparison(const Value *V) {
5746 for (const User *U : V->users()) {
5747 if (const ICmpInst *IC = dyn_cast<ICmpInst>(U))
5748 if (IC->isEquality())
5749 if (const Constant *C = dyn_cast<Constant>(IC->getOperand(1)))
5750 if (C->isNullValue())
5751 continue;
5752 // Unknown instruction.
5753 return false;
5754 }
5755 return true;
5756 }
5757
getMemCmpLoad(const Value * PtrVal,MVT LoadVT,Type * LoadTy,SelectionDAGBuilder & Builder)5758 static SDValue getMemCmpLoad(const Value *PtrVal, MVT LoadVT,
5759 Type *LoadTy,
5760 SelectionDAGBuilder &Builder) {
5761
5762 // Check to see if this load can be trivially constant folded, e.g. if the
5763 // input is from a string literal.
5764 if (const Constant *LoadInput = dyn_cast<Constant>(PtrVal)) {
5765 // Cast pointer to the type we really want to load.
5766 LoadInput = ConstantExpr::getBitCast(const_cast<Constant *>(LoadInput),
5767 PointerType::getUnqual(LoadTy));
5768
5769 if (const Constant *LoadCst =
5770 ConstantFoldLoadFromConstPtr(const_cast<Constant *>(LoadInput),
5771 Builder.DL))
5772 return Builder.getValue(LoadCst);
5773 }
5774
5775 // Otherwise, we have to emit the load. If the pointer is to unfoldable but
5776 // still constant memory, the input chain can be the entry node.
5777 SDValue Root;
5778 bool ConstantMemory = false;
5779
5780 // Do not serialize (non-volatile) loads of constant memory with anything.
5781 if (Builder.AA->pointsToConstantMemory(PtrVal)) {
5782 Root = Builder.DAG.getEntryNode();
5783 ConstantMemory = true;
5784 } else {
5785 // Do not serialize non-volatile loads against each other.
5786 Root = Builder.DAG.getRoot();
5787 }
5788
5789 SDValue Ptr = Builder.getValue(PtrVal);
5790 SDValue LoadVal = Builder.DAG.getLoad(LoadVT, Builder.getCurSDLoc(), Root,
5791 Ptr, MachinePointerInfo(PtrVal),
5792 false /*volatile*/,
5793 false /*nontemporal*/,
5794 false /*isinvariant*/, 1 /* align=1 */);
5795
5796 if (!ConstantMemory)
5797 Builder.PendingLoads.push_back(LoadVal.getValue(1));
5798 return LoadVal;
5799 }
5800
5801 /// processIntegerCallValue - Record the value for an instruction that
5802 /// produces an integer result, converting the type where necessary.
processIntegerCallValue(const Instruction & I,SDValue Value,bool IsSigned)5803 void SelectionDAGBuilder::processIntegerCallValue(const Instruction &I,
5804 SDValue Value,
5805 bool IsSigned) {
5806 EVT VT = DAG.getTargetLoweringInfo().getValueType(I.getType(), true);
5807 if (IsSigned)
5808 Value = DAG.getSExtOrTrunc(Value, getCurSDLoc(), VT);
5809 else
5810 Value = DAG.getZExtOrTrunc(Value, getCurSDLoc(), VT);
5811 setValue(&I, Value);
5812 }
5813
5814 /// visitMemCmpCall - See if we can lower a call to memcmp in an optimized form.
5815 /// If so, return true and lower it, otherwise return false and it will be
5816 /// lowered like a normal call.
visitMemCmpCall(const CallInst & I)5817 bool SelectionDAGBuilder::visitMemCmpCall(const CallInst &I) {
5818 // Verify that the prototype makes sense. int memcmp(void*,void*,size_t)
5819 if (I.getNumArgOperands() != 3)
5820 return false;
5821
5822 const Value *LHS = I.getArgOperand(0), *RHS = I.getArgOperand(1);
5823 if (!LHS->getType()->isPointerTy() || !RHS->getType()->isPointerTy() ||
5824 !I.getArgOperand(2)->getType()->isIntegerTy() ||
5825 !I.getType()->isIntegerTy())
5826 return false;
5827
5828 const Value *Size = I.getArgOperand(2);
5829 const ConstantInt *CSize = dyn_cast<ConstantInt>(Size);
5830 if (CSize && CSize->getZExtValue() == 0) {
5831 EVT CallVT = DAG.getTargetLoweringInfo().getValueType(I.getType(), true);
5832 setValue(&I, DAG.getConstant(0, CallVT));
5833 return true;
5834 }
5835
5836 const TargetSelectionDAGInfo &TSI = DAG.getSelectionDAGInfo();
5837 std::pair<SDValue, SDValue> Res =
5838 TSI.EmitTargetCodeForMemcmp(DAG, getCurSDLoc(), DAG.getRoot(),
5839 getValue(LHS), getValue(RHS), getValue(Size),
5840 MachinePointerInfo(LHS),
5841 MachinePointerInfo(RHS));
5842 if (Res.first.getNode()) {
5843 processIntegerCallValue(I, Res.first, true);
5844 PendingLoads.push_back(Res.second);
5845 return true;
5846 }
5847
5848 // memcmp(S1,S2,2) != 0 -> (*(short*)LHS != *(short*)RHS) != 0
5849 // memcmp(S1,S2,4) != 0 -> (*(int*)LHS != *(int*)RHS) != 0
5850 if (CSize && IsOnlyUsedInZeroEqualityComparison(&I)) {
5851 bool ActuallyDoIt = true;
5852 MVT LoadVT;
5853 Type *LoadTy;
5854 switch (CSize->getZExtValue()) {
5855 default:
5856 LoadVT = MVT::Other;
5857 LoadTy = nullptr;
5858 ActuallyDoIt = false;
5859 break;
5860 case 2:
5861 LoadVT = MVT::i16;
5862 LoadTy = Type::getInt16Ty(CSize->getContext());
5863 break;
5864 case 4:
5865 LoadVT = MVT::i32;
5866 LoadTy = Type::getInt32Ty(CSize->getContext());
5867 break;
5868 case 8:
5869 LoadVT = MVT::i64;
5870 LoadTy = Type::getInt64Ty(CSize->getContext());
5871 break;
5872 /*
5873 case 16:
5874 LoadVT = MVT::v4i32;
5875 LoadTy = Type::getInt32Ty(CSize->getContext());
5876 LoadTy = VectorType::get(LoadTy, 4);
5877 break;
5878 */
5879 }
5880
5881 // This turns into unaligned loads. We only do this if the target natively
5882 // supports the MVT we'll be loading or if it is small enough (<= 4) that
5883 // we'll only produce a small number of byte loads.
5884
5885 // Require that we can find a legal MVT, and only do this if the target
5886 // supports unaligned loads of that type. Expanding into byte loads would
5887 // bloat the code.
5888 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
5889 if (ActuallyDoIt && CSize->getZExtValue() > 4) {
5890 unsigned DstAS = LHS->getType()->getPointerAddressSpace();
5891 unsigned SrcAS = RHS->getType()->getPointerAddressSpace();
5892 // TODO: Handle 5 byte compare as 4-byte + 1 byte.
5893 // TODO: Handle 8 byte compare on x86-32 as two 32-bit loads.
5894 // TODO: Check alignment of src and dest ptrs.
5895 if (!TLI.isTypeLegal(LoadVT) ||
5896 !TLI.allowsMisalignedMemoryAccesses(LoadVT, SrcAS) ||
5897 !TLI.allowsMisalignedMemoryAccesses(LoadVT, DstAS))
5898 ActuallyDoIt = false;
5899 }
5900
5901 if (ActuallyDoIt) {
5902 SDValue LHSVal = getMemCmpLoad(LHS, LoadVT, LoadTy, *this);
5903 SDValue RHSVal = getMemCmpLoad(RHS, LoadVT, LoadTy, *this);
5904
5905 SDValue Res = DAG.getSetCC(getCurSDLoc(), MVT::i1, LHSVal, RHSVal,
5906 ISD::SETNE);
5907 processIntegerCallValue(I, Res, false);
5908 return true;
5909 }
5910 }
5911
5912
5913 return false;
5914 }
5915
5916 /// visitMemChrCall -- See if we can lower a memchr call into an optimized
5917 /// form. If so, return true and lower it, otherwise return false and it
5918 /// will be lowered like a normal call.
visitMemChrCall(const CallInst & I)5919 bool SelectionDAGBuilder::visitMemChrCall(const CallInst &I) {
5920 // Verify that the prototype makes sense. void *memchr(void *, int, size_t)
5921 if (I.getNumArgOperands() != 3)
5922 return false;
5923
5924 const Value *Src = I.getArgOperand(0);
5925 const Value *Char = I.getArgOperand(1);
5926 const Value *Length = I.getArgOperand(2);
5927 if (!Src->getType()->isPointerTy() ||
5928 !Char->getType()->isIntegerTy() ||
5929 !Length->getType()->isIntegerTy() ||
5930 !I.getType()->isPointerTy())
5931 return false;
5932
5933 const TargetSelectionDAGInfo &TSI = DAG.getSelectionDAGInfo();
5934 std::pair<SDValue, SDValue> Res =
5935 TSI.EmitTargetCodeForMemchr(DAG, getCurSDLoc(), DAG.getRoot(),
5936 getValue(Src), getValue(Char), getValue(Length),
5937 MachinePointerInfo(Src));
5938 if (Res.first.getNode()) {
5939 setValue(&I, Res.first);
5940 PendingLoads.push_back(Res.second);
5941 return true;
5942 }
5943
5944 return false;
5945 }
5946
5947 /// visitStrCpyCall -- See if we can lower a strcpy or stpcpy call into an
5948 /// optimized form. If so, return true and lower it, otherwise return false
5949 /// and it will be lowered like a normal call.
visitStrCpyCall(const CallInst & I,bool isStpcpy)5950 bool SelectionDAGBuilder::visitStrCpyCall(const CallInst &I, bool isStpcpy) {
5951 // Verify that the prototype makes sense. char *strcpy(char *, char *)
5952 if (I.getNumArgOperands() != 2)
5953 return false;
5954
5955 const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1);
5956 if (!Arg0->getType()->isPointerTy() ||
5957 !Arg1->getType()->isPointerTy() ||
5958 !I.getType()->isPointerTy())
5959 return false;
5960
5961 const TargetSelectionDAGInfo &TSI = DAG.getSelectionDAGInfo();
5962 std::pair<SDValue, SDValue> Res =
5963 TSI.EmitTargetCodeForStrcpy(DAG, getCurSDLoc(), getRoot(),
5964 getValue(Arg0), getValue(Arg1),
5965 MachinePointerInfo(Arg0),
5966 MachinePointerInfo(Arg1), isStpcpy);
5967 if (Res.first.getNode()) {
5968 setValue(&I, Res.first);
5969 DAG.setRoot(Res.second);
5970 return true;
5971 }
5972
5973 return false;
5974 }
5975
5976 /// visitStrCmpCall - See if we can lower a call to strcmp in an optimized form.
5977 /// If so, return true and lower it, otherwise return false and it will be
5978 /// lowered like a normal call.
visitStrCmpCall(const CallInst & I)5979 bool SelectionDAGBuilder::visitStrCmpCall(const CallInst &I) {
5980 // Verify that the prototype makes sense. int strcmp(void*,void*)
5981 if (I.getNumArgOperands() != 2)
5982 return false;
5983
5984 const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1);
5985 if (!Arg0->getType()->isPointerTy() ||
5986 !Arg1->getType()->isPointerTy() ||
5987 !I.getType()->isIntegerTy())
5988 return false;
5989
5990 const TargetSelectionDAGInfo &TSI = DAG.getSelectionDAGInfo();
5991 std::pair<SDValue, SDValue> Res =
5992 TSI.EmitTargetCodeForStrcmp(DAG, getCurSDLoc(), DAG.getRoot(),
5993 getValue(Arg0), getValue(Arg1),
5994 MachinePointerInfo(Arg0),
5995 MachinePointerInfo(Arg1));
5996 if (Res.first.getNode()) {
5997 processIntegerCallValue(I, Res.first, true);
5998 PendingLoads.push_back(Res.second);
5999 return true;
6000 }
6001
6002 return false;
6003 }
6004
6005 /// visitStrLenCall -- See if we can lower a strlen call into an optimized
6006 /// form. If so, return true and lower it, otherwise return false and it
6007 /// will be lowered like a normal call.
visitStrLenCall(const CallInst & I)6008 bool SelectionDAGBuilder::visitStrLenCall(const CallInst &I) {
6009 // Verify that the prototype makes sense. size_t strlen(char *)
6010 if (I.getNumArgOperands() != 1)
6011 return false;
6012
6013 const Value *Arg0 = I.getArgOperand(0);
6014 if (!Arg0->getType()->isPointerTy() || !I.getType()->isIntegerTy())
6015 return false;
6016
6017 const TargetSelectionDAGInfo &TSI = DAG.getSelectionDAGInfo();
6018 std::pair<SDValue, SDValue> Res =
6019 TSI.EmitTargetCodeForStrlen(DAG, getCurSDLoc(), DAG.getRoot(),
6020 getValue(Arg0), MachinePointerInfo(Arg0));
6021 if (Res.first.getNode()) {
6022 processIntegerCallValue(I, Res.first, false);
6023 PendingLoads.push_back(Res.second);
6024 return true;
6025 }
6026
6027 return false;
6028 }
6029
6030 /// visitStrNLenCall -- See if we can lower a strnlen call into an optimized
6031 /// form. If so, return true and lower it, otherwise return false and it
6032 /// will be lowered like a normal call.
visitStrNLenCall(const CallInst & I)6033 bool SelectionDAGBuilder::visitStrNLenCall(const CallInst &I) {
6034 // Verify that the prototype makes sense. size_t strnlen(char *, size_t)
6035 if (I.getNumArgOperands() != 2)
6036 return false;
6037
6038 const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1);
6039 if (!Arg0->getType()->isPointerTy() ||
6040 !Arg1->getType()->isIntegerTy() ||
6041 !I.getType()->isIntegerTy())
6042 return false;
6043
6044 const TargetSelectionDAGInfo &TSI = DAG.getSelectionDAGInfo();
6045 std::pair<SDValue, SDValue> Res =
6046 TSI.EmitTargetCodeForStrnlen(DAG, getCurSDLoc(), DAG.getRoot(),
6047 getValue(Arg0), getValue(Arg1),
6048 MachinePointerInfo(Arg0));
6049 if (Res.first.getNode()) {
6050 processIntegerCallValue(I, Res.first, false);
6051 PendingLoads.push_back(Res.second);
6052 return true;
6053 }
6054
6055 return false;
6056 }
6057
6058 /// visitUnaryFloatCall - If a call instruction is a unary floating-point
6059 /// operation (as expected), translate it to an SDNode with the specified opcode
6060 /// and return true.
visitUnaryFloatCall(const CallInst & I,unsigned Opcode)6061 bool SelectionDAGBuilder::visitUnaryFloatCall(const CallInst &I,
6062 unsigned Opcode) {
6063 // Sanity check that it really is a unary floating-point call.
6064 if (I.getNumArgOperands() != 1 ||
6065 !I.getArgOperand(0)->getType()->isFloatingPointTy() ||
6066 I.getType() != I.getArgOperand(0)->getType() ||
6067 !I.onlyReadsMemory())
6068 return false;
6069
6070 SDValue Tmp = getValue(I.getArgOperand(0));
6071 setValue(&I, DAG.getNode(Opcode, getCurSDLoc(), Tmp.getValueType(), Tmp));
6072 return true;
6073 }
6074
6075 /// visitBinaryFloatCall - If a call instruction is a binary floating-point
6076 /// operation (as expected), translate it to an SDNode with the specified opcode
6077 /// and return true.
visitBinaryFloatCall(const CallInst & I,unsigned Opcode)6078 bool SelectionDAGBuilder::visitBinaryFloatCall(const CallInst &I,
6079 unsigned Opcode) {
6080 // Sanity check that it really is a binary floating-point call.
6081 if (I.getNumArgOperands() != 2 ||
6082 !I.getArgOperand(0)->getType()->isFloatingPointTy() ||
6083 I.getType() != I.getArgOperand(0)->getType() ||
6084 I.getType() != I.getArgOperand(1)->getType() ||
6085 !I.onlyReadsMemory())
6086 return false;
6087
6088 SDValue Tmp0 = getValue(I.getArgOperand(0));
6089 SDValue Tmp1 = getValue(I.getArgOperand(1));
6090 EVT VT = Tmp0.getValueType();
6091 setValue(&I, DAG.getNode(Opcode, getCurSDLoc(), VT, Tmp0, Tmp1));
6092 return true;
6093 }
6094
visitCall(const CallInst & I)6095 void SelectionDAGBuilder::visitCall(const CallInst &I) {
6096 // Handle inline assembly differently.
6097 if (isa<InlineAsm>(I.getCalledValue())) {
6098 visitInlineAsm(&I);
6099 return;
6100 }
6101
6102 MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI();
6103 ComputeUsesVAFloatArgument(I, &MMI);
6104
6105 const char *RenameFn = nullptr;
6106 if (Function *F = I.getCalledFunction()) {
6107 if (F->isDeclaration()) {
6108 if (const TargetIntrinsicInfo *II = TM.getIntrinsicInfo()) {
6109 if (unsigned IID = II->getIntrinsicID(F)) {
6110 RenameFn = visitIntrinsicCall(I, IID);
6111 if (!RenameFn)
6112 return;
6113 }
6114 }
6115 if (unsigned IID = F->getIntrinsicID()) {
6116 RenameFn = visitIntrinsicCall(I, IID);
6117 if (!RenameFn)
6118 return;
6119 }
6120 }
6121
6122 // Check for well-known libc/libm calls. If the function is internal, it
6123 // can't be a library call.
6124 LibFunc::Func Func;
6125 if (!F->hasLocalLinkage() && F->hasName() &&
6126 LibInfo->getLibFunc(F->getName(), Func) &&
6127 LibInfo->hasOptimizedCodeGen(Func)) {
6128 switch (Func) {
6129 default: break;
6130 case LibFunc::copysign:
6131 case LibFunc::copysignf:
6132 case LibFunc::copysignl:
6133 if (I.getNumArgOperands() == 2 && // Basic sanity checks.
6134 I.getArgOperand(0)->getType()->isFloatingPointTy() &&
6135 I.getType() == I.getArgOperand(0)->getType() &&
6136 I.getType() == I.getArgOperand(1)->getType() &&
6137 I.onlyReadsMemory()) {
6138 SDValue LHS = getValue(I.getArgOperand(0));
6139 SDValue RHS = getValue(I.getArgOperand(1));
6140 setValue(&I, DAG.getNode(ISD::FCOPYSIGN, getCurSDLoc(),
6141 LHS.getValueType(), LHS, RHS));
6142 return;
6143 }
6144 break;
6145 case LibFunc::fabs:
6146 case LibFunc::fabsf:
6147 case LibFunc::fabsl:
6148 if (visitUnaryFloatCall(I, ISD::FABS))
6149 return;
6150 break;
6151 case LibFunc::fmin:
6152 case LibFunc::fminf:
6153 case LibFunc::fminl:
6154 if (visitBinaryFloatCall(I, ISD::FMINNUM))
6155 return;
6156 break;
6157 case LibFunc::fmax:
6158 case LibFunc::fmaxf:
6159 case LibFunc::fmaxl:
6160 if (visitBinaryFloatCall(I, ISD::FMAXNUM))
6161 return;
6162 break;
6163 case LibFunc::sin:
6164 case LibFunc::sinf:
6165 case LibFunc::sinl:
6166 if (visitUnaryFloatCall(I, ISD::FSIN))
6167 return;
6168 break;
6169 case LibFunc::cos:
6170 case LibFunc::cosf:
6171 case LibFunc::cosl:
6172 if (visitUnaryFloatCall(I, ISD::FCOS))
6173 return;
6174 break;
6175 case LibFunc::sqrt:
6176 case LibFunc::sqrtf:
6177 case LibFunc::sqrtl:
6178 case LibFunc::sqrt_finite:
6179 case LibFunc::sqrtf_finite:
6180 case LibFunc::sqrtl_finite:
6181 if (visitUnaryFloatCall(I, ISD::FSQRT))
6182 return;
6183 break;
6184 case LibFunc::floor:
6185 case LibFunc::floorf:
6186 case LibFunc::floorl:
6187 if (visitUnaryFloatCall(I, ISD::FFLOOR))
6188 return;
6189 break;
6190 case LibFunc::nearbyint:
6191 case LibFunc::nearbyintf:
6192 case LibFunc::nearbyintl:
6193 if (visitUnaryFloatCall(I, ISD::FNEARBYINT))
6194 return;
6195 break;
6196 case LibFunc::ceil:
6197 case LibFunc::ceilf:
6198 case LibFunc::ceill:
6199 if (visitUnaryFloatCall(I, ISD::FCEIL))
6200 return;
6201 break;
6202 case LibFunc::rint:
6203 case LibFunc::rintf:
6204 case LibFunc::rintl:
6205 if (visitUnaryFloatCall(I, ISD::FRINT))
6206 return;
6207 break;
6208 case LibFunc::round:
6209 case LibFunc::roundf:
6210 case LibFunc::roundl:
6211 if (visitUnaryFloatCall(I, ISD::FROUND))
6212 return;
6213 break;
6214 case LibFunc::trunc:
6215 case LibFunc::truncf:
6216 case LibFunc::truncl:
6217 if (visitUnaryFloatCall(I, ISD::FTRUNC))
6218 return;
6219 break;
6220 case LibFunc::log2:
6221 case LibFunc::log2f:
6222 case LibFunc::log2l:
6223 if (visitUnaryFloatCall(I, ISD::FLOG2))
6224 return;
6225 break;
6226 case LibFunc::exp2:
6227 case LibFunc::exp2f:
6228 case LibFunc::exp2l:
6229 if (visitUnaryFloatCall(I, ISD::FEXP2))
6230 return;
6231 break;
6232 case LibFunc::memcmp:
6233 if (visitMemCmpCall(I))
6234 return;
6235 break;
6236 case LibFunc::memchr:
6237 if (visitMemChrCall(I))
6238 return;
6239 break;
6240 case LibFunc::strcpy:
6241 if (visitStrCpyCall(I, false))
6242 return;
6243 break;
6244 case LibFunc::stpcpy:
6245 if (visitStrCpyCall(I, true))
6246 return;
6247 break;
6248 case LibFunc::strcmp:
6249 if (visitStrCmpCall(I))
6250 return;
6251 break;
6252 case LibFunc::strlen:
6253 if (visitStrLenCall(I))
6254 return;
6255 break;
6256 case LibFunc::strnlen:
6257 if (visitStrNLenCall(I))
6258 return;
6259 break;
6260 }
6261 }
6262 }
6263
6264 SDValue Callee;
6265 if (!RenameFn)
6266 Callee = getValue(I.getCalledValue());
6267 else
6268 Callee = DAG.getExternalSymbol(RenameFn,
6269 DAG.getTargetLoweringInfo().getPointerTy());
6270
6271 // Check if we can potentially perform a tail call. More detailed checking is
6272 // be done within LowerCallTo, after more information about the call is known.
6273 LowerCallTo(&I, Callee, I.isTailCall());
6274 }
6275
6276 namespace {
6277
6278 /// AsmOperandInfo - This contains information for each constraint that we are
6279 /// lowering.
6280 class SDISelAsmOperandInfo : public TargetLowering::AsmOperandInfo {
6281 public:
6282 /// CallOperand - If this is the result output operand or a clobber
6283 /// this is null, otherwise it is the incoming operand to the CallInst.
6284 /// This gets modified as the asm is processed.
6285 SDValue CallOperand;
6286
6287 /// AssignedRegs - If this is a register or register class operand, this
6288 /// contains the set of register corresponding to the operand.
6289 RegsForValue AssignedRegs;
6290
SDISelAsmOperandInfo(const TargetLowering::AsmOperandInfo & info)6291 explicit SDISelAsmOperandInfo(const TargetLowering::AsmOperandInfo &info)
6292 : TargetLowering::AsmOperandInfo(info), CallOperand(nullptr,0) {
6293 }
6294
6295 /// getCallOperandValEVT - Return the EVT of the Value* that this operand
6296 /// corresponds to. If there is no Value* for this operand, it returns
6297 /// MVT::Other.
getCallOperandValEVT(LLVMContext & Context,const TargetLowering & TLI,const DataLayout * DL) const6298 EVT getCallOperandValEVT(LLVMContext &Context,
6299 const TargetLowering &TLI,
6300 const DataLayout *DL) const {
6301 if (!CallOperandVal) return MVT::Other;
6302
6303 if (isa<BasicBlock>(CallOperandVal))
6304 return TLI.getPointerTy();
6305
6306 llvm::Type *OpTy = CallOperandVal->getType();
6307
6308 // FIXME: code duplicated from TargetLowering::ParseConstraints().
6309 // If this is an indirect operand, the operand is a pointer to the
6310 // accessed type.
6311 if (isIndirect) {
6312 llvm::PointerType *PtrTy = dyn_cast<PointerType>(OpTy);
6313 if (!PtrTy)
6314 report_fatal_error("Indirect operand for inline asm not a pointer!");
6315 OpTy = PtrTy->getElementType();
6316 }
6317
6318 // Look for vector wrapped in a struct. e.g. { <16 x i8> }.
6319 if (StructType *STy = dyn_cast<StructType>(OpTy))
6320 if (STy->getNumElements() == 1)
6321 OpTy = STy->getElementType(0);
6322
6323 // If OpTy is not a single value, it may be a struct/union that we
6324 // can tile with integers.
6325 if (!OpTy->isSingleValueType() && OpTy->isSized()) {
6326 unsigned BitSize = DL->getTypeSizeInBits(OpTy);
6327 switch (BitSize) {
6328 default: break;
6329 case 1:
6330 case 8:
6331 case 16:
6332 case 32:
6333 case 64:
6334 case 128:
6335 OpTy = IntegerType::get(Context, BitSize);
6336 break;
6337 }
6338 }
6339
6340 return TLI.getValueType(OpTy, true);
6341 }
6342 };
6343
6344 typedef SmallVector<SDISelAsmOperandInfo,16> SDISelAsmOperandInfoVector;
6345
6346 } // end anonymous namespace
6347
6348 /// GetRegistersForValue - Assign registers (virtual or physical) for the
6349 /// specified operand. We prefer to assign virtual registers, to allow the
6350 /// register allocator to handle the assignment process. However, if the asm
6351 /// uses features that we can't model on machineinstrs, we have SDISel do the
6352 /// allocation. This produces generally horrible, but correct, code.
6353 ///
6354 /// OpInfo describes the operand.
6355 ///
GetRegistersForValue(SelectionDAG & DAG,const TargetLowering & TLI,SDLoc DL,SDISelAsmOperandInfo & OpInfo)6356 static void GetRegistersForValue(SelectionDAG &DAG,
6357 const TargetLowering &TLI,
6358 SDLoc DL,
6359 SDISelAsmOperandInfo &OpInfo) {
6360 LLVMContext &Context = *DAG.getContext();
6361
6362 MachineFunction &MF = DAG.getMachineFunction();
6363 SmallVector<unsigned, 4> Regs;
6364
6365 // If this is a constraint for a single physreg, or a constraint for a
6366 // register class, find it.
6367 std::pair<unsigned, const TargetRegisterClass*> PhysReg =
6368 TLI.getRegForInlineAsmConstraint(OpInfo.ConstraintCode,
6369 OpInfo.ConstraintVT);
6370
6371 unsigned NumRegs = 1;
6372 if (OpInfo.ConstraintVT != MVT::Other) {
6373 // If this is a FP input in an integer register (or visa versa) insert a bit
6374 // cast of the input value. More generally, handle any case where the input
6375 // value disagrees with the register class we plan to stick this in.
6376 if (OpInfo.Type == InlineAsm::isInput &&
6377 PhysReg.second && !PhysReg.second->hasType(OpInfo.ConstraintVT)) {
6378 // Try to convert to the first EVT that the reg class contains. If the
6379 // types are identical size, use a bitcast to convert (e.g. two differing
6380 // vector types).
6381 MVT RegVT = *PhysReg.second->vt_begin();
6382 if (RegVT.getSizeInBits() == OpInfo.CallOperand.getValueSizeInBits()) {
6383 OpInfo.CallOperand = DAG.getNode(ISD::BITCAST, DL,
6384 RegVT, OpInfo.CallOperand);
6385 OpInfo.ConstraintVT = RegVT;
6386 } else if (RegVT.isInteger() && OpInfo.ConstraintVT.isFloatingPoint()) {
6387 // If the input is a FP value and we want it in FP registers, do a
6388 // bitcast to the corresponding integer type. This turns an f64 value
6389 // into i64, which can be passed with two i32 values on a 32-bit
6390 // machine.
6391 RegVT = MVT::getIntegerVT(OpInfo.ConstraintVT.getSizeInBits());
6392 OpInfo.CallOperand = DAG.getNode(ISD::BITCAST, DL,
6393 RegVT, OpInfo.CallOperand);
6394 OpInfo.ConstraintVT = RegVT;
6395 }
6396 }
6397
6398 NumRegs = TLI.getNumRegisters(Context, OpInfo.ConstraintVT);
6399 }
6400
6401 MVT RegVT;
6402 EVT ValueVT = OpInfo.ConstraintVT;
6403
6404 // If this is a constraint for a specific physical register, like {r17},
6405 // assign it now.
6406 if (unsigned AssignedReg = PhysReg.first) {
6407 const TargetRegisterClass *RC = PhysReg.second;
6408 if (OpInfo.ConstraintVT == MVT::Other)
6409 ValueVT = *RC->vt_begin();
6410
6411 // Get the actual register value type. This is important, because the user
6412 // may have asked for (e.g.) the AX register in i32 type. We need to
6413 // remember that AX is actually i16 to get the right extension.
6414 RegVT = *RC->vt_begin();
6415
6416 // This is a explicit reference to a physical register.
6417 Regs.push_back(AssignedReg);
6418
6419 // If this is an expanded reference, add the rest of the regs to Regs.
6420 if (NumRegs != 1) {
6421 TargetRegisterClass::iterator I = RC->begin();
6422 for (; *I != AssignedReg; ++I)
6423 assert(I != RC->end() && "Didn't find reg!");
6424
6425 // Already added the first reg.
6426 --NumRegs; ++I;
6427 for (; NumRegs; --NumRegs, ++I) {
6428 assert(I != RC->end() && "Ran out of registers to allocate!");
6429 Regs.push_back(*I);
6430 }
6431 }
6432
6433 OpInfo.AssignedRegs = RegsForValue(Regs, RegVT, ValueVT);
6434 return;
6435 }
6436
6437 // Otherwise, if this was a reference to an LLVM register class, create vregs
6438 // for this reference.
6439 if (const TargetRegisterClass *RC = PhysReg.second) {
6440 RegVT = *RC->vt_begin();
6441 if (OpInfo.ConstraintVT == MVT::Other)
6442 ValueVT = RegVT;
6443
6444 // Create the appropriate number of virtual registers.
6445 MachineRegisterInfo &RegInfo = MF.getRegInfo();
6446 for (; NumRegs; --NumRegs)
6447 Regs.push_back(RegInfo.createVirtualRegister(RC));
6448
6449 OpInfo.AssignedRegs = RegsForValue(Regs, RegVT, ValueVT);
6450 return;
6451 }
6452
6453 // Otherwise, we couldn't allocate enough registers for this.
6454 }
6455
6456 /// visitInlineAsm - Handle a call to an InlineAsm object.
6457 ///
visitInlineAsm(ImmutableCallSite CS)6458 void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
6459 const InlineAsm *IA = cast<InlineAsm>(CS.getCalledValue());
6460
6461 /// ConstraintOperands - Information about all of the constraints.
6462 SDISelAsmOperandInfoVector ConstraintOperands;
6463
6464 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6465 TargetLowering::AsmOperandInfoVector
6466 TargetConstraints = TLI.ParseConstraints(CS);
6467
6468 bool hasMemory = false;
6469
6470 unsigned ArgNo = 0; // ArgNo - The argument of the CallInst.
6471 unsigned ResNo = 0; // ResNo - The result number of the next output.
6472 for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) {
6473 ConstraintOperands.push_back(SDISelAsmOperandInfo(TargetConstraints[i]));
6474 SDISelAsmOperandInfo &OpInfo = ConstraintOperands.back();
6475
6476 MVT OpVT = MVT::Other;
6477
6478 // Compute the value type for each operand.
6479 switch (OpInfo.Type) {
6480 case InlineAsm::isOutput:
6481 // Indirect outputs just consume an argument.
6482 if (OpInfo.isIndirect) {
6483 OpInfo.CallOperandVal = const_cast<Value *>(CS.getArgument(ArgNo++));
6484 break;
6485 }
6486
6487 // The return value of the call is this value. As such, there is no
6488 // corresponding argument.
6489 assert(!CS.getType()->isVoidTy() && "Bad inline asm!");
6490 if (StructType *STy = dyn_cast<StructType>(CS.getType())) {
6491 OpVT = TLI.getSimpleValueType(STy->getElementType(ResNo));
6492 } else {
6493 assert(ResNo == 0 && "Asm only has one result!");
6494 OpVT = TLI.getSimpleValueType(CS.getType());
6495 }
6496 ++ResNo;
6497 break;
6498 case InlineAsm::isInput:
6499 OpInfo.CallOperandVal = const_cast<Value *>(CS.getArgument(ArgNo++));
6500 break;
6501 case InlineAsm::isClobber:
6502 // Nothing to do.
6503 break;
6504 }
6505
6506 // If this is an input or an indirect output, process the call argument.
6507 // BasicBlocks are labels, currently appearing only in asm's.
6508 if (OpInfo.CallOperandVal) {
6509 if (const BasicBlock *BB = dyn_cast<BasicBlock>(OpInfo.CallOperandVal)) {
6510 OpInfo.CallOperand = DAG.getBasicBlock(FuncInfo.MBBMap[BB]);
6511 } else {
6512 OpInfo.CallOperand = getValue(OpInfo.CallOperandVal);
6513 }
6514
6515 OpVT =
6516 OpInfo.getCallOperandValEVT(*DAG.getContext(), TLI, DL).getSimpleVT();
6517 }
6518
6519 OpInfo.ConstraintVT = OpVT;
6520
6521 // Indirect operand accesses access memory.
6522 if (OpInfo.isIndirect)
6523 hasMemory = true;
6524 else {
6525 for (unsigned j = 0, ee = OpInfo.Codes.size(); j != ee; ++j) {
6526 TargetLowering::ConstraintType
6527 CType = TLI.getConstraintType(OpInfo.Codes[j]);
6528 if (CType == TargetLowering::C_Memory) {
6529 hasMemory = true;
6530 break;
6531 }
6532 }
6533 }
6534 }
6535
6536 SDValue Chain, Flag;
6537
6538 // We won't need to flush pending loads if this asm doesn't touch
6539 // memory and is nonvolatile.
6540 if (hasMemory || IA->hasSideEffects())
6541 Chain = getRoot();
6542 else
6543 Chain = DAG.getRoot();
6544
6545 // Second pass over the constraints: compute which constraint option to use
6546 // and assign registers to constraints that want a specific physreg.
6547 for (unsigned i = 0, e = ConstraintOperands.size(); i != e; ++i) {
6548 SDISelAsmOperandInfo &OpInfo = ConstraintOperands[i];
6549
6550 // If this is an output operand with a matching input operand, look up the
6551 // matching input. If their types mismatch, e.g. one is an integer, the
6552 // other is floating point, or their sizes are different, flag it as an
6553 // error.
6554 if (OpInfo.hasMatchingInput()) {
6555 SDISelAsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];
6556
6557 if (OpInfo.ConstraintVT != Input.ConstraintVT) {
6558 std::pair<unsigned, const TargetRegisterClass*> MatchRC =
6559 TLI.getRegForInlineAsmConstraint(OpInfo.ConstraintCode,
6560 OpInfo.ConstraintVT);
6561 std::pair<unsigned, const TargetRegisterClass*> InputRC =
6562 TLI.getRegForInlineAsmConstraint(Input.ConstraintCode,
6563 Input.ConstraintVT);
6564 if ((OpInfo.ConstraintVT.isInteger() !=
6565 Input.ConstraintVT.isInteger()) ||
6566 (MatchRC.second != InputRC.second)) {
6567 report_fatal_error("Unsupported asm: input constraint"
6568 " with a matching output constraint of"
6569 " incompatible type!");
6570 }
6571 Input.ConstraintVT = OpInfo.ConstraintVT;
6572 }
6573 }
6574
6575 // Compute the constraint code and ConstraintType to use.
6576 TLI.ComputeConstraintToUse(OpInfo, OpInfo.CallOperand, &DAG);
6577
6578 if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
6579 OpInfo.Type == InlineAsm::isClobber)
6580 continue;
6581
6582 // If this is a memory input, and if the operand is not indirect, do what we
6583 // need to to provide an address for the memory input.
6584 if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
6585 !OpInfo.isIndirect) {
6586 assert((OpInfo.isMultipleAlternative ||
6587 (OpInfo.Type == InlineAsm::isInput)) &&
6588 "Can only indirectify direct input operands!");
6589
6590 // Memory operands really want the address of the value. If we don't have
6591 // an indirect input, put it in the constpool if we can, otherwise spill
6592 // it to a stack slot.
6593 // TODO: This isn't quite right. We need to handle these according to
6594 // the addressing mode that the constraint wants. Also, this may take
6595 // an additional register for the computation and we don't want that
6596 // either.
6597
6598 // If the operand is a float, integer, or vector constant, spill to a
6599 // constant pool entry to get its address.
6600 const Value *OpVal = OpInfo.CallOperandVal;
6601 if (isa<ConstantFP>(OpVal) || isa<ConstantInt>(OpVal) ||
6602 isa<ConstantVector>(OpVal) || isa<ConstantDataVector>(OpVal)) {
6603 OpInfo.CallOperand = DAG.getConstantPool(cast<Constant>(OpVal),
6604 TLI.getPointerTy());
6605 } else {
6606 // Otherwise, create a stack slot and emit a store to it before the
6607 // asm.
6608 Type *Ty = OpVal->getType();
6609 uint64_t TySize = TLI.getDataLayout()->getTypeAllocSize(Ty);
6610 unsigned Align = TLI.getDataLayout()->getPrefTypeAlignment(Ty);
6611 MachineFunction &MF = DAG.getMachineFunction();
6612 int SSFI = MF.getFrameInfo()->CreateStackObject(TySize, Align, false);
6613 SDValue StackSlot = DAG.getFrameIndex(SSFI, TLI.getPointerTy());
6614 Chain = DAG.getStore(Chain, getCurSDLoc(),
6615 OpInfo.CallOperand, StackSlot,
6616 MachinePointerInfo::getFixedStack(SSFI),
6617 false, false, 0);
6618 OpInfo.CallOperand = StackSlot;
6619 }
6620
6621 // There is no longer a Value* corresponding to this operand.
6622 OpInfo.CallOperandVal = nullptr;
6623
6624 // It is now an indirect operand.
6625 OpInfo.isIndirect = true;
6626 }
6627
6628 // If this constraint is for a specific register, allocate it before
6629 // anything else.
6630 if (OpInfo.ConstraintType == TargetLowering::C_Register)
6631 GetRegistersForValue(DAG, TLI, getCurSDLoc(), OpInfo);
6632 }
6633
6634 // Second pass - Loop over all of the operands, assigning virtual or physregs
6635 // to register class operands.
6636 for (unsigned i = 0, e = ConstraintOperands.size(); i != e; ++i) {
6637 SDISelAsmOperandInfo &OpInfo = ConstraintOperands[i];
6638
6639 // C_Register operands have already been allocated, Other/Memory don't need
6640 // to be.
6641 if (OpInfo.ConstraintType == TargetLowering::C_RegisterClass)
6642 GetRegistersForValue(DAG, TLI, getCurSDLoc(), OpInfo);
6643 }
6644
6645 // AsmNodeOperands - The operands for the ISD::INLINEASM node.
6646 std::vector<SDValue> AsmNodeOperands;
6647 AsmNodeOperands.push_back(SDValue()); // reserve space for input chain
6648 AsmNodeOperands.push_back(
6649 DAG.getTargetExternalSymbol(IA->getAsmString().c_str(),
6650 TLI.getPointerTy()));
6651
6652 // If we have a !srcloc metadata node associated with it, we want to attach
6653 // this to the ultimately generated inline asm machineinstr. To do this, we
6654 // pass in the third operand as this (potentially null) inline asm MDNode.
6655 const MDNode *SrcLoc = CS.getInstruction()->getMetadata("srcloc");
6656 AsmNodeOperands.push_back(DAG.getMDNode(SrcLoc));
6657
6658 // Remember the HasSideEffect, AlignStack, AsmDialect, MayLoad and MayStore
6659 // bits as operand 3.
6660 unsigned ExtraInfo = 0;
6661 if (IA->hasSideEffects())
6662 ExtraInfo |= InlineAsm::Extra_HasSideEffects;
6663 if (IA->isAlignStack())
6664 ExtraInfo |= InlineAsm::Extra_IsAlignStack;
6665 // Set the asm dialect.
6666 ExtraInfo |= IA->getDialect() * InlineAsm::Extra_AsmDialect;
6667
6668 // Determine if this InlineAsm MayLoad or MayStore based on the constraints.
6669 for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) {
6670 TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i];
6671
6672 // Compute the constraint code and ConstraintType to use.
6673 TLI.ComputeConstraintToUse(OpInfo, SDValue());
6674
6675 // Ideally, we would only check against memory constraints. However, the
6676 // meaning of an other constraint can be target-specific and we can't easily
6677 // reason about it. Therefore, be conservative and set MayLoad/MayStore
6678 // for other constriants as well.
6679 if (OpInfo.ConstraintType == TargetLowering::C_Memory ||
6680 OpInfo.ConstraintType == TargetLowering::C_Other) {
6681 if (OpInfo.Type == InlineAsm::isInput)
6682 ExtraInfo |= InlineAsm::Extra_MayLoad;
6683 else if (OpInfo.Type == InlineAsm::isOutput)
6684 ExtraInfo |= InlineAsm::Extra_MayStore;
6685 else if (OpInfo.Type == InlineAsm::isClobber)
6686 ExtraInfo |= (InlineAsm::Extra_MayLoad | InlineAsm::Extra_MayStore);
6687 }
6688 }
6689
6690 AsmNodeOperands.push_back(DAG.getTargetConstant(ExtraInfo,
6691 TLI.getPointerTy()));
6692
6693 // Loop over all of the inputs, copying the operand values into the
6694 // appropriate registers and processing the output regs.
6695 RegsForValue RetValRegs;
6696
6697 // IndirectStoresToEmit - The set of stores to emit after the inline asm node.
6698 std::vector<std::pair<RegsForValue, Value*> > IndirectStoresToEmit;
6699
6700 for (unsigned i = 0, e = ConstraintOperands.size(); i != e; ++i) {
6701 SDISelAsmOperandInfo &OpInfo = ConstraintOperands[i];
6702
6703 switch (OpInfo.Type) {
6704 case InlineAsm::isOutput: {
6705 if (OpInfo.ConstraintType != TargetLowering::C_RegisterClass &&
6706 OpInfo.ConstraintType != TargetLowering::C_Register) {
6707 // Memory output, or 'other' output (e.g. 'X' constraint).
6708 assert(OpInfo.isIndirect && "Memory output must be indirect operand");
6709
6710 // Add information to the INLINEASM node to know about this output.
6711 unsigned OpFlags = InlineAsm::getFlagWord(InlineAsm::Kind_Mem, 1);
6712 AsmNodeOperands.push_back(DAG.getTargetConstant(OpFlags,
6713 TLI.getPointerTy()));
6714 AsmNodeOperands.push_back(OpInfo.CallOperand);
6715 break;
6716 }
6717
6718 // Otherwise, this is a register or register class output.
6719
6720 // Copy the output from the appropriate register. Find a register that
6721 // we can use.
6722 if (OpInfo.AssignedRegs.Regs.empty()) {
6723 LLVMContext &Ctx = *DAG.getContext();
6724 Ctx.emitError(CS.getInstruction(),
6725 "couldn't allocate output register for constraint '" +
6726 Twine(OpInfo.ConstraintCode) + "'");
6727 return;
6728 }
6729
6730 // If this is an indirect operand, store through the pointer after the
6731 // asm.
6732 if (OpInfo.isIndirect) {
6733 IndirectStoresToEmit.push_back(std::make_pair(OpInfo.AssignedRegs,
6734 OpInfo.CallOperandVal));
6735 } else {
6736 // This is the result value of the call.
6737 assert(!CS.getType()->isVoidTy() && "Bad inline asm!");
6738 // Concatenate this output onto the outputs list.
6739 RetValRegs.append(OpInfo.AssignedRegs);
6740 }
6741
6742 // Add information to the INLINEASM node to know that this register is
6743 // set.
6744 OpInfo.AssignedRegs
6745 .AddInlineAsmOperands(OpInfo.isEarlyClobber
6746 ? InlineAsm::Kind_RegDefEarlyClobber
6747 : InlineAsm::Kind_RegDef,
6748 false, 0, DAG, AsmNodeOperands);
6749 break;
6750 }
6751 case InlineAsm::isInput: {
6752 SDValue InOperandVal = OpInfo.CallOperand;
6753
6754 if (OpInfo.isMatchingInputConstraint()) { // Matching constraint?
6755 // If this is required to match an output register we have already set,
6756 // just use its register.
6757 unsigned OperandNo = OpInfo.getMatchedOperand();
6758
6759 // Scan until we find the definition we already emitted of this operand.
6760 // When we find it, create a RegsForValue operand.
6761 unsigned CurOp = InlineAsm::Op_FirstOperand;
6762 for (; OperandNo; --OperandNo) {
6763 // Advance to the next operand.
6764 unsigned OpFlag =
6765 cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue();
6766 assert((InlineAsm::isRegDefKind(OpFlag) ||
6767 InlineAsm::isRegDefEarlyClobberKind(OpFlag) ||
6768 InlineAsm::isMemKind(OpFlag)) && "Skipped past definitions?");
6769 CurOp += InlineAsm::getNumOperandRegisters(OpFlag)+1;
6770 }
6771
6772 unsigned OpFlag =
6773 cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue();
6774 if (InlineAsm::isRegDefKind(OpFlag) ||
6775 InlineAsm::isRegDefEarlyClobberKind(OpFlag)) {
6776 // Add (OpFlag&0xffff)>>3 registers to MatchedRegs.
6777 if (OpInfo.isIndirect) {
6778 // This happens on gcc/testsuite/gcc.dg/pr8788-1.c
6779 LLVMContext &Ctx = *DAG.getContext();
6780 Ctx.emitError(CS.getInstruction(), "inline asm not supported yet:"
6781 " don't know how to handle tied "
6782 "indirect register inputs");
6783 return;
6784 }
6785
6786 RegsForValue MatchedRegs;
6787 MatchedRegs.ValueVTs.push_back(InOperandVal.getValueType());
6788 MVT RegVT = AsmNodeOperands[CurOp+1].getSimpleValueType();
6789 MatchedRegs.RegVTs.push_back(RegVT);
6790 MachineRegisterInfo &RegInfo = DAG.getMachineFunction().getRegInfo();
6791 for (unsigned i = 0, e = InlineAsm::getNumOperandRegisters(OpFlag);
6792 i != e; ++i) {
6793 if (const TargetRegisterClass *RC = TLI.getRegClassFor(RegVT))
6794 MatchedRegs.Regs.push_back(RegInfo.createVirtualRegister(RC));
6795 else {
6796 LLVMContext &Ctx = *DAG.getContext();
6797 Ctx.emitError(CS.getInstruction(),
6798 "inline asm error: This value"
6799 " type register class is not natively supported!");
6800 return;
6801 }
6802 }
6803 // Use the produced MatchedRegs object to
6804 MatchedRegs.getCopyToRegs(InOperandVal, DAG, getCurSDLoc(),
6805 Chain, &Flag, CS.getInstruction());
6806 MatchedRegs.AddInlineAsmOperands(InlineAsm::Kind_RegUse,
6807 true, OpInfo.getMatchedOperand(),
6808 DAG, AsmNodeOperands);
6809 break;
6810 }
6811
6812 assert(InlineAsm::isMemKind(OpFlag) && "Unknown matching constraint!");
6813 assert(InlineAsm::getNumOperandRegisters(OpFlag) == 1 &&
6814 "Unexpected number of operands");
6815 // Add information to the INLINEASM node to know about this input.
6816 // See InlineAsm.h isUseOperandTiedToDef.
6817 OpFlag = InlineAsm::getFlagWordForMatchingOp(OpFlag,
6818 OpInfo.getMatchedOperand());
6819 AsmNodeOperands.push_back(DAG.getTargetConstant(OpFlag,
6820 TLI.getPointerTy()));
6821 AsmNodeOperands.push_back(AsmNodeOperands[CurOp+1]);
6822 break;
6823 }
6824
6825 // Treat indirect 'X' constraint as memory.
6826 if (OpInfo.ConstraintType == TargetLowering::C_Other &&
6827 OpInfo.isIndirect)
6828 OpInfo.ConstraintType = TargetLowering::C_Memory;
6829
6830 if (OpInfo.ConstraintType == TargetLowering::C_Other) {
6831 std::vector<SDValue> Ops;
6832 TLI.LowerAsmOperandForConstraint(InOperandVal, OpInfo.ConstraintCode,
6833 Ops, DAG);
6834 if (Ops.empty()) {
6835 LLVMContext &Ctx = *DAG.getContext();
6836 Ctx.emitError(CS.getInstruction(),
6837 "invalid operand for inline asm constraint '" +
6838 Twine(OpInfo.ConstraintCode) + "'");
6839 return;
6840 }
6841
6842 // Add information to the INLINEASM node to know about this input.
6843 unsigned ResOpType =
6844 InlineAsm::getFlagWord(InlineAsm::Kind_Imm, Ops.size());
6845 AsmNodeOperands.push_back(DAG.getTargetConstant(ResOpType,
6846 TLI.getPointerTy()));
6847 AsmNodeOperands.insert(AsmNodeOperands.end(), Ops.begin(), Ops.end());
6848 break;
6849 }
6850
6851 if (OpInfo.ConstraintType == TargetLowering::C_Memory) {
6852 assert(OpInfo.isIndirect && "Operand must be indirect to be a mem!");
6853 assert(InOperandVal.getValueType() == TLI.getPointerTy() &&
6854 "Memory operands expect pointer values");
6855
6856 // Add information to the INLINEASM node to know about this input.
6857 unsigned ResOpType = InlineAsm::getFlagWord(InlineAsm::Kind_Mem, 1);
6858 AsmNodeOperands.push_back(DAG.getTargetConstant(ResOpType,
6859 TLI.getPointerTy()));
6860 AsmNodeOperands.push_back(InOperandVal);
6861 break;
6862 }
6863
6864 assert((OpInfo.ConstraintType == TargetLowering::C_RegisterClass ||
6865 OpInfo.ConstraintType == TargetLowering::C_Register) &&
6866 "Unknown constraint type!");
6867
6868 // TODO: Support this.
6869 if (OpInfo.isIndirect) {
6870 LLVMContext &Ctx = *DAG.getContext();
6871 Ctx.emitError(CS.getInstruction(),
6872 "Don't know how to handle indirect register inputs yet "
6873 "for constraint '" +
6874 Twine(OpInfo.ConstraintCode) + "'");
6875 return;
6876 }
6877
6878 // Copy the input into the appropriate registers.
6879 if (OpInfo.AssignedRegs.Regs.empty()) {
6880 LLVMContext &Ctx = *DAG.getContext();
6881 Ctx.emitError(CS.getInstruction(),
6882 "couldn't allocate input reg for constraint '" +
6883 Twine(OpInfo.ConstraintCode) + "'");
6884 return;
6885 }
6886
6887 OpInfo.AssignedRegs.getCopyToRegs(InOperandVal, DAG, getCurSDLoc(),
6888 Chain, &Flag, CS.getInstruction());
6889
6890 OpInfo.AssignedRegs.AddInlineAsmOperands(InlineAsm::Kind_RegUse, false, 0,
6891 DAG, AsmNodeOperands);
6892 break;
6893 }
6894 case InlineAsm::isClobber: {
6895 // Add the clobbered value to the operand list, so that the register
6896 // allocator is aware that the physreg got clobbered.
6897 if (!OpInfo.AssignedRegs.Regs.empty())
6898 OpInfo.AssignedRegs.AddInlineAsmOperands(InlineAsm::Kind_Clobber,
6899 false, 0, DAG,
6900 AsmNodeOperands);
6901 break;
6902 }
6903 }
6904 }
6905
6906 // Finish up input operands. Set the input chain and add the flag last.
6907 AsmNodeOperands[InlineAsm::Op_InputChain] = Chain;
6908 if (Flag.getNode()) AsmNodeOperands.push_back(Flag);
6909
6910 Chain = DAG.getNode(ISD::INLINEASM, getCurSDLoc(),
6911 DAG.getVTList(MVT::Other, MVT::Glue), AsmNodeOperands);
6912 Flag = Chain.getValue(1);
6913
6914 // If this asm returns a register value, copy the result from that register
6915 // and set it as the value of the call.
6916 if (!RetValRegs.Regs.empty()) {
6917 SDValue Val = RetValRegs.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(),
6918 Chain, &Flag, CS.getInstruction());
6919
6920 // FIXME: Why don't we do this for inline asms with MRVs?
6921 if (CS.getType()->isSingleValueType() && CS.getType()->isSized()) {
6922 EVT ResultType = TLI.getValueType(CS.getType());
6923
6924 // If any of the results of the inline asm is a vector, it may have the
6925 // wrong width/num elts. This can happen for register classes that can
6926 // contain multiple different value types. The preg or vreg allocated may
6927 // not have the same VT as was expected. Convert it to the right type
6928 // with bit_convert.
6929 if (ResultType != Val.getValueType() && Val.getValueType().isVector()) {
6930 Val = DAG.getNode(ISD::BITCAST, getCurSDLoc(),
6931 ResultType, Val);
6932
6933 } else if (ResultType != Val.getValueType() &&
6934 ResultType.isInteger() && Val.getValueType().isInteger()) {
6935 // If a result value was tied to an input value, the computed result may
6936 // have a wider width than the expected result. Extract the relevant
6937 // portion.
6938 Val = DAG.getNode(ISD::TRUNCATE, getCurSDLoc(), ResultType, Val);
6939 }
6940
6941 assert(ResultType == Val.getValueType() && "Asm result value mismatch!");
6942 }
6943
6944 setValue(CS.getInstruction(), Val);
6945 // Don't need to use this as a chain in this case.
6946 if (!IA->hasSideEffects() && !hasMemory && IndirectStoresToEmit.empty())
6947 return;
6948 }
6949
6950 std::vector<std::pair<SDValue, const Value *> > StoresToEmit;
6951
6952 // Process indirect outputs, first output all of the flagged copies out of
6953 // physregs.
6954 for (unsigned i = 0, e = IndirectStoresToEmit.size(); i != e; ++i) {
6955 RegsForValue &OutRegs = IndirectStoresToEmit[i].first;
6956 const Value *Ptr = IndirectStoresToEmit[i].second;
6957 SDValue OutVal = OutRegs.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(),
6958 Chain, &Flag, IA);
6959 StoresToEmit.push_back(std::make_pair(OutVal, Ptr));
6960 }
6961
6962 // Emit the non-flagged stores from the physregs.
6963 SmallVector<SDValue, 8> OutChains;
6964 for (unsigned i = 0, e = StoresToEmit.size(); i != e; ++i) {
6965 SDValue Val = DAG.getStore(Chain, getCurSDLoc(),
6966 StoresToEmit[i].first,
6967 getValue(StoresToEmit[i].second),
6968 MachinePointerInfo(StoresToEmit[i].second),
6969 false, false, 0);
6970 OutChains.push_back(Val);
6971 }
6972
6973 if (!OutChains.empty())
6974 Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), MVT::Other, OutChains);
6975
6976 DAG.setRoot(Chain);
6977 }
6978
visitVAStart(const CallInst & I)6979 void SelectionDAGBuilder::visitVAStart(const CallInst &I) {
6980 DAG.setRoot(DAG.getNode(ISD::VASTART, getCurSDLoc(),
6981 MVT::Other, getRoot(),
6982 getValue(I.getArgOperand(0)),
6983 DAG.getSrcValue(I.getArgOperand(0))));
6984 }
6985
visitVAArg(const VAArgInst & I)6986 void SelectionDAGBuilder::visitVAArg(const VAArgInst &I) {
6987 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6988 const DataLayout &DL = *TLI.getDataLayout();
6989 SDValue V = DAG.getVAArg(TLI.getValueType(I.getType()), getCurSDLoc(),
6990 getRoot(), getValue(I.getOperand(0)),
6991 DAG.getSrcValue(I.getOperand(0)),
6992 DL.getABITypeAlignment(I.getType()));
6993 setValue(&I, V);
6994 DAG.setRoot(V.getValue(1));
6995 }
6996
visitVAEnd(const CallInst & I)6997 void SelectionDAGBuilder::visitVAEnd(const CallInst &I) {
6998 DAG.setRoot(DAG.getNode(ISD::VAEND, getCurSDLoc(),
6999 MVT::Other, getRoot(),
7000 getValue(I.getArgOperand(0)),
7001 DAG.getSrcValue(I.getArgOperand(0))));
7002 }
7003
visitVACopy(const CallInst & I)7004 void SelectionDAGBuilder::visitVACopy(const CallInst &I) {
7005 DAG.setRoot(DAG.getNode(ISD::VACOPY, getCurSDLoc(),
7006 MVT::Other, getRoot(),
7007 getValue(I.getArgOperand(0)),
7008 getValue(I.getArgOperand(1)),
7009 DAG.getSrcValue(I.getArgOperand(0)),
7010 DAG.getSrcValue(I.getArgOperand(1))));
7011 }
7012
7013 /// \brief Lower an argument list according to the target calling convention.
7014 ///
7015 /// \return A tuple of <return-value, token-chain>
7016 ///
7017 /// This is a helper for lowering intrinsics that follow a target calling
7018 /// convention or require stack pointer adjustment. Only a subset of the
7019 /// intrinsic's operands need to participate in the calling convention.
7020 std::pair<SDValue, SDValue>
lowerCallOperands(ImmutableCallSite CS,unsigned ArgIdx,unsigned NumArgs,SDValue Callee,bool UseVoidTy,MachineBasicBlock * LandingPad,bool IsPatchPoint)7021 SelectionDAGBuilder::lowerCallOperands(ImmutableCallSite CS, unsigned ArgIdx,
7022 unsigned NumArgs, SDValue Callee,
7023 bool UseVoidTy,
7024 MachineBasicBlock *LandingPad,
7025 bool IsPatchPoint) {
7026 TargetLowering::ArgListTy Args;
7027 Args.reserve(NumArgs);
7028
7029 // Populate the argument list.
7030 // Attributes for args start at offset 1, after the return attribute.
7031 for (unsigned ArgI = ArgIdx, ArgE = ArgIdx + NumArgs, AttrI = ArgIdx + 1;
7032 ArgI != ArgE; ++ArgI) {
7033 const Value *V = CS->getOperand(ArgI);
7034
7035 assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");
7036
7037 TargetLowering::ArgListEntry Entry;
7038 Entry.Node = getValue(V);
7039 Entry.Ty = V->getType();
7040 Entry.setAttributes(&CS, AttrI);
7041 Args.push_back(Entry);
7042 }
7043
7044 Type *retTy = UseVoidTy ? Type::getVoidTy(*DAG.getContext()) : CS->getType();
7045 TargetLowering::CallLoweringInfo CLI(DAG);
7046 CLI.setDebugLoc(getCurSDLoc()).setChain(getRoot())
7047 .setCallee(CS.getCallingConv(), retTy, Callee, std::move(Args), NumArgs)
7048 .setDiscardResult(CS->use_empty()).setIsPatchPoint(IsPatchPoint);
7049
7050 return lowerInvokable(CLI, LandingPad);
7051 }
7052
7053 /// \brief Add a stack map intrinsic call's live variable operands to a stackmap
7054 /// or patchpoint target node's operand list.
7055 ///
7056 /// Constants are converted to TargetConstants purely as an optimization to
7057 /// avoid constant materialization and register allocation.
7058 ///
7059 /// FrameIndex operands are converted to TargetFrameIndex so that ISEL does not
7060 /// generate addess computation nodes, and so ExpandISelPseudo can convert the
7061 /// TargetFrameIndex into a DirectMemRefOp StackMap location. This avoids
7062 /// address materialization and register allocation, but may also be required
7063 /// for correctness. If a StackMap (or PatchPoint) intrinsic directly uses an
7064 /// alloca in the entry block, then the runtime may assume that the alloca's
7065 /// StackMap location can be read immediately after compilation and that the
7066 /// location is valid at any point during execution (this is similar to the
7067 /// assumption made by the llvm.gcroot intrinsic). If the alloca's location were
7068 /// only available in a register, then the runtime would need to trap when
7069 /// execution reaches the StackMap in order to read the alloca's location.
addStackMapLiveVars(ImmutableCallSite CS,unsigned StartIdx,SmallVectorImpl<SDValue> & Ops,SelectionDAGBuilder & Builder)7070 static void addStackMapLiveVars(ImmutableCallSite CS, unsigned StartIdx,
7071 SmallVectorImpl<SDValue> &Ops,
7072 SelectionDAGBuilder &Builder) {
7073 for (unsigned i = StartIdx, e = CS.arg_size(); i != e; ++i) {
7074 SDValue OpVal = Builder.getValue(CS.getArgument(i));
7075 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(OpVal)) {
7076 Ops.push_back(
7077 Builder.DAG.getTargetConstant(StackMaps::ConstantOp, MVT::i64));
7078 Ops.push_back(
7079 Builder.DAG.getTargetConstant(C->getSExtValue(), MVT::i64));
7080 } else if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(OpVal)) {
7081 const TargetLowering &TLI = Builder.DAG.getTargetLoweringInfo();
7082 Ops.push_back(
7083 Builder.DAG.getTargetFrameIndex(FI->getIndex(), TLI.getPointerTy()));
7084 } else
7085 Ops.push_back(OpVal);
7086 }
7087 }
7088
7089 /// \brief Lower llvm.experimental.stackmap directly to its target opcode.
visitStackmap(const CallInst & CI)7090 void SelectionDAGBuilder::visitStackmap(const CallInst &CI) {
7091 // void @llvm.experimental.stackmap(i32 <id>, i32 <numShadowBytes>,
7092 // [live variables...])
7093
7094 assert(CI.getType()->isVoidTy() && "Stackmap cannot return a value.");
7095
7096 SDValue Chain, InFlag, Callee, NullPtr;
7097 SmallVector<SDValue, 32> Ops;
7098
7099 SDLoc DL = getCurSDLoc();
7100 Callee = getValue(CI.getCalledValue());
7101 NullPtr = DAG.getIntPtrConstant(0, true);
7102
7103 // The stackmap intrinsic only records the live variables (the arguemnts
7104 // passed to it) and emits NOPS (if requested). Unlike the patchpoint
7105 // intrinsic, this won't be lowered to a function call. This means we don't
7106 // have to worry about calling conventions and target specific lowering code.
7107 // Instead we perform the call lowering right here.
7108 //
7109 // chain, flag = CALLSEQ_START(chain, 0)
7110 // chain, flag = STACKMAP(id, nbytes, ..., chain, flag)
7111 // chain, flag = CALLSEQ_END(chain, 0, 0, flag)
7112 //
7113 Chain = DAG.getCALLSEQ_START(getRoot(), NullPtr, DL);
7114 InFlag = Chain.getValue(1);
7115
7116 // Add the <id> and <numBytes> constants.
7117 SDValue IDVal = getValue(CI.getOperand(PatchPointOpers::IDPos));
7118 Ops.push_back(DAG.getTargetConstant(
7119 cast<ConstantSDNode>(IDVal)->getZExtValue(), MVT::i64));
7120 SDValue NBytesVal = getValue(CI.getOperand(PatchPointOpers::NBytesPos));
7121 Ops.push_back(DAG.getTargetConstant(
7122 cast<ConstantSDNode>(NBytesVal)->getZExtValue(), MVT::i32));
7123
7124 // Push live variables for the stack map.
7125 addStackMapLiveVars(&CI, 2, Ops, *this);
7126
7127 // We are not pushing any register mask info here on the operands list,
7128 // because the stackmap doesn't clobber anything.
7129
7130 // Push the chain and the glue flag.
7131 Ops.push_back(Chain);
7132 Ops.push_back(InFlag);
7133
7134 // Create the STACKMAP node.
7135 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
7136 SDNode *SM = DAG.getMachineNode(TargetOpcode::STACKMAP, DL, NodeTys, Ops);
7137 Chain = SDValue(SM, 0);
7138 InFlag = Chain.getValue(1);
7139
7140 Chain = DAG.getCALLSEQ_END(Chain, NullPtr, NullPtr, InFlag, DL);
7141
7142 // Stackmaps don't generate values, so nothing goes into the NodeMap.
7143
7144 // Set the root to the target-lowered call chain.
7145 DAG.setRoot(Chain);
7146
7147 // Inform the Frame Information that we have a stackmap in this function.
7148 FuncInfo.MF->getFrameInfo()->setHasStackMap();
7149 }
7150
7151 /// \brief Lower llvm.experimental.patchpoint directly to its target opcode.
visitPatchpoint(ImmutableCallSite CS,MachineBasicBlock * LandingPad)7152 void SelectionDAGBuilder::visitPatchpoint(ImmutableCallSite CS,
7153 MachineBasicBlock *LandingPad) {
7154 // void|i64 @llvm.experimental.patchpoint.void|i64(i64 <id>,
7155 // i32 <numBytes>,
7156 // i8* <target>,
7157 // i32 <numArgs>,
7158 // [Args...],
7159 // [live variables...])
7160
7161 CallingConv::ID CC = CS.getCallingConv();
7162 bool IsAnyRegCC = CC == CallingConv::AnyReg;
7163 bool HasDef = !CS->getType()->isVoidTy();
7164 SDValue Callee = getValue(CS->getOperand(2)); // <target>
7165
7166 // Get the real number of arguments participating in the call <numArgs>
7167 SDValue NArgVal = getValue(CS.getArgument(PatchPointOpers::NArgPos));
7168 unsigned NumArgs = cast<ConstantSDNode>(NArgVal)->getZExtValue();
7169
7170 // Skip the four meta args: <id>, <numNopBytes>, <target>, <numArgs>
7171 // Intrinsics include all meta-operands up to but not including CC.
7172 unsigned NumMetaOpers = PatchPointOpers::CCPos;
7173 assert(CS.arg_size() >= NumMetaOpers + NumArgs &&
7174 "Not enough arguments provided to the patchpoint intrinsic");
7175
7176 // For AnyRegCC the arguments are lowered later on manually.
7177 unsigned NumCallArgs = IsAnyRegCC ? 0 : NumArgs;
7178 std::pair<SDValue, SDValue> Result =
7179 lowerCallOperands(CS, NumMetaOpers, NumCallArgs, Callee, IsAnyRegCC,
7180 LandingPad, true);
7181
7182 SDNode *CallEnd = Result.second.getNode();
7183 if (HasDef && (CallEnd->getOpcode() == ISD::CopyFromReg))
7184 CallEnd = CallEnd->getOperand(0).getNode();
7185
7186 /// Get a call instruction from the call sequence chain.
7187 /// Tail calls are not allowed.
7188 assert(CallEnd->getOpcode() == ISD::CALLSEQ_END &&
7189 "Expected a callseq node.");
7190 SDNode *Call = CallEnd->getOperand(0).getNode();
7191 bool HasGlue = Call->getGluedNode();
7192
7193 // Replace the target specific call node with the patchable intrinsic.
7194 SmallVector<SDValue, 8> Ops;
7195
7196 // Add the <id> and <numBytes> constants.
7197 SDValue IDVal = getValue(CS->getOperand(PatchPointOpers::IDPos));
7198 Ops.push_back(DAG.getTargetConstant(
7199 cast<ConstantSDNode>(IDVal)->getZExtValue(), MVT::i64));
7200 SDValue NBytesVal = getValue(CS->getOperand(PatchPointOpers::NBytesPos));
7201 Ops.push_back(DAG.getTargetConstant(
7202 cast<ConstantSDNode>(NBytesVal)->getZExtValue(), MVT::i32));
7203
7204 // Assume that the Callee is a constant address.
7205 // FIXME: handle function symbols in the future.
7206 Ops.push_back(
7207 DAG.getIntPtrConstant(cast<ConstantSDNode>(Callee)->getZExtValue(),
7208 /*isTarget=*/true));
7209
7210 // Adjust <numArgs> to account for any arguments that have been passed on the
7211 // stack instead.
7212 // Call Node: Chain, Target, {Args}, RegMask, [Glue]
7213 unsigned NumCallRegArgs = Call->getNumOperands() - (HasGlue ? 4 : 3);
7214 NumCallRegArgs = IsAnyRegCC ? NumArgs : NumCallRegArgs;
7215 Ops.push_back(DAG.getTargetConstant(NumCallRegArgs, MVT::i32));
7216
7217 // Add the calling convention
7218 Ops.push_back(DAG.getTargetConstant((unsigned)CC, MVT::i32));
7219
7220 // Add the arguments we omitted previously. The register allocator should
7221 // place these in any free register.
7222 if (IsAnyRegCC)
7223 for (unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i != e; ++i)
7224 Ops.push_back(getValue(CS.getArgument(i)));
7225
7226 // Push the arguments from the call instruction up to the register mask.
7227 SDNode::op_iterator e = HasGlue ? Call->op_end()-2 : Call->op_end()-1;
7228 for (SDNode::op_iterator i = Call->op_begin()+2; i != e; ++i)
7229 Ops.push_back(*i);
7230
7231 // Push live variables for the stack map.
7232 addStackMapLiveVars(CS, NumMetaOpers + NumArgs, Ops, *this);
7233
7234 // Push the register mask info.
7235 if (HasGlue)
7236 Ops.push_back(*(Call->op_end()-2));
7237 else
7238 Ops.push_back(*(Call->op_end()-1));
7239
7240 // Push the chain (this is originally the first operand of the call, but
7241 // becomes now the last or second to last operand).
7242 Ops.push_back(*(Call->op_begin()));
7243
7244 // Push the glue flag (last operand).
7245 if (HasGlue)
7246 Ops.push_back(*(Call->op_end()-1));
7247
7248 SDVTList NodeTys;
7249 if (IsAnyRegCC && HasDef) {
7250 // Create the return types based on the intrinsic definition
7251 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7252 SmallVector<EVT, 3> ValueVTs;
7253 ComputeValueVTs(TLI, CS->getType(), ValueVTs);
7254 assert(ValueVTs.size() == 1 && "Expected only one return value type.");
7255
7256 // There is always a chain and a glue type at the end
7257 ValueVTs.push_back(MVT::Other);
7258 ValueVTs.push_back(MVT::Glue);
7259 NodeTys = DAG.getVTList(ValueVTs);
7260 } else
7261 NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
7262
7263 // Replace the target specific call node with a PATCHPOINT node.
7264 MachineSDNode *MN = DAG.getMachineNode(TargetOpcode::PATCHPOINT,
7265 getCurSDLoc(), NodeTys, Ops);
7266
7267 // Update the NodeMap.
7268 if (HasDef) {
7269 if (IsAnyRegCC)
7270 setValue(CS.getInstruction(), SDValue(MN, 0));
7271 else
7272 setValue(CS.getInstruction(), Result.first);
7273 }
7274
7275 // Fixup the consumers of the intrinsic. The chain and glue may be used in the
7276 // call sequence. Furthermore the location of the chain and glue can change
7277 // when the AnyReg calling convention is used and the intrinsic returns a
7278 // value.
7279 if (IsAnyRegCC && HasDef) {
7280 SDValue From[] = {SDValue(Call, 0), SDValue(Call, 1)};
7281 SDValue To[] = {SDValue(MN, 1), SDValue(MN, 2)};
7282 DAG.ReplaceAllUsesOfValuesWith(From, To, 2);
7283 } else
7284 DAG.ReplaceAllUsesWith(Call, MN);
7285 DAG.DeleteNode(Call);
7286
7287 // Inform the Frame Information that we have a patchpoint in this function.
7288 FuncInfo.MF->getFrameInfo()->setHasPatchPoint();
7289 }
7290
7291 /// Returns an AttributeSet representing the attributes applied to the return
7292 /// value of the given call.
getReturnAttrs(TargetLowering::CallLoweringInfo & CLI)7293 static AttributeSet getReturnAttrs(TargetLowering::CallLoweringInfo &CLI) {
7294 SmallVector<Attribute::AttrKind, 2> Attrs;
7295 if (CLI.RetSExt)
7296 Attrs.push_back(Attribute::SExt);
7297 if (CLI.RetZExt)
7298 Attrs.push_back(Attribute::ZExt);
7299 if (CLI.IsInReg)
7300 Attrs.push_back(Attribute::InReg);
7301
7302 return AttributeSet::get(CLI.RetTy->getContext(), AttributeSet::ReturnIndex,
7303 Attrs);
7304 }
7305
7306 /// TargetLowering::LowerCallTo - This is the default LowerCallTo
7307 /// implementation, which just calls LowerCall.
7308 /// FIXME: When all targets are
7309 /// migrated to using LowerCall, this hook should be integrated into SDISel.
7310 std::pair<SDValue, SDValue>
LowerCallTo(TargetLowering::CallLoweringInfo & CLI) const7311 TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
7312 // Handle the incoming return values from the call.
7313 CLI.Ins.clear();
7314 Type *OrigRetTy = CLI.RetTy;
7315 SmallVector<EVT, 4> RetTys;
7316 SmallVector<uint64_t, 4> Offsets;
7317 ComputeValueVTs(*this, CLI.RetTy, RetTys, &Offsets);
7318
7319 SmallVector<ISD::OutputArg, 4> Outs;
7320 GetReturnInfo(CLI.RetTy, getReturnAttrs(CLI), Outs, *this);
7321
7322 bool CanLowerReturn =
7323 this->CanLowerReturn(CLI.CallConv, CLI.DAG.getMachineFunction(),
7324 CLI.IsVarArg, Outs, CLI.RetTy->getContext());
7325
7326 SDValue DemoteStackSlot;
7327 int DemoteStackIdx = -100;
7328 if (!CanLowerReturn) {
7329 // FIXME: equivalent assert?
7330 // assert(!CS.hasInAllocaArgument() &&
7331 // "sret demotion is incompatible with inalloca");
7332 uint64_t TySize = getDataLayout()->getTypeAllocSize(CLI.RetTy);
7333 unsigned Align = getDataLayout()->getPrefTypeAlignment(CLI.RetTy);
7334 MachineFunction &MF = CLI.DAG.getMachineFunction();
7335 DemoteStackIdx = MF.getFrameInfo()->CreateStackObject(TySize, Align, false);
7336 Type *StackSlotPtrType = PointerType::getUnqual(CLI.RetTy);
7337
7338 DemoteStackSlot = CLI.DAG.getFrameIndex(DemoteStackIdx, getPointerTy());
7339 ArgListEntry Entry;
7340 Entry.Node = DemoteStackSlot;
7341 Entry.Ty = StackSlotPtrType;
7342 Entry.isSExt = false;
7343 Entry.isZExt = false;
7344 Entry.isInReg = false;
7345 Entry.isSRet = true;
7346 Entry.isNest = false;
7347 Entry.isByVal = false;
7348 Entry.isReturned = false;
7349 Entry.Alignment = Align;
7350 CLI.getArgs().insert(CLI.getArgs().begin(), Entry);
7351 CLI.RetTy = Type::getVoidTy(CLI.RetTy->getContext());
7352
7353 // sret demotion isn't compatible with tail-calls, since the sret argument
7354 // points into the callers stack frame.
7355 CLI.IsTailCall = false;
7356 } else {
7357 for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
7358 EVT VT = RetTys[I];
7359 MVT RegisterVT = getRegisterType(CLI.RetTy->getContext(), VT);
7360 unsigned NumRegs = getNumRegisters(CLI.RetTy->getContext(), VT);
7361 for (unsigned i = 0; i != NumRegs; ++i) {
7362 ISD::InputArg MyFlags;
7363 MyFlags.VT = RegisterVT;
7364 MyFlags.ArgVT = VT;
7365 MyFlags.Used = CLI.IsReturnValueUsed;
7366 if (CLI.RetSExt)
7367 MyFlags.Flags.setSExt();
7368 if (CLI.RetZExt)
7369 MyFlags.Flags.setZExt();
7370 if (CLI.IsInReg)
7371 MyFlags.Flags.setInReg();
7372 CLI.Ins.push_back(MyFlags);
7373 }
7374 }
7375 }
7376
7377 // Handle all of the outgoing arguments.
7378 CLI.Outs.clear();
7379 CLI.OutVals.clear();
7380 ArgListTy &Args = CLI.getArgs();
7381 for (unsigned i = 0, e = Args.size(); i != e; ++i) {
7382 SmallVector<EVT, 4> ValueVTs;
7383 ComputeValueVTs(*this, Args[i].Ty, ValueVTs);
7384 Type *FinalType = Args[i].Ty;
7385 if (Args[i].isByVal)
7386 FinalType = cast<PointerType>(Args[i].Ty)->getElementType();
7387 bool NeedsRegBlock = functionArgumentNeedsConsecutiveRegisters(
7388 FinalType, CLI.CallConv, CLI.IsVarArg);
7389 for (unsigned Value = 0, NumValues = ValueVTs.size(); Value != NumValues;
7390 ++Value) {
7391 EVT VT = ValueVTs[Value];
7392 Type *ArgTy = VT.getTypeForEVT(CLI.RetTy->getContext());
7393 SDValue Op = SDValue(Args[i].Node.getNode(),
7394 Args[i].Node.getResNo() + Value);
7395 ISD::ArgFlagsTy Flags;
7396 unsigned OriginalAlignment = getDataLayout()->getABITypeAlignment(ArgTy);
7397
7398 if (Args[i].isZExt)
7399 Flags.setZExt();
7400 if (Args[i].isSExt)
7401 Flags.setSExt();
7402 if (Args[i].isInReg)
7403 Flags.setInReg();
7404 if (Args[i].isSRet)
7405 Flags.setSRet();
7406 if (Args[i].isByVal)
7407 Flags.setByVal();
7408 if (Args[i].isInAlloca) {
7409 Flags.setInAlloca();
7410 // Set the byval flag for CCAssignFn callbacks that don't know about
7411 // inalloca. This way we can know how many bytes we should've allocated
7412 // and how many bytes a callee cleanup function will pop. If we port
7413 // inalloca to more targets, we'll have to add custom inalloca handling
7414 // in the various CC lowering callbacks.
7415 Flags.setByVal();
7416 }
7417 if (Args[i].isByVal || Args[i].isInAlloca) {
7418 PointerType *Ty = cast<PointerType>(Args[i].Ty);
7419 Type *ElementTy = Ty->getElementType();
7420 Flags.setByValSize(getDataLayout()->getTypeAllocSize(ElementTy));
7421 // For ByVal, alignment should come from FE. BE will guess if this
7422 // info is not there but there are cases it cannot get right.
7423 unsigned FrameAlign;
7424 if (Args[i].Alignment)
7425 FrameAlign = Args[i].Alignment;
7426 else
7427 FrameAlign = getByValTypeAlignment(ElementTy);
7428 Flags.setByValAlign(FrameAlign);
7429 }
7430 if (Args[i].isNest)
7431 Flags.setNest();
7432 if (NeedsRegBlock) {
7433 Flags.setInConsecutiveRegs();
7434 if (Value == NumValues - 1)
7435 Flags.setInConsecutiveRegsLast();
7436 }
7437 Flags.setOrigAlign(OriginalAlignment);
7438
7439 MVT PartVT = getRegisterType(CLI.RetTy->getContext(), VT);
7440 unsigned NumParts = getNumRegisters(CLI.RetTy->getContext(), VT);
7441 SmallVector<SDValue, 4> Parts(NumParts);
7442 ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
7443
7444 if (Args[i].isSExt)
7445 ExtendKind = ISD::SIGN_EXTEND;
7446 else if (Args[i].isZExt)
7447 ExtendKind = ISD::ZERO_EXTEND;
7448
7449 // Conservatively only handle 'returned' on non-vectors for now
7450 if (Args[i].isReturned && !Op.getValueType().isVector()) {
7451 assert(CLI.RetTy == Args[i].Ty && RetTys.size() == NumValues &&
7452 "unexpected use of 'returned'");
7453 // Before passing 'returned' to the target lowering code, ensure that
7454 // either the register MVT and the actual EVT are the same size or that
7455 // the return value and argument are extended in the same way; in these
7456 // cases it's safe to pass the argument register value unchanged as the
7457 // return register value (although it's at the target's option whether
7458 // to do so)
7459 // TODO: allow code generation to take advantage of partially preserved
7460 // registers rather than clobbering the entire register when the
7461 // parameter extension method is not compatible with the return
7462 // extension method
7463 if ((NumParts * PartVT.getSizeInBits() == VT.getSizeInBits()) ||
7464 (ExtendKind != ISD::ANY_EXTEND &&
7465 CLI.RetSExt == Args[i].isSExt && CLI.RetZExt == Args[i].isZExt))
7466 Flags.setReturned();
7467 }
7468
7469 getCopyToParts(CLI.DAG, CLI.DL, Op, &Parts[0], NumParts, PartVT,
7470 CLI.CS ? CLI.CS->getInstruction() : nullptr, ExtendKind);
7471
7472 for (unsigned j = 0; j != NumParts; ++j) {
7473 // if it isn't first piece, alignment must be 1
7474 ISD::OutputArg MyFlags(Flags, Parts[j].getValueType(), VT,
7475 i < CLI.NumFixedArgs,
7476 i, j*Parts[j].getValueType().getStoreSize());
7477 if (NumParts > 1 && j == 0)
7478 MyFlags.Flags.setSplit();
7479 else if (j != 0)
7480 MyFlags.Flags.setOrigAlign(1);
7481
7482 CLI.Outs.push_back(MyFlags);
7483 CLI.OutVals.push_back(Parts[j]);
7484 }
7485 }
7486 }
7487
7488 SmallVector<SDValue, 4> InVals;
7489 CLI.Chain = LowerCall(CLI, InVals);
7490
7491 // Verify that the target's LowerCall behaved as expected.
7492 assert(CLI.Chain.getNode() && CLI.Chain.getValueType() == MVT::Other &&
7493 "LowerCall didn't return a valid chain!");
7494 assert((!CLI.IsTailCall || InVals.empty()) &&
7495 "LowerCall emitted a return value for a tail call!");
7496 assert((CLI.IsTailCall || InVals.size() == CLI.Ins.size()) &&
7497 "LowerCall didn't emit the correct number of values!");
7498
7499 // For a tail call, the return value is merely live-out and there aren't
7500 // any nodes in the DAG representing it. Return a special value to
7501 // indicate that a tail call has been emitted and no more Instructions
7502 // should be processed in the current block.
7503 if (CLI.IsTailCall) {
7504 CLI.DAG.setRoot(CLI.Chain);
7505 return std::make_pair(SDValue(), SDValue());
7506 }
7507
7508 DEBUG(for (unsigned i = 0, e = CLI.Ins.size(); i != e; ++i) {
7509 assert(InVals[i].getNode() &&
7510 "LowerCall emitted a null value!");
7511 assert(EVT(CLI.Ins[i].VT) == InVals[i].getValueType() &&
7512 "LowerCall emitted a value with the wrong type!");
7513 });
7514
7515 SmallVector<SDValue, 4> ReturnValues;
7516 if (!CanLowerReturn) {
7517 // The instruction result is the result of loading from the
7518 // hidden sret parameter.
7519 SmallVector<EVT, 1> PVTs;
7520 Type *PtrRetTy = PointerType::getUnqual(OrigRetTy);
7521
7522 ComputeValueVTs(*this, PtrRetTy, PVTs);
7523 assert(PVTs.size() == 1 && "Pointers should fit in one register");
7524 EVT PtrVT = PVTs[0];
7525
7526 unsigned NumValues = RetTys.size();
7527 ReturnValues.resize(NumValues);
7528 SmallVector<SDValue, 4> Chains(NumValues);
7529
7530 for (unsigned i = 0; i < NumValues; ++i) {
7531 SDValue Add = CLI.DAG.getNode(ISD::ADD, CLI.DL, PtrVT, DemoteStackSlot,
7532 CLI.DAG.getConstant(Offsets[i], PtrVT));
7533 SDValue L = CLI.DAG.getLoad(
7534 RetTys[i], CLI.DL, CLI.Chain, Add,
7535 MachinePointerInfo::getFixedStack(DemoteStackIdx, Offsets[i]), false,
7536 false, false, 1);
7537 ReturnValues[i] = L;
7538 Chains[i] = L.getValue(1);
7539 }
7540
7541 CLI.Chain = CLI.DAG.getNode(ISD::TokenFactor, CLI.DL, MVT::Other, Chains);
7542 } else {
7543 // Collect the legal value parts into potentially illegal values
7544 // that correspond to the original function's return values.
7545 ISD::NodeType AssertOp = ISD::DELETED_NODE;
7546 if (CLI.RetSExt)
7547 AssertOp = ISD::AssertSext;
7548 else if (CLI.RetZExt)
7549 AssertOp = ISD::AssertZext;
7550 unsigned CurReg = 0;
7551 for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
7552 EVT VT = RetTys[I];
7553 MVT RegisterVT = getRegisterType(CLI.RetTy->getContext(), VT);
7554 unsigned NumRegs = getNumRegisters(CLI.RetTy->getContext(), VT);
7555
7556 ReturnValues.push_back(getCopyFromParts(CLI.DAG, CLI.DL, &InVals[CurReg],
7557 NumRegs, RegisterVT, VT, nullptr,
7558 AssertOp));
7559 CurReg += NumRegs;
7560 }
7561
7562 // For a function returning void, there is no return value. We can't create
7563 // such a node, so we just return a null return value in that case. In
7564 // that case, nothing will actually look at the value.
7565 if (ReturnValues.empty())
7566 return std::make_pair(SDValue(), CLI.Chain);
7567 }
7568
7569 SDValue Res = CLI.DAG.getNode(ISD::MERGE_VALUES, CLI.DL,
7570 CLI.DAG.getVTList(RetTys), ReturnValues);
7571 return std::make_pair(Res, CLI.Chain);
7572 }
7573
LowerOperationWrapper(SDNode * N,SmallVectorImpl<SDValue> & Results,SelectionDAG & DAG) const7574 void TargetLowering::LowerOperationWrapper(SDNode *N,
7575 SmallVectorImpl<SDValue> &Results,
7576 SelectionDAG &DAG) const {
7577 SDValue Res = LowerOperation(SDValue(N, 0), DAG);
7578 if (Res.getNode())
7579 Results.push_back(Res);
7580 }
7581
LowerOperation(SDValue Op,SelectionDAG & DAG) const7582 SDValue TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
7583 llvm_unreachable("LowerOperation not implemented for this target!");
7584 }
7585
7586 void
CopyValueToVirtualRegister(const Value * V,unsigned Reg)7587 SelectionDAGBuilder::CopyValueToVirtualRegister(const Value *V, unsigned Reg) {
7588 SDValue Op = getNonRegisterValue(V);
7589 assert((Op.getOpcode() != ISD::CopyFromReg ||
7590 cast<RegisterSDNode>(Op.getOperand(1))->getReg() != Reg) &&
7591 "Copy from a reg to the same reg!");
7592 assert(!TargetRegisterInfo::isPhysicalRegister(Reg) && "Is a physreg");
7593
7594 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7595 RegsForValue RFV(V->getContext(), TLI, Reg, V->getType());
7596 SDValue Chain = DAG.getEntryNode();
7597
7598 ISD::NodeType ExtendType = (FuncInfo.PreferredExtendType.find(V) ==
7599 FuncInfo.PreferredExtendType.end())
7600 ? ISD::ANY_EXTEND
7601 : FuncInfo.PreferredExtendType[V];
7602 RFV.getCopyToRegs(Op, DAG, getCurSDLoc(), Chain, nullptr, V, ExtendType);
7603 PendingExports.push_back(Chain);
7604 }
7605
7606 #include "llvm/CodeGen/SelectionDAGISel.h"
7607
7608 /// isOnlyUsedInEntryBlock - If the specified argument is only used in the
7609 /// entry block, return true. This includes arguments used by switches, since
7610 /// the switch may expand into multiple basic blocks.
isOnlyUsedInEntryBlock(const Argument * A,bool FastISel)7611 static bool isOnlyUsedInEntryBlock(const Argument *A, bool FastISel) {
7612 // With FastISel active, we may be splitting blocks, so force creation
7613 // of virtual registers for all non-dead arguments.
7614 if (FastISel)
7615 return A->use_empty();
7616
7617 const BasicBlock *Entry = A->getParent()->begin();
7618 for (const User *U : A->users())
7619 if (cast<Instruction>(U)->getParent() != Entry || isa<SwitchInst>(U))
7620 return false; // Use not in entry block.
7621
7622 return true;
7623 }
7624
LowerArguments(const Function & F)7625 void SelectionDAGISel::LowerArguments(const Function &F) {
7626 SelectionDAG &DAG = SDB->DAG;
7627 SDLoc dl = SDB->getCurSDLoc();
7628 const DataLayout *DL = TLI->getDataLayout();
7629 SmallVector<ISD::InputArg, 16> Ins;
7630
7631 if (!FuncInfo->CanLowerReturn) {
7632 // Put in an sret pointer parameter before all the other parameters.
7633 SmallVector<EVT, 1> ValueVTs;
7634 ComputeValueVTs(*TLI, PointerType::getUnqual(F.getReturnType()), ValueVTs);
7635
7636 // NOTE: Assuming that a pointer will never break down to more than one VT
7637 // or one register.
7638 ISD::ArgFlagsTy Flags;
7639 Flags.setSRet();
7640 MVT RegisterVT = TLI->getRegisterType(*DAG.getContext(), ValueVTs[0]);
7641 ISD::InputArg RetArg(Flags, RegisterVT, ValueVTs[0], true,
7642 ISD::InputArg::NoArgIndex, 0);
7643 Ins.push_back(RetArg);
7644 }
7645
7646 // Set up the incoming argument description vector.
7647 unsigned Idx = 1;
7648 for (Function::const_arg_iterator I = F.arg_begin(), E = F.arg_end();
7649 I != E; ++I, ++Idx) {
7650 SmallVector<EVT, 4> ValueVTs;
7651 ComputeValueVTs(*TLI, I->getType(), ValueVTs);
7652 bool isArgValueUsed = !I->use_empty();
7653 unsigned PartBase = 0;
7654 Type *FinalType = I->getType();
7655 if (F.getAttributes().hasAttribute(Idx, Attribute::ByVal))
7656 FinalType = cast<PointerType>(FinalType)->getElementType();
7657 bool NeedsRegBlock = TLI->functionArgumentNeedsConsecutiveRegisters(
7658 FinalType, F.getCallingConv(), F.isVarArg());
7659 for (unsigned Value = 0, NumValues = ValueVTs.size();
7660 Value != NumValues; ++Value) {
7661 EVT VT = ValueVTs[Value];
7662 Type *ArgTy = VT.getTypeForEVT(*DAG.getContext());
7663 ISD::ArgFlagsTy Flags;
7664 unsigned OriginalAlignment = DL->getABITypeAlignment(ArgTy);
7665
7666 if (F.getAttributes().hasAttribute(Idx, Attribute::ZExt))
7667 Flags.setZExt();
7668 if (F.getAttributes().hasAttribute(Idx, Attribute::SExt))
7669 Flags.setSExt();
7670 if (F.getAttributes().hasAttribute(Idx, Attribute::InReg))
7671 Flags.setInReg();
7672 if (F.getAttributes().hasAttribute(Idx, Attribute::StructRet))
7673 Flags.setSRet();
7674 if (F.getAttributes().hasAttribute(Idx, Attribute::ByVal))
7675 Flags.setByVal();
7676 if (F.getAttributes().hasAttribute(Idx, Attribute::InAlloca)) {
7677 Flags.setInAlloca();
7678 // Set the byval flag for CCAssignFn callbacks that don't know about
7679 // inalloca. This way we can know how many bytes we should've allocated
7680 // and how many bytes a callee cleanup function will pop. If we port
7681 // inalloca to more targets, we'll have to add custom inalloca handling
7682 // in the various CC lowering callbacks.
7683 Flags.setByVal();
7684 }
7685 if (Flags.isByVal() || Flags.isInAlloca()) {
7686 PointerType *Ty = cast<PointerType>(I->getType());
7687 Type *ElementTy = Ty->getElementType();
7688 Flags.setByValSize(DL->getTypeAllocSize(ElementTy));
7689 // For ByVal, alignment should be passed from FE. BE will guess if
7690 // this info is not there but there are cases it cannot get right.
7691 unsigned FrameAlign;
7692 if (F.getParamAlignment(Idx))
7693 FrameAlign = F.getParamAlignment(Idx);
7694 else
7695 FrameAlign = TLI->getByValTypeAlignment(ElementTy);
7696 Flags.setByValAlign(FrameAlign);
7697 }
7698 if (F.getAttributes().hasAttribute(Idx, Attribute::Nest))
7699 Flags.setNest();
7700 if (NeedsRegBlock) {
7701 Flags.setInConsecutiveRegs();
7702 if (Value == NumValues - 1)
7703 Flags.setInConsecutiveRegsLast();
7704 }
7705 Flags.setOrigAlign(OriginalAlignment);
7706
7707 MVT RegisterVT = TLI->getRegisterType(*CurDAG->getContext(), VT);
7708 unsigned NumRegs = TLI->getNumRegisters(*CurDAG->getContext(), VT);
7709 for (unsigned i = 0; i != NumRegs; ++i) {
7710 ISD::InputArg MyFlags(Flags, RegisterVT, VT, isArgValueUsed,
7711 Idx-1, PartBase+i*RegisterVT.getStoreSize());
7712 if (NumRegs > 1 && i == 0)
7713 MyFlags.Flags.setSplit();
7714 // if it isn't first piece, alignment must be 1
7715 else if (i > 0)
7716 MyFlags.Flags.setOrigAlign(1);
7717 Ins.push_back(MyFlags);
7718 }
7719 PartBase += VT.getStoreSize();
7720 }
7721 }
7722
7723 // Call the target to set up the argument values.
7724 SmallVector<SDValue, 8> InVals;
7725 SDValue NewRoot = TLI->LowerFormalArguments(
7726 DAG.getRoot(), F.getCallingConv(), F.isVarArg(), Ins, dl, DAG, InVals);
7727
7728 // Verify that the target's LowerFormalArguments behaved as expected.
7729 assert(NewRoot.getNode() && NewRoot.getValueType() == MVT::Other &&
7730 "LowerFormalArguments didn't return a valid chain!");
7731 assert(InVals.size() == Ins.size() &&
7732 "LowerFormalArguments didn't emit the correct number of values!");
7733 DEBUG({
7734 for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
7735 assert(InVals[i].getNode() &&
7736 "LowerFormalArguments emitted a null value!");
7737 assert(EVT(Ins[i].VT) == InVals[i].getValueType() &&
7738 "LowerFormalArguments emitted a value with the wrong type!");
7739 }
7740 });
7741
7742 // Update the DAG with the new chain value resulting from argument lowering.
7743 DAG.setRoot(NewRoot);
7744
7745 // Set up the argument values.
7746 unsigned i = 0;
7747 Idx = 1;
7748 if (!FuncInfo->CanLowerReturn) {
7749 // Create a virtual register for the sret pointer, and put in a copy
7750 // from the sret argument into it.
7751 SmallVector<EVT, 1> ValueVTs;
7752 ComputeValueVTs(*TLI, PointerType::getUnqual(F.getReturnType()), ValueVTs);
7753 MVT VT = ValueVTs[0].getSimpleVT();
7754 MVT RegVT = TLI->getRegisterType(*CurDAG->getContext(), VT);
7755 ISD::NodeType AssertOp = ISD::DELETED_NODE;
7756 SDValue ArgValue = getCopyFromParts(DAG, dl, &InVals[0], 1,
7757 RegVT, VT, nullptr, AssertOp);
7758
7759 MachineFunction& MF = SDB->DAG.getMachineFunction();
7760 MachineRegisterInfo& RegInfo = MF.getRegInfo();
7761 unsigned SRetReg = RegInfo.createVirtualRegister(TLI->getRegClassFor(RegVT));
7762 FuncInfo->DemoteRegister = SRetReg;
7763 NewRoot =
7764 SDB->DAG.getCopyToReg(NewRoot, SDB->getCurSDLoc(), SRetReg, ArgValue);
7765 DAG.setRoot(NewRoot);
7766
7767 // i indexes lowered arguments. Bump it past the hidden sret argument.
7768 // Idx indexes LLVM arguments. Don't touch it.
7769 ++i;
7770 }
7771
7772 for (Function::const_arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E;
7773 ++I, ++Idx) {
7774 SmallVector<SDValue, 4> ArgValues;
7775 SmallVector<EVT, 4> ValueVTs;
7776 ComputeValueVTs(*TLI, I->getType(), ValueVTs);
7777 unsigned NumValues = ValueVTs.size();
7778
7779 // If this argument is unused then remember its value. It is used to generate
7780 // debugging information.
7781 if (I->use_empty() && NumValues) {
7782 SDB->setUnusedArgValue(I, InVals[i]);
7783
7784 // Also remember any frame index for use in FastISel.
7785 if (FrameIndexSDNode *FI =
7786 dyn_cast<FrameIndexSDNode>(InVals[i].getNode()))
7787 FuncInfo->setArgumentFrameIndex(I, FI->getIndex());
7788 }
7789
7790 for (unsigned Val = 0; Val != NumValues; ++Val) {
7791 EVT VT = ValueVTs[Val];
7792 MVT PartVT = TLI->getRegisterType(*CurDAG->getContext(), VT);
7793 unsigned NumParts = TLI->getNumRegisters(*CurDAG->getContext(), VT);
7794
7795 if (!I->use_empty()) {
7796 ISD::NodeType AssertOp = ISD::DELETED_NODE;
7797 if (F.getAttributes().hasAttribute(Idx, Attribute::SExt))
7798 AssertOp = ISD::AssertSext;
7799 else if (F.getAttributes().hasAttribute(Idx, Attribute::ZExt))
7800 AssertOp = ISD::AssertZext;
7801
7802 ArgValues.push_back(getCopyFromParts(DAG, dl, &InVals[i],
7803 NumParts, PartVT, VT,
7804 nullptr, AssertOp));
7805 }
7806
7807 i += NumParts;
7808 }
7809
7810 // We don't need to do anything else for unused arguments.
7811 if (ArgValues.empty())
7812 continue;
7813
7814 // Note down frame index.
7815 if (FrameIndexSDNode *FI =
7816 dyn_cast<FrameIndexSDNode>(ArgValues[0].getNode()))
7817 FuncInfo->setArgumentFrameIndex(I, FI->getIndex());
7818
7819 SDValue Res = DAG.getMergeValues(makeArrayRef(ArgValues.data(), NumValues),
7820 SDB->getCurSDLoc());
7821
7822 SDB->setValue(I, Res);
7823 if (!TM.Options.EnableFastISel && Res.getOpcode() == ISD::BUILD_PAIR) {
7824 if (LoadSDNode *LNode =
7825 dyn_cast<LoadSDNode>(Res.getOperand(0).getNode()))
7826 if (FrameIndexSDNode *FI =
7827 dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode()))
7828 FuncInfo->setArgumentFrameIndex(I, FI->getIndex());
7829 }
7830
7831 // If this argument is live outside of the entry block, insert a copy from
7832 // wherever we got it to the vreg that other BB's will reference it as.
7833 if (!TM.Options.EnableFastISel && Res.getOpcode() == ISD::CopyFromReg) {
7834 // If we can, though, try to skip creating an unnecessary vreg.
7835 // FIXME: This isn't very clean... it would be nice to make this more
7836 // general. It's also subtly incompatible with the hacks FastISel
7837 // uses with vregs.
7838 unsigned Reg = cast<RegisterSDNode>(Res.getOperand(1))->getReg();
7839 if (TargetRegisterInfo::isVirtualRegister(Reg)) {
7840 FuncInfo->ValueMap[I] = Reg;
7841 continue;
7842 }
7843 }
7844 if (!isOnlyUsedInEntryBlock(I, TM.Options.EnableFastISel)) {
7845 FuncInfo->InitializeRegForValue(I);
7846 SDB->CopyToExportRegsIfNeeded(I);
7847 }
7848 }
7849
7850 assert(i == InVals.size() && "Argument register count mismatch!");
7851
7852 // Finally, if the target has anything special to do, allow it to do so.
7853 // FIXME: this should insert code into the DAG!
7854 EmitFunctionEntryCode();
7855 }
7856
7857 /// Handle PHI nodes in successor blocks. Emit code into the SelectionDAG to
7858 /// ensure constants are generated when needed. Remember the virtual registers
7859 /// that need to be added to the Machine PHI nodes as input. We cannot just
7860 /// directly add them, because expansion might result in multiple MBB's for one
7861 /// BB. As such, the start of the BB might correspond to a different MBB than
7862 /// the end.
7863 ///
7864 void
HandlePHINodesInSuccessorBlocks(const BasicBlock * LLVMBB)7865 SelectionDAGBuilder::HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
7866 const TerminatorInst *TI = LLVMBB->getTerminator();
7867
7868 SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
7869
7870 // Check successor nodes' PHI nodes that expect a constant to be available
7871 // from this block.
7872 for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
7873 const BasicBlock *SuccBB = TI->getSuccessor(succ);
7874 if (!isa<PHINode>(SuccBB->begin())) continue;
7875 MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB];
7876
7877 // If this terminator has multiple identical successors (common for
7878 // switches), only handle each succ once.
7879 if (!SuccsHandled.insert(SuccMBB).second)
7880 continue;
7881
7882 MachineBasicBlock::iterator MBBI = SuccMBB->begin();
7883
7884 // At this point we know that there is a 1-1 correspondence between LLVM PHI
7885 // nodes and Machine PHI nodes, but the incoming operands have not been
7886 // emitted yet.
7887 for (BasicBlock::const_iterator I = SuccBB->begin();
7888 const PHINode *PN = dyn_cast<PHINode>(I); ++I) {
7889 // Ignore dead phi's.
7890 if (PN->use_empty()) continue;
7891
7892 // Skip empty types
7893 if (PN->getType()->isEmptyTy())
7894 continue;
7895
7896 unsigned Reg;
7897 const Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);
7898
7899 if (const Constant *C = dyn_cast<Constant>(PHIOp)) {
7900 unsigned &RegOut = ConstantsOut[C];
7901 if (RegOut == 0) {
7902 RegOut = FuncInfo.CreateRegs(C->getType());
7903 CopyValueToVirtualRegister(C, RegOut);
7904 }
7905 Reg = RegOut;
7906 } else {
7907 DenseMap<const Value *, unsigned>::iterator I =
7908 FuncInfo.ValueMap.find(PHIOp);
7909 if (I != FuncInfo.ValueMap.end())
7910 Reg = I->second;
7911 else {
7912 assert(isa<AllocaInst>(PHIOp) &&
7913 FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(PHIOp)) &&
7914 "Didn't codegen value into a register!??");
7915 Reg = FuncInfo.CreateRegs(PHIOp->getType());
7916 CopyValueToVirtualRegister(PHIOp, Reg);
7917 }
7918 }
7919
7920 // Remember that this register needs to added to the machine PHI node as
7921 // the input for this MBB.
7922 SmallVector<EVT, 4> ValueVTs;
7923 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7924 ComputeValueVTs(TLI, PN->getType(), ValueVTs);
7925 for (unsigned vti = 0, vte = ValueVTs.size(); vti != vte; ++vti) {
7926 EVT VT = ValueVTs[vti];
7927 unsigned NumRegisters = TLI.getNumRegisters(*DAG.getContext(), VT);
7928 for (unsigned i = 0, e = NumRegisters; i != e; ++i)
7929 FuncInfo.PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg+i));
7930 Reg += NumRegisters;
7931 }
7932 }
7933 }
7934
7935 ConstantsOut.clear();
7936 }
7937
7938 /// Add a successor MBB to ParentMBB< creating a new MachineBB for BB if SuccMBB
7939 /// is 0.
7940 MachineBasicBlock *
7941 SelectionDAGBuilder::StackProtectorDescriptor::
AddSuccessorMBB(const BasicBlock * BB,MachineBasicBlock * ParentMBB,bool IsLikely,MachineBasicBlock * SuccMBB)7942 AddSuccessorMBB(const BasicBlock *BB,
7943 MachineBasicBlock *ParentMBB,
7944 bool IsLikely,
7945 MachineBasicBlock *SuccMBB) {
7946 // If SuccBB has not been created yet, create it.
7947 if (!SuccMBB) {
7948 MachineFunction *MF = ParentMBB->getParent();
7949 MachineFunction::iterator BBI = ParentMBB;
7950 SuccMBB = MF->CreateMachineBasicBlock(BB);
7951 MF->insert(++BBI, SuccMBB);
7952 }
7953 // Add it as a successor of ParentMBB.
7954 ParentMBB->addSuccessor(
7955 SuccMBB, BranchProbabilityInfo::getBranchWeightStackProtector(IsLikely));
7956 return SuccMBB;
7957 }
7958