//===-- SelectionDAGBuilder.cpp - Selection-DAG building ------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements routines for translating from LLVM IR into SelectionDAG IR.
//
//===----------------------------------------------------------------------===//

#include "SelectionDAGBuilder.h"
#include "SDNodeDbgValue.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/GCMetadata.h"
#include "llvm/CodeGen/GCStrategy.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/WinEHFuncInfo.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetSelectionDAGInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <algorithm>
using namespace llvm;

#define DEBUG_TYPE "isel"

/// LimitFloatPrecision - Generate low-precision inline sequences for
/// some float libcalls (6, 8 or 12 bits).
static unsigned LimitFloatPrecision;

static cl::opt<unsigned, true>
LimitFPPrecision("limit-float-precision",
                 cl::desc("Generate low-precision inline sequences "
                          "for some float libcalls"),
                 cl::location(LimitFloatPrecision),
                 cl::init(0));

static cl::opt<bool>
EnableFMFInDAG("enable-fmf-dag", cl::init(false), cl::Hidden,
               cl::desc("Enable fast-math-flags for DAG nodes"));

// Limit the width of DAG chains. This is important in general to prevent
// DAG-based analysis from blowing up. For example, alias analysis and
// load clustering may not complete in reasonable time. It is difficult to
// recognize and avoid this situation within each individual analysis, and
// future analyses are likely to have the same behavior. Limiting DAG width is
// the safe approach and will be especially important with global DAGs.
//
// MaxParallelChains default is arbitrarily high to avoid affecting
// optimization, but could be lowered to improve compile time. Any ld-ld-st-st
// sequence over this should have been converted to llvm.memcpy by the
// frontend. It is easy to induce this behavior with .ll code such as:
//   %buffer = alloca [4096 x i8]
//   %data = load [4096 x i8]* %argPtr
//   store [4096 x i8] %data, [4096 x i8]* %buffer
static const unsigned MaxParallelChains = 64;

static SDValue getCopyFromPartsVector(SelectionDAG &DAG, SDLoc DL,
                                      const SDValue *Parts, unsigned NumParts,
                                      MVT PartVT, EVT ValueVT, const Value *V);

/// getCopyFromParts - Create a value that contains the specified legal parts
/// combined into the value they represent.  If the parts combine to a type
/// larger than ValueVT then AssertOp can be used to specify whether the extra
/// bits are known to be zero (ISD::AssertZext) or sign extended from ValueVT
/// (ISD::AssertSext).
static SDValue getCopyFromParts(SelectionDAG &DAG, SDLoc DL,
                                const SDValue *Parts,
                                unsigned NumParts, MVT PartVT, EVT ValueVT,
                                const Value *V,
                                ISD::NodeType AssertOp = ISD::DELETED_NODE) {
  if (ValueVT.isVector())
    return getCopyFromPartsVector(DAG, DL, Parts, NumParts,
                                  PartVT, ValueVT, V);

  assert(NumParts > 0 && "No parts to assemble!");
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue Val = Parts[0];

  if (NumParts > 1) {
    // Assemble the value from multiple parts.
    if (ValueVT.isInteger()) {
      unsigned PartBits = PartVT.getSizeInBits();
      unsigned ValueBits = ValueVT.getSizeInBits();

      // Assemble the power of 2 part.
      unsigned RoundParts = NumParts & (NumParts - 1) ?
        1 << Log2_32(NumParts) : NumParts;
      unsigned RoundBits = PartBits * RoundParts;
      EVT RoundVT = RoundBits == ValueBits ?
        ValueVT : EVT::getIntegerVT(*DAG.getContext(), RoundBits);
      SDValue Lo, Hi;

      EVT HalfVT = EVT::getIntegerVT(*DAG.getContext(), RoundBits/2);

      if (RoundParts > 2) {
        Lo = getCopyFromParts(DAG, DL, Parts, RoundParts / 2,
                              PartVT, HalfVT, V);
        Hi = getCopyFromParts(DAG, DL, Parts + RoundParts / 2,
                              RoundParts / 2, PartVT, HalfVT, V);
      } else {
        Lo = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[0]);
        Hi = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[1]);
      }

      if (DAG.getDataLayout().isBigEndian())
        std::swap(Lo, Hi);

      Val = DAG.getNode(ISD::BUILD_PAIR, DL, RoundVT, Lo, Hi);

      if (RoundParts < NumParts) {
        // Assemble the trailing non-power-of-2 part.
        unsigned OddParts = NumParts - RoundParts;
        EVT OddVT = EVT::getIntegerVT(*DAG.getContext(), OddParts * PartBits);
        Hi = getCopyFromParts(DAG, DL,
                              Parts + RoundParts, OddParts, PartVT, OddVT, V);

        // Combine the round and odd parts.
        Lo = Val;
        if (DAG.getDataLayout().isBigEndian())
          std::swap(Lo, Hi);
        EVT TotalVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
        Hi = DAG.getNode(ISD::ANY_EXTEND, DL, TotalVT, Hi);
        Hi =
            DAG.getNode(ISD::SHL, DL, TotalVT, Hi,
                        DAG.getConstant(Lo.getValueType().getSizeInBits(), DL,
                                        TLI.getPointerTy(DAG.getDataLayout())));
        Lo = DAG.getNode(ISD::ZERO_EXTEND, DL, TotalVT, Lo);
        Val = DAG.getNode(ISD::OR, DL, TotalVT, Lo, Hi);
      }
    } else if (PartVT.isFloatingPoint()) {
      // FP split into multiple FP parts (for ppcf128)
      assert(ValueVT == EVT(MVT::ppcf128) && PartVT == MVT::f64 &&
             "Unexpected split");
      SDValue Lo, Hi;
      Lo = DAG.getNode(ISD::BITCAST, DL, EVT(MVT::f64), Parts[0]);
      Hi = DAG.getNode(ISD::BITCAST, DL, EVT(MVT::f64), Parts[1]);
      if (TLI.hasBigEndianPartOrdering(ValueVT, DAG.getDataLayout()))
        std::swap(Lo, Hi);
      Val = DAG.getNode(ISD::BUILD_PAIR, DL, ValueVT, Lo, Hi);
    } else {
      // FP split into integer parts (soft fp)
      assert(ValueVT.isFloatingPoint() && PartVT.isInteger() &&
             !PartVT.isVector() && "Unexpected split");
      EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
      Val = getCopyFromParts(DAG, DL, Parts, NumParts, PartVT, IntVT, V);
    }
  }

  // There is now one part, held in Val.  Correct it to match ValueVT.
  EVT PartEVT = Val.getValueType();

  if (PartEVT == ValueVT)
    return Val;

  if (PartEVT.isInteger() && ValueVT.isInteger()) {
    if (ValueVT.bitsLT(PartEVT)) {
      // For a truncate, see if we have any information to
      // indicate whether the truncated bits will always be
      // zero or sign-extension.
      if (AssertOp != ISD::DELETED_NODE)
        Val = DAG.getNode(AssertOp, DL, PartEVT, Val,
                          DAG.getValueType(ValueVT));
      return DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
    }
    return DAG.getNode(ISD::ANY_EXTEND, DL, ValueVT, Val);
  }

  if (PartEVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
    // FP_ROUND's are always exact here.
    if (ValueVT.bitsLT(Val.getValueType()))
      return DAG.getNode(
          ISD::FP_ROUND, DL, ValueVT, Val,
          DAG.getTargetConstant(1, DL, TLI.getPointerTy(DAG.getDataLayout())));

    return DAG.getNode(ISD::FP_EXTEND, DL, ValueVT, Val);
  }

  if (PartEVT.getSizeInBits() == ValueVT.getSizeInBits())
    return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);

  llvm_unreachable("Unknown mismatch!");
}

static void diagnosePossiblyInvalidConstraint(LLVMContext &Ctx, const Value *V,
                                              const Twine &ErrMsg) {
  const Instruction *I = dyn_cast_or_null<Instruction>(V);
  if (!I)
    return Ctx.emitError(ErrMsg);

  const char *AsmError = ", possible invalid constraint for vector type";
  if (const CallInst *CI = dyn_cast<CallInst>(I))
    if (isa<InlineAsm>(CI->getCalledValue()))
      return Ctx.emitError(I, ErrMsg + AsmError);

  return Ctx.emitError(I, ErrMsg);
}

/// getCopyFromPartsVector - Create a value that contains the specified legal
/// parts combined into the value they represent.  If the parts combine to a
/// type larger than ValueVT then AssertOp can be used to specify whether the
/// extra bits are known to be zero (ISD::AssertZext) or sign extended from
/// ValueVT (ISD::AssertSext).
static SDValue getCopyFromPartsVector(SelectionDAG &DAG, SDLoc DL,
                                      const SDValue *Parts, unsigned NumParts,
                                      MVT PartVT, EVT ValueVT, const Value *V) {
  assert(ValueVT.isVector() && "Not a vector value");
  assert(NumParts > 0 && "No parts to assemble!");
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue Val = Parts[0];

  // Handle a multi-element vector.
  if (NumParts > 1) {
    EVT IntermediateVT;
    MVT RegisterVT;
    unsigned NumIntermediates;
    unsigned NumRegs =
        TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT,
                                   NumIntermediates, RegisterVT);
    assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
    NumParts = NumRegs; // Silence a compiler warning.
    assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");
    assert(RegisterVT.getSizeInBits() ==
           Parts[0].getSimpleValueType().getSizeInBits() &&
           "Part type sizes don't match!");

    // Assemble the parts into intermediate operands.
    SmallVector<SDValue, 8> Ops(NumIntermediates);
    if (NumIntermediates == NumParts) {
      // If the register was not expanded, truncate or copy the value,
      // as appropriate.
      for (unsigned i = 0; i != NumParts; ++i)
        Ops[i] = getCopyFromParts(DAG, DL, &Parts[i], 1,
                                  PartVT, IntermediateVT, V);
    } else if (NumParts > 0) {
      // If the intermediate type was expanded, build the intermediate
      // operands from the parts.
      assert(NumParts % NumIntermediates == 0 &&
             "Must expand into a divisible number of parts!");
      unsigned Factor = NumParts / NumIntermediates;
      for (unsigned i = 0; i != NumIntermediates; ++i)
        Ops[i] = getCopyFromParts(DAG, DL, &Parts[i * Factor], Factor,
                                  PartVT, IntermediateVT, V);
    }

    // Build a vector with BUILD_VECTOR or CONCAT_VECTORS from the
    // intermediate operands.
    Val = DAG.getNode(IntermediateVT.isVector() ? ISD::CONCAT_VECTORS
                                                : ISD::BUILD_VECTOR,
                      DL, ValueVT, Ops);
  }

  // There is now one part, held in Val.  Correct it to match ValueVT.
  EVT PartEVT = Val.getValueType();

  if (PartEVT == ValueVT)
    return Val;

  if (PartEVT.isVector()) {
    // If the element type of the source/dest vectors are the same, but the
    // parts vector has more elements than the value vector, then we have a
    // vector widening case (e.g. <2 x float> -> <4 x float>).  Extract the
    // elements we want.
    if (PartEVT.getVectorElementType() == ValueVT.getVectorElementType()) {
      assert(PartEVT.getVectorNumElements() > ValueVT.getVectorNumElements() &&
             "Cannot narrow, it would be a lossy transformation");
      return DAG.getNode(
          ISD::EXTRACT_SUBVECTOR, DL, ValueVT, Val,
          DAG.getConstant(0, DL, TLI.getVectorIdxTy(DAG.getDataLayout())));
    }

    // Vector/Vector bitcast.
    if (ValueVT.getSizeInBits() == PartEVT.getSizeInBits())
      return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);

    assert(PartEVT.getVectorNumElements() == ValueVT.getVectorNumElements() &&
           "Cannot handle this kind of promotion");
    // Promoted vector extract
    return DAG.getAnyExtOrTrunc(Val, DL, ValueVT);
  }

  // Trivial bitcast if the types are the same size and the destination
  // vector type is legal.
  if (PartEVT.getSizeInBits() == ValueVT.getSizeInBits() &&
      TLI.isTypeLegal(ValueVT))
    return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);

  // Handle cases such as i8 -> <1 x i1>
  if (ValueVT.getVectorNumElements() != 1) {
    diagnosePossiblyInvalidConstraint(*DAG.getContext(), V,
                                      "non-trivial scalar-to-vector conversion");
    return DAG.getUNDEF(ValueVT);
  }

  if (ValueVT.getVectorNumElements() == 1 &&
      ValueVT.getVectorElementType() != PartEVT)
    Val = DAG.getAnyExtOrTrunc(Val, DL, ValueVT.getScalarType());

  return DAG.getNode(ISD::BUILD_VECTOR, DL, ValueVT, Val);
}

static void getCopyToPartsVector(SelectionDAG &DAG, SDLoc dl,
                                 SDValue Val, SDValue *Parts, unsigned NumParts,
                                 MVT PartVT, const Value *V);

/// getCopyToParts - Create a series of nodes that contain the specified value
/// split into legal parts.  If the parts contain more bits than Val, then, for
/// integers, ExtendKind can be used to specify how to generate the extra bits.
static void getCopyToParts(SelectionDAG &DAG, SDLoc DL,
                           SDValue Val, SDValue *Parts, unsigned NumParts,
                           MVT PartVT, const Value *V,
                           ISD::NodeType ExtendKind = ISD::ANY_EXTEND) {
  EVT ValueVT = Val.getValueType();

  // Handle the vector case separately.
  if (ValueVT.isVector())
    return getCopyToPartsVector(DAG, DL, Val, Parts, NumParts, PartVT, V);

  unsigned PartBits = PartVT.getSizeInBits();
  unsigned OrigNumParts = NumParts;
  assert(DAG.getTargetLoweringInfo().isTypeLegal(PartVT) &&
         "Copying to an illegal type!");

  if (NumParts == 0)
    return;

  assert(!ValueVT.isVector() && "Vector case handled elsewhere");
  EVT PartEVT = PartVT;
  if (PartEVT == ValueVT) {
    assert(NumParts == 1 && "No-op copy with multiple parts!");
    Parts[0] = Val;
    return;
  }

  if (NumParts * PartBits > ValueVT.getSizeInBits()) {
    // If the parts cover more bits than the value has, promote the value.
    if (PartVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
      assert(NumParts == 1 && "Do not know what to promote to!");
      Val = DAG.getNode(ISD::FP_EXTEND, DL, PartVT, Val);
    } else {
      assert((PartVT.isInteger() || PartVT == MVT::x86mmx) &&
             ValueVT.isInteger() &&
             "Unknown mismatch!");
      ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
      Val = DAG.getNode(ExtendKind, DL, ValueVT, Val);
      if (PartVT == MVT::x86mmx)
        Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
    }
  } else if (PartBits == ValueVT.getSizeInBits()) {
    // Different types of the same size.
    assert(NumParts == 1 && PartEVT != ValueVT);
    Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
  } else if (NumParts * PartBits < ValueVT.getSizeInBits()) {
    // If the parts cover fewer bits than the value has, truncate the value.
    assert((PartVT.isInteger() || PartVT == MVT::x86mmx) &&
           ValueVT.isInteger() &&
           "Unknown mismatch!");
    ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
    Val = DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
    if (PartVT == MVT::x86mmx)
      Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
  }

  // The value may have changed - recompute ValueVT.
  ValueVT = Val.getValueType();
  assert(NumParts * PartBits == ValueVT.getSizeInBits() &&
         "Failed to tile the value with PartVT!");

  if (NumParts == 1) {
    if (PartEVT != ValueVT)
      diagnosePossiblyInvalidConstraint(*DAG.getContext(), V,
                                        "scalar-to-vector conversion failed");

    Parts[0] = Val;
    return;
  }

  // Expand the value into multiple parts.
  if (NumParts & (NumParts - 1)) {
    // The number of parts is not a power of 2.  Split off and copy the tail.
    assert(PartVT.isInteger() && ValueVT.isInteger() &&
           "Do not know what to expand to!");
    unsigned RoundParts = 1 << Log2_32(NumParts);
    unsigned RoundBits = RoundParts * PartBits;
    unsigned OddParts = NumParts - RoundParts;
    SDValue OddVal = DAG.getNode(ISD::SRL, DL, ValueVT, Val,
                                 DAG.getIntPtrConstant(RoundBits, DL));
    getCopyToParts(DAG, DL, OddVal, Parts + RoundParts, OddParts, PartVT, V);

    if (DAG.getDataLayout().isBigEndian())
      // The odd parts were reversed by getCopyToParts - unreverse them.
      std::reverse(Parts + RoundParts, Parts + NumParts);

    NumParts = RoundParts;
    ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
    Val = DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
  }

  // The number of parts is a power of 2.  Repeatedly bisect the value using
  // EXTRACT_ELEMENT.
  Parts[0] = DAG.getNode(ISD::BITCAST, DL,
                         EVT::getIntegerVT(*DAG.getContext(),
                                           ValueVT.getSizeInBits()),
                         Val);

  for (unsigned StepSize = NumParts; StepSize > 1; StepSize /= 2) {
    for (unsigned i = 0; i < NumParts; i += StepSize) {
      unsigned ThisBits = StepSize * PartBits / 2;
      EVT ThisVT = EVT::getIntegerVT(*DAG.getContext(), ThisBits);
      SDValue &Part0 = Parts[i];
      SDValue &Part1 = Parts[i+StepSize/2];

      Part1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL,
                          ThisVT, Part0, DAG.getIntPtrConstant(1, DL));
      Part0 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL,
                          ThisVT, Part0, DAG.getIntPtrConstant(0, DL));

      if (ThisBits == PartBits && ThisVT != PartVT) {
        Part0 = DAG.getNode(ISD::BITCAST, DL, PartVT, Part0);
        Part1 = DAG.getNode(ISD::BITCAST, DL, PartVT, Part1);
      }
    }
  }

  if (DAG.getDataLayout().isBigEndian())
    std::reverse(Parts, Parts + OrigNumParts);
}


/// getCopyToPartsVector - Create a series of nodes that contain the specified
/// value split into legal parts.
static void getCopyToPartsVector(SelectionDAG &DAG, SDLoc DL,
                                 SDValue Val, SDValue *Parts, unsigned NumParts,
                                 MVT PartVT, const Value *V) {
  EVT ValueVT = Val.getValueType();
  assert(ValueVT.isVector() && "Not a vector");
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  if (NumParts == 1) {
    EVT PartEVT = PartVT;
    if (PartEVT == ValueVT) {
      // Nothing to do.
    } else if (PartVT.getSizeInBits() == ValueVT.getSizeInBits()) {
      // Bitconvert vector->vector case.
      Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
    } else if (PartVT.isVector() &&
               PartEVT.getVectorElementType() == ValueVT.getVectorElementType() &&
               PartEVT.getVectorNumElements() > ValueVT.getVectorNumElements()) {
      EVT ElementVT = PartVT.getVectorElementType();
      // Vector widening case, e.g. <2 x float> -> <4 x float>.  Shuffle in
      // undef elements.
      SmallVector<SDValue, 16> Ops;
      for (unsigned i = 0, e = ValueVT.getVectorNumElements(); i != e; ++i)
        Ops.push_back(DAG.getNode(
            ISD::EXTRACT_VECTOR_ELT, DL, ElementVT, Val,
            DAG.getConstant(i, DL, TLI.getVectorIdxTy(DAG.getDataLayout()))));

      for (unsigned i = ValueVT.getVectorNumElements(),
                    e = PartVT.getVectorNumElements(); i != e; ++i)
        Ops.push_back(DAG.getUNDEF(ElementVT));

      Val = DAG.getNode(ISD::BUILD_VECTOR, DL, PartVT, Ops);

      // FIXME: Use CONCAT for 2x -> 4x.

      //SDValue UndefElts = DAG.getUNDEF(VectorTy);
      //Val = DAG.getNode(ISD::CONCAT_VECTORS, DL, PartVT, Val, UndefElts);
    } else if (PartVT.isVector() &&
               PartEVT.getVectorElementType().bitsGE(
                   ValueVT.getVectorElementType()) &&
               PartEVT.getVectorNumElements() == ValueVT.getVectorNumElements()) {

      // Promoted vector extract
      Val = DAG.getAnyExtOrTrunc(Val, DL, PartVT);
    } else {
      // Vector -> scalar conversion.
      assert(ValueVT.getVectorNumElements() == 1 &&
             "Only trivial vector-to-scalar conversions should get here!");
      Val = DAG.getNode(
          ISD::EXTRACT_VECTOR_ELT, DL, PartVT, Val,
          DAG.getConstant(0, DL, TLI.getVectorIdxTy(DAG.getDataLayout())));

      Val = DAG.getAnyExtOrTrunc(Val, DL, PartVT);
    }

    Parts[0] = Val;
    return;
  }

  // Handle a multi-element vector.
  EVT IntermediateVT;
  MVT RegisterVT;
  unsigned NumIntermediates;
  unsigned NumRegs = TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT,
                                                IntermediateVT,
                                                NumIntermediates, RegisterVT);
  unsigned NumElements = ValueVT.getVectorNumElements();

  assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
  NumParts = NumRegs; // Silence a compiler warning.
  assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");

  // Split the vector into intermediate operands.
  SmallVector<SDValue, 8> Ops(NumIntermediates);
  for (unsigned i = 0; i != NumIntermediates; ++i) {
    if (IntermediateVT.isVector())
      Ops[i] =
          DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, IntermediateVT, Val,
                      DAG.getConstant(i * (NumElements / NumIntermediates), DL,
                                      TLI.getVectorIdxTy(DAG.getDataLayout())));
    else
      Ops[i] = DAG.getNode(
          ISD::EXTRACT_VECTOR_ELT, DL, IntermediateVT, Val,
          DAG.getConstant(i, DL, TLI.getVectorIdxTy(DAG.getDataLayout())));
  }

  // Split the intermediate operands into legal parts.
  if (NumParts == NumIntermediates) {
    // If the register was not expanded, promote or copy the value,
    // as appropriate.
    for (unsigned i = 0; i != NumParts; ++i)
      getCopyToParts(DAG, DL, Ops[i], &Parts[i], 1, PartVT, V);
  } else if (NumParts > 0) {
    // If the intermediate type was expanded, split each value into
    // legal parts.
    assert(NumIntermediates != 0 && "division by zero");
    assert(NumParts % NumIntermediates == 0 &&
           "Must expand into a divisible number of parts!");
    unsigned Factor = NumParts / NumIntermediates;
    for (unsigned i = 0; i != NumIntermediates; ++i)
      getCopyToParts(DAG, DL, Ops[i], &Parts[i*Factor], Factor, PartVT, V);
  }
}

RegsForValue::RegsForValue() {}

RegsForValue::RegsForValue(const SmallVector<unsigned, 4> &regs, MVT regvt,
                           EVT valuevt)
    : ValueVTs(1, valuevt), RegVTs(1, regvt), Regs(regs) {}

RegsForValue::RegsForValue(LLVMContext &Context, const TargetLowering &TLI,
                           const DataLayout &DL, unsigned Reg, Type *Ty) {
  ComputeValueVTs(TLI, DL, Ty, ValueVTs);

  for (EVT ValueVT : ValueVTs) {
    unsigned NumRegs = TLI.getNumRegisters(Context, ValueVT);
    MVT RegisterVT = TLI.getRegisterType(Context, ValueVT);
    for (unsigned i = 0; i != NumRegs; ++i)
      Regs.push_back(Reg + i);
    RegVTs.push_back(RegisterVT);
    Reg += NumRegs;
  }
}

/// getCopyFromRegs - Emit a series of CopyFromReg nodes that copies from
/// this value and returns the result as a ValueVT value.  This uses
/// Chain/Flag as the input and updates them for the output Chain/Flag.
/// If the Flag pointer is NULL, no flag is used.
SDValue RegsForValue::getCopyFromRegs(SelectionDAG &DAG,
                                      FunctionLoweringInfo &FuncInfo,
                                      SDLoc dl,
                                      SDValue &Chain, SDValue *Flag,
                                      const Value *V) const {
  // A Value with type {} or [0 x %t] needs no registers.
  if (ValueVTs.empty())
    return SDValue();

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  // Assemble the legal parts into the final values.
  SmallVector<SDValue, 4> Values(ValueVTs.size());
  SmallVector<SDValue, 8> Parts;
  for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
    // Copy the legal parts from the registers.
    EVT ValueVT = ValueVTs[Value];
    unsigned NumRegs = TLI.getNumRegisters(*DAG.getContext(), ValueVT);
    MVT RegisterVT = RegVTs[Value];

    Parts.resize(NumRegs);
    for (unsigned i = 0; i != NumRegs; ++i) {
      SDValue P;
      if (!Flag) {
        P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT);
      } else {
        P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT, *Flag);
        *Flag = P.getValue(2);
      }

      Chain = P.getValue(1);
      Parts[i] = P;

      // If the source register was virtual and if we know something about it,
      // add an assert node.
      if (!TargetRegisterInfo::isVirtualRegister(Regs[Part+i]) ||
          !RegisterVT.isInteger() || RegisterVT.isVector())
        continue;

      const FunctionLoweringInfo::LiveOutInfo *LOI =
          FuncInfo.GetLiveOutRegInfo(Regs[Part+i]);
      if (!LOI)
        continue;

      unsigned RegSize = RegisterVT.getSizeInBits();
      unsigned NumSignBits = LOI->NumSignBits;
      unsigned NumZeroBits = LOI->KnownZero.countLeadingOnes();

      if (NumZeroBits == RegSize) {
        // The current value is a zero.
        // Explicitly express that as it would be easier for
        // optimizations to kick in.
        Parts[i] = DAG.getConstant(0, dl, RegisterVT);
        continue;
      }

      // FIXME: We capture more information than the dag can represent.  For
      // now, just use the tightest assertzext/assertsext possible.
      bool isSExt = true;
      EVT FromVT(MVT::Other);
      if (NumSignBits == RegSize)
        isSExt = true, FromVT = MVT::i1;   // ASSERT SEXT 1
      else if (NumZeroBits >= RegSize-1)
        isSExt = false, FromVT = MVT::i1;  // ASSERT ZEXT 1
      else if (NumSignBits > RegSize-8)
        isSExt = true, FromVT = MVT::i8;   // ASSERT SEXT 8
      else if (NumZeroBits >= RegSize-8)
        isSExt = false, FromVT = MVT::i8;  // ASSERT ZEXT 8
      else if (NumSignBits > RegSize-16)
        isSExt = true, FromVT = MVT::i16;  // ASSERT SEXT 16
      else if (NumZeroBits >= RegSize-16)
        isSExt = false, FromVT = MVT::i16; // ASSERT ZEXT 16
      else if (NumSignBits > RegSize-32)
        isSExt = true, FromVT = MVT::i32;  // ASSERT SEXT 32
      else if (NumZeroBits >= RegSize-32)
        isSExt = false, FromVT = MVT::i32; // ASSERT ZEXT 32
      else
        continue;

      // Add an assertion node.
      assert(FromVT != MVT::Other);
      Parts[i] = DAG.getNode(isSExt ? ISD::AssertSext : ISD::AssertZext, dl,
                             RegisterVT, P, DAG.getValueType(FromVT));
    }

    Values[Value] = getCopyFromParts(DAG, dl, Parts.begin(),
                                     NumRegs, RegisterVT, ValueVT, V);
    Part += NumRegs;
    Parts.clear();
  }

  return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(ValueVTs), Values);
}

/// getCopyToRegs - Emit a series of CopyToReg nodes that copies the
/// specified value into the registers specified by this object.  This uses
/// Chain/Flag as the input and updates them for the output Chain/Flag.
/// If the Flag pointer is NULL, no flag is used.
void RegsForValue::getCopyToRegs(SDValue Val, SelectionDAG &DAG, SDLoc dl,
                                 SDValue &Chain, SDValue *Flag, const Value *V,
                                 ISD::NodeType PreferredExtendType) const {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  ISD::NodeType ExtendKind = PreferredExtendType;

  // Get the list of the value's legal parts.
  unsigned NumRegs = Regs.size();
  SmallVector<SDValue, 8> Parts(NumRegs);
  for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
    EVT ValueVT = ValueVTs[Value];
    unsigned NumParts = TLI.getNumRegisters(*DAG.getContext(), ValueVT);
    MVT RegisterVT = RegVTs[Value];

    if (ExtendKind == ISD::ANY_EXTEND && TLI.isZExtFree(Val, RegisterVT))
      ExtendKind = ISD::ZERO_EXTEND;

    getCopyToParts(DAG, dl, Val.getValue(Val.getResNo() + Value),
                   &Parts[Part], NumParts, RegisterVT, V, ExtendKind);
    Part += NumParts;
  }

  // Copy the parts into the registers.
  SmallVector<SDValue, 8> Chains(NumRegs);
  for (unsigned i = 0; i != NumRegs; ++i) {
    SDValue Part;
    if (!Flag) {
      Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i]);
    } else {
      Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i], *Flag);
      *Flag = Part.getValue(1);
    }

    Chains[i] = Part.getValue(0);
  }

  if (NumRegs == 1 || Flag)
    // If NumRegs > 1 && Flag is used then the use of the last CopyToReg is
    // flagged to it. That is the CopyToReg nodes and the user are considered
    // a single scheduling unit. If we create a TokenFactor and return it as
    // chain, then the TokenFactor is both a predecessor (operand) of the
    // user as well as a successor (the TF operands are flagged to the user).
    //   c1, f1 = CopyToReg
    //   c2, f2 = CopyToReg
    //   c3     = TokenFactor c1, c2
    //   ...
    //          = op c3, ..., f2
    Chain = Chains[NumRegs-1];
  else
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
}

/// AddInlineAsmOperands - Add this value to the specified inlineasm node
/// operand list.  This adds the code marker and includes the number of
/// values added into it.
void RegsForValue::AddInlineAsmOperands(unsigned Code, bool HasMatching,
                                        unsigned MatchingIdx, SDLoc dl,
                                        SelectionDAG &DAG,
                                        std::vector<SDValue> &Ops) const {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  unsigned Flag = InlineAsm::getFlagWord(Code, Regs.size());
  if (HasMatching)
    Flag = InlineAsm::getFlagWordForMatchingOp(Flag, MatchingIdx);
  else if (!Regs.empty() &&
           TargetRegisterInfo::isVirtualRegister(Regs.front())) {
    // Put the register class of the virtual registers in the flag word.  That
    // way, later passes can recompute register class constraints for inline
    // assembly as well as normal instructions.
    // Don't do this for tied operands that can use the regclass information
    // from the def.
    const MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
    const TargetRegisterClass *RC = MRI.getRegClass(Regs.front());
    Flag = InlineAsm::getFlagWordForRegClass(Flag, RC->getID());
  }

  SDValue Res = DAG.getTargetConstant(Flag, dl, MVT::i32);
  Ops.push_back(Res);

  unsigned SP = TLI.getStackPointerRegisterToSaveRestore();
  for (unsigned Value = 0, Reg = 0, e = ValueVTs.size(); Value != e; ++Value) {
    unsigned NumRegs = TLI.getNumRegisters(*DAG.getContext(), ValueVTs[Value]);
    MVT RegisterVT = RegVTs[Value];
    for (unsigned i = 0; i != NumRegs; ++i) {
      assert(Reg < Regs.size() && "Mismatch in # registers expected");
      unsigned TheReg = Regs[Reg++];
      Ops.push_back(DAG.getRegister(TheReg, RegisterVT));

      if (TheReg == SP && Code == InlineAsm::Kind_Clobber) {
        // If we clobbered the stack pointer, MFI should know about it.
        assert(DAG.getMachineFunction().getFrameInfo()->
               hasOpaqueSPAdjustment());
      }
    }
  }
}

void SelectionDAGBuilder::init(GCFunctionInfo *gfi, AliasAnalysis &aa,
                               const TargetLibraryInfo *li) {
  AA = &aa;
  GFI = gfi;
  LibInfo = li;
  DL = &DAG.getDataLayout();
  Context = DAG.getContext();
  LPadToCallSiteMap.clear();
}

/// clear - Clear out the current SelectionDAG and the associated
/// state and prepare this SelectionDAGBuilder object to be used
/// for a new block.  This doesn't clear out information about
/// additional blocks that are needed to complete switch lowering
/// or PHI node updating; that information is cleared out as it is
/// consumed.
void SelectionDAGBuilder::clear() {
  NodeMap.clear();
  UnusedArgNodeMap.clear();
  PendingLoads.clear();
  PendingExports.clear();
  CurInst = nullptr;
  HasTailCall = false;
  SDNodeOrder = LowestSDNodeOrder;
  StatepointLowering.clear();
}

/// clearDanglingDebugInfo - Clear the dangling debug information
/// map.  This function is separated from the clear so that debug
/// information that is dangling in a basic block can be properly
/// resolved in a different basic block.  This allows the
/// SelectionDAG to resolve dangling debug information attached
/// to PHI nodes.
void SelectionDAGBuilder::clearDanglingDebugInfo() {
  DanglingDebugInfoMap.clear();
}

/// getRoot - Return the current virtual root of the Selection DAG,
/// flushing any PendingLoad items. This must be done before emitting
/// a store or any other node that may need to be ordered after any
/// prior load instructions.
///
SDValue SelectionDAGBuilder::getRoot() {
  if (PendingLoads.empty())
    return DAG.getRoot();

  if (PendingLoads.size() == 1) {
    SDValue Root = PendingLoads[0];
    DAG.setRoot(Root);
    PendingLoads.clear();
    return Root;
  }

  // Otherwise, we have to make a token factor node.
  SDValue Root = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), MVT::Other,
                             PendingLoads);
  PendingLoads.clear();
  DAG.setRoot(Root);
  return Root;
}

/// getControlRoot - Similar to getRoot, but instead of flushing all the
/// PendingLoad items, flush all the PendingExports items. It is necessary
/// to do this before emitting a terminator instruction.
///
SDValue SelectionDAGBuilder::getControlRoot() {
  SDValue Root = DAG.getRoot();

  if (PendingExports.empty())
    return Root;

  // Turn all of the CopyToReg chains into one factored node.
  if (Root.getOpcode() != ISD::EntryToken) {
    unsigned i = 0, e = PendingExports.size();
    for (; i != e; ++i) {
      assert(PendingExports[i].getNode()->getNumOperands() > 1);
      if (PendingExports[i].getNode()->getOperand(0) == Root)
        break;  // Don't add the root if we already indirectly depend on it.
    }

    if (i == e)
      PendingExports.push_back(Root);
  }

  Root = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), MVT::Other,
                     PendingExports);
  PendingExports.clear();
  DAG.setRoot(Root);
  return Root;
}

void SelectionDAGBuilder::visit(const Instruction &I) {
  // Set up outgoing PHI node register values before emitting the terminator.
  if (isa<TerminatorInst>(&I))
    HandlePHINodesInSuccessorBlocks(I.getParent());

  ++SDNodeOrder;

  CurInst = &I;

  visit(I.getOpcode(), I);

  if (!isa<TerminatorInst>(&I) && !HasTailCall)
    CopyToExportRegsIfNeeded(&I);

  CurInst = nullptr;
}

void SelectionDAGBuilder::visitPHI(const PHINode &) {
  llvm_unreachable("SelectionDAGBuilder shouldn't visit PHI nodes!");
}

void SelectionDAGBuilder::visit(unsigned Opcode, const User &I) {
  // Note: this doesn't use InstVisitor, because it has to work with
  // ConstantExpr's in addition to instructions.
  switch (Opcode) {
  default: llvm_unreachable("Unknown instruction type encountered!");
    // Build the switch statement using the Instruction.def file.
#define HANDLE_INST(NUM, OPCODE, CLASS) \
    case Instruction::OPCODE: visit##OPCODE((const CLASS&)I); break;
#include "llvm/IR/Instruction.def"
  }
}

// resolveDanglingDebugInfo - if we saw an earlier dbg_value referring to V,
// generate the debug data structures now that we've seen its definition.
void SelectionDAGBuilder::resolveDanglingDebugInfo(const Value *V,
                                                   SDValue Val) {
  DanglingDebugInfo &DDI = DanglingDebugInfoMap[V];
  if (DDI.getDI()) {
    const DbgValueInst *DI = DDI.getDI();
    DebugLoc dl = DDI.getdl();
    unsigned DbgSDNodeOrder = DDI.getSDNodeOrder();
    DILocalVariable *Variable = DI->getVariable();
    DIExpression *Expr = DI->getExpression();
    assert(Variable->isValidLocationForIntrinsic(dl) &&
           "Expected inlined-at fields to agree");
    uint64_t Offset = DI->getOffset();
    // A dbg.value for an alloca is always indirect.
    bool IsIndirect = isa<AllocaInst>(V) || Offset != 0;
    SDDbgValue *SDV;
    if (Val.getNode()) {
      if (!EmitFuncArgumentDbgValue(V, Variable, Expr, dl, Offset, IsIndirect,
                                    Val)) {
        SDV = DAG.getDbgValue(Variable, Expr, Val.getNode(), Val.getResNo(),
                              IsIndirect, Offset, dl, DbgSDNodeOrder);
        DAG.AddDbgValue(SDV, Val.getNode(), false);
      }
    } else
      DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
    DanglingDebugInfoMap[V] = DanglingDebugInfo();
  }
}

/// getCopyFromRegs - If there was a virtual register allocated for the value
/// V, emit a CopyFromReg of the specified type Ty.  Return an empty SDValue()
/// otherwise.
SDValue SelectionDAGBuilder::getCopyFromRegs(const Value *V, Type *Ty) {
  DenseMap<const Value *, unsigned>::iterator It = FuncInfo.ValueMap.find(V);
  SDValue Result;

  if (It != FuncInfo.ValueMap.end()) {
    unsigned InReg = It->second;
    RegsForValue RFV(*DAG.getContext(), DAG.getTargetLoweringInfo(),
                     DAG.getDataLayout(), InReg, Ty);
    SDValue Chain = DAG.getEntryNode();
    Result = RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr, V);
    resolveDanglingDebugInfo(V, Result);
  }

  return Result;
}

/// getValue - Return an SDValue for the given Value.
SDValue SelectionDAGBuilder::getValue(const Value *V) {
  // If we already have an SDValue for this value, use it. It's important
  // to do this first, so that we don't create a CopyFromReg if we already
  // have a regular SDValue.
  SDValue &N = NodeMap[V];
  if (N.getNode()) return N;

  // If there's a virtual register allocated and initialized for this
  // value, use it.
  SDValue copyFromReg = getCopyFromRegs(V, V->getType());
  if (copyFromReg.getNode()) {
    return copyFromReg;
  }

  // Otherwise create a new SDValue and remember it.
  SDValue Val = getValueImpl(V);
  NodeMap[V] = Val;
  resolveDanglingDebugInfo(V, Val);
  return Val;
}

// Return true if an SDValue already exists for the given Value.
bool SelectionDAGBuilder::findValue(const Value *V) const {
  return (NodeMap.find(V) != NodeMap.end()) ||
    (FuncInfo.ValueMap.find(V) != FuncInfo.ValueMap.end());
}

/// getNonRegisterValue - Return an SDValue for the given Value, but
/// don't look in FuncInfo.ValueMap for a virtual register.
SDValue SelectionDAGBuilder::getNonRegisterValue(const Value *V) {
  // If we already have an SDValue for this value, use it.
  SDValue &N = NodeMap[V];
  if (N.getNode()) {
    if (isa<ConstantSDNode>(N) || isa<ConstantFPSDNode>(N)) {
      // Remove the debug location from the node as the node is about to be
      // used in a location which may differ from the original debug location.
      // This is relevant to Constant and ConstantFP nodes because they can
      // appear as constant expressions inside PHI nodes.
      N->setDebugLoc(DebugLoc());
    }
    return N;
  }

  // Otherwise create a new SDValue and remember it.
  SDValue Val = getValueImpl(V);
  NodeMap[V] = Val;
  resolveDanglingDebugInfo(V, Val);
  return Val;
}

/// getValueImpl - Helper function for getValue and getNonRegisterValue.
/// Create an SDValue for the given value.
SDValue SelectionDAGBuilder::getValueImpl(const Value *V) {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  if (const Constant *C = dyn_cast<Constant>(V)) {
    EVT VT = TLI.getValueType(DAG.getDataLayout(), V->getType(), true);

    if (const ConstantInt *CI = dyn_cast<ConstantInt>(C))
      return DAG.getConstant(*CI, getCurSDLoc(), VT);

    if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
      return DAG.getGlobalAddress(GV, getCurSDLoc(), VT);

    if (isa<ConstantPointerNull>(C)) {
      unsigned AS = V->getType()->getPointerAddressSpace();
      return DAG.getConstant(0, getCurSDLoc(),
                             TLI.getPointerTy(DAG.getDataLayout(), AS));
    }

    if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
      return DAG.getConstantFP(*CFP, getCurSDLoc(), VT);

    if (isa<UndefValue>(C) && !V->getType()->isAggregateType())
      return DAG.getUNDEF(VT);

    if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
      visit(CE->getOpcode(), *CE);
      SDValue N1 = NodeMap[V];
      assert(N1.getNode() && "visit didn't populate the NodeMap!");
      return N1;
    }

    if (isa<ConstantStruct>(C) || isa<ConstantArray>(C)) {
      SmallVector<SDValue, 4> Constants;
      for (User::const_op_iterator OI = C->op_begin(), OE = C->op_end();
           OI != OE; ++OI) {
        SDNode *Val = getValue(*OI).getNode();
        // If the operand is an empty aggregate, there are no values.
        if (!Val) continue;
        // Add each leaf value from the operand to the Constants list
        // to form a flattened list of all the values.
        for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
          Constants.push_back(SDValue(Val, i));
      }

      return DAG.getMergeValues(Constants, getCurSDLoc());
    }

    if (const ConstantDataSequential *CDS =
          dyn_cast<ConstantDataSequential>(C)) {
      SmallVector<SDValue, 4> Ops;
      for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
        SDNode *Val = getValue(CDS->getElementAsConstant(i)).getNode();
        // Add each leaf value from the operand to the Constants list
        // to form a flattened list of all the values.
        for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
          Ops.push_back(SDValue(Val, i));
      }

      if (isa<ArrayType>(CDS->getType()))
        return DAG.getMergeValues(Ops, getCurSDLoc());
      return NodeMap[V] = DAG.getNode(ISD::BUILD_VECTOR, getCurSDLoc(),
                                      VT, Ops);
    }

    if (C->getType()->isStructTy() || C->getType()->isArrayTy()) {
      assert((isa<ConstantAggregateZero>(C) || isa<UndefValue>(C)) &&
             "Unknown struct or array constant!");

      SmallVector<EVT, 4> ValueVTs;
      ComputeValueVTs(TLI, DAG.getDataLayout(), C->getType(), ValueVTs);
      unsigned NumElts = ValueVTs.size();
      if (NumElts == 0)
        return SDValue(); // empty struct
      SmallVector<SDValue, 4> Constants(NumElts);
      for (unsigned i = 0; i != NumElts; ++i) {
        EVT EltVT = ValueVTs[i];
        if (isa<UndefValue>(C))
          Constants[i] = DAG.getUNDEF(EltVT);
        else if (EltVT.isFloatingPoint())
          Constants[i] = DAG.getConstantFP(0, getCurSDLoc(), EltVT);
        else
          Constants[i] = DAG.getConstant(0, getCurSDLoc(), EltVT);
      }

      return DAG.getMergeValues(Constants, getCurSDLoc());
    }

    if (const BlockAddress *BA = dyn_cast<BlockAddress>(C))
      return DAG.getBlockAddress(BA, VT);

    VectorType *VecTy = cast<VectorType>(V->getType());
    unsigned NumElements = VecTy->getNumElements();

    // Now that we know the number and type of the elements, get that number of
    // elements into the Ops array based on what kind of constant it is.
    SmallVector<SDValue, 16> Ops;
    if (const ConstantVector *CV = dyn_cast<ConstantVector>(C)) {
      for (unsigned i = 0; i != NumElements; ++i)
        Ops.push_back(getValue(CV->getOperand(i)));
    } else {
      assert(isa<ConstantAggregateZero>(C) && "Unknown vector constant!");
      EVT EltVT =
          TLI.getValueType(DAG.getDataLayout(), VecTy->getElementType());

      SDValue Op;
      if (EltVT.isFloatingPoint())
        Op = DAG.getConstantFP(0, getCurSDLoc(), EltVT);
      else
        Op = DAG.getConstant(0, getCurSDLoc(), EltVT);
      Ops.assign(NumElements, Op);
    }

    // Create a BUILD_VECTOR node.
    return NodeMap[V] = DAG.getNode(ISD::BUILD_VECTOR, getCurSDLoc(), VT, Ops);
  }

  // If this is a static alloca, generate it as the frameindex instead of
  // computation.
  if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
    DenseMap<const AllocaInst*, int>::iterator SI =
      FuncInfo.StaticAllocaMap.find(AI);
    if (SI != FuncInfo.StaticAllocaMap.end())
      return DAG.getFrameIndex(SI->second,
                               TLI.getPointerTy(DAG.getDataLayout()));
  }

  // If this is an instruction which fast-isel has deferred, select it now.
  if (const Instruction *Inst = dyn_cast<Instruction>(V)) {
    unsigned InReg = FuncInfo.InitializeRegForValue(Inst);
    RegsForValue RFV(*DAG.getContext(), TLI, DAG.getDataLayout(), InReg,
                     Inst->getType());
    SDValue Chain = DAG.getEntryNode();
    return RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr, V);
  }

  llvm_unreachable("Can't get register for value!");
}

void SelectionDAGBuilder::visitRet(const ReturnInst &I) {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  auto &DL = DAG.getDataLayout();
  SDValue Chain = getControlRoot();
  SmallVector<ISD::OutputArg, 8> Outs;
  SmallVector<SDValue, 8> OutVals;

  if (!FuncInfo.CanLowerReturn) {
    unsigned DemoteReg = FuncInfo.DemoteRegister;
    const Function *F = I.getParent()->getParent();

    // Emit a store of the return value through the virtual register.
    // Leave Outs empty so that LowerReturn won't try to load return
    // registers the usual way.
    SmallVector<EVT, 1> PtrValueVTs;
    ComputeValueVTs(TLI, DL, PointerType::getUnqual(F->getReturnType()),
                    PtrValueVTs);

    SDValue RetPtr = DAG.getRegister(DemoteReg, PtrValueVTs[0]);
    SDValue RetOp = getValue(I.getOperand(0));

    SmallVector<EVT, 4> ValueVTs;
    SmallVector<uint64_t, 4> Offsets;
    ComputeValueVTs(TLI, DL, I.getOperand(0)->getType(), ValueVTs, &Offsets);
    unsigned NumValues = ValueVTs.size();

    SmallVector<SDValue, 4> Chains(NumValues);
    for (unsigned i = 0; i != NumValues; ++i) {
      SDValue Add = DAG.getNode(ISD::ADD, getCurSDLoc(),
                                RetPtr.getValueType(), RetPtr,
                                DAG.getIntPtrConstant(Offsets[i],
                                                      getCurSDLoc()));
      Chains[i] =
        DAG.getStore(Chain, getCurSDLoc(),
                     SDValue(RetOp.getNode(), RetOp.getResNo() + i),
                     // FIXME: better loc info would be nice.
                     Add, MachinePointerInfo(), false, false, 0);
    }

    Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(),
                        MVT::Other, Chains);
  } else if (I.getNumOperands() != 0) {
    SmallVector<EVT, 4> ValueVTs;
    ComputeValueVTs(TLI, DL, I.getOperand(0)->getType(), ValueVTs);
    unsigned NumValues = ValueVTs.size();
    if (NumValues) {
      SDValue RetOp = getValue(I.getOperand(0));

      const Function *F = I.getParent()->getParent();

      ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
      if (F->getAttributes().hasAttribute(AttributeSet::ReturnIndex,
                                          Attribute::SExt))
        ExtendKind = ISD::SIGN_EXTEND;
      else if (F->getAttributes().hasAttribute(AttributeSet::ReturnIndex,
                                               Attribute::ZExt))
        ExtendKind = ISD::ZERO_EXTEND;

      LLVMContext &Context = F->getContext();
      bool RetInReg = F->getAttributes().hasAttribute(AttributeSet::ReturnIndex,
                                                      Attribute::InReg);

      for (unsigned j = 0; j != NumValues; ++j) {
        EVT VT = ValueVTs[j];

        if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger())
          VT = TLI.getTypeForExtArgOrReturn(Context, VT, ExtendKind);

        unsigned NumParts = TLI.getNumRegisters(Context, VT);
        MVT PartVT = TLI.getRegisterType(Context, VT);
        SmallVector<SDValue, 4> Parts(NumParts);
        getCopyToParts(DAG, getCurSDLoc(),
                       SDValue(RetOp.getNode(), RetOp.getResNo() + j),
                       &Parts[0], NumParts, PartVT, &I, ExtendKind);

        // 'inreg' on function refers to return value
        ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
        if (RetInReg)
          Flags.setInReg();

        // Propagate extension type if any
        if (ExtendKind == ISD::SIGN_EXTEND)
          Flags.setSExt();
        else if (ExtendKind == ISD::ZERO_EXTEND)
          Flags.setZExt();

        for (unsigned i = 0; i < NumParts; ++i) {
          Outs.push_back(ISD::OutputArg(Flags, Parts[i].getValueType(),
                                        VT, /*isfixed=*/true, 0, 0));
          OutVals.push_back(Parts[i]);
        }
      }
    }
  }

  bool isVarArg = DAG.getMachineFunction().getFunction()->isVarArg();
  CallingConv::ID CallConv =
    DAG.getMachineFunction().getFunction()->getCallingConv();
  Chain = DAG.getTargetLoweringInfo().LowerReturn(
      Chain, CallConv, isVarArg, Outs, OutVals, getCurSDLoc(), DAG);

  // Verify that the target's LowerReturn behaved as expected.
  assert(Chain.getNode() && Chain.getValueType() == MVT::Other &&
         "LowerReturn didn't return a valid chain!");

  // Update the DAG with the new chain value resulting from return lowering.
  DAG.setRoot(Chain);
}

/// CopyToExportRegsIfNeeded - If the given value has virtual registers
/// created for it, emit nodes to copy the value into the virtual
/// registers.
void SelectionDAGBuilder::CopyToExportRegsIfNeeded(const Value *V) {
  // Skip empty types
  if (V->getType()->isEmptyTy())
    return;

  DenseMap<const Value *, unsigned>::iterator VMI = FuncInfo.ValueMap.find(V);
  if (VMI != FuncInfo.ValueMap.end()) {
    assert(!V->use_empty() && "Unused value assigned virtual registers!");
    CopyValueToVirtualRegister(V, VMI->second);
  }
}

/// ExportFromCurrentBlock - If this condition isn't known to be exported from
/// the current basic block, add it to ValueMap now so that we'll get a
/// CopyTo/FromReg.
void SelectionDAGBuilder::ExportFromCurrentBlock(const Value *V) {
  // No need to export constants.
  if (!isa<Instruction>(V) && !isa<Argument>(V)) return;

  // Already exported?
  if (FuncInfo.isExportedInst(V)) return;

  unsigned Reg = FuncInfo.InitializeRegForValue(V);
  CopyValueToVirtualRegister(V, Reg);
}

bool SelectionDAGBuilder::isExportableFromCurrentBlock(const Value *V,
                                                       const BasicBlock *FromBB) {
  // The operands of the setcc have to be in this block.  We don't know
  // how to export them from some other block.
  if (const Instruction *VI = dyn_cast<Instruction>(V)) {
    // Can export from current BB.
    if (VI->getParent() == FromBB)
      return true;

    // Is already exported, noop.
    return FuncInfo.isExportedInst(V);
  }

  // If this is an argument, we can export it if the BB is the entry block or
  // if it is already exported.
  if (isa<Argument>(V)) {
    if (FromBB == &FromBB->getParent()->getEntryBlock())
      return true;

    // Otherwise, can only export this if it is already exported.
    return FuncInfo.isExportedInst(V);
  }

  // Otherwise, constants can always be exported.
  return true;
}

/// Return branch probability calculated by BranchProbabilityInfo for IR blocks.
uint32_t SelectionDAGBuilder::getEdgeWeight(const MachineBasicBlock *Src,
                                            const MachineBasicBlock *Dst) const {
  BranchProbabilityInfo *BPI = FuncInfo.BPI;
  if (!BPI)
    return 0;
  const BasicBlock *SrcBB = Src->getBasicBlock();
  const BasicBlock *DstBB = Dst->getBasicBlock();
  return BPI->getEdgeWeight(SrcBB, DstBB);
}

void SelectionDAGBuilder::
addSuccessorWithWeight(MachineBasicBlock *Src, MachineBasicBlock *Dst,
                       uint32_t Weight /* = 0 */) {
  if (!Weight)
    Weight = getEdgeWeight(Src, Dst);
  Src->addSuccessor(Dst, Weight);
}


static bool InBlock(const Value *V, const BasicBlock *BB) {
  if (const Instruction *I = dyn_cast<Instruction>(V))
    return I->getParent() == BB;
  return true;
}

/// EmitBranchForMergedCondition - Helper method for FindMergedConditions.
/// This function emits a branch and is used at the leaves of an OR or an
/// AND operator tree.
///
void
SelectionDAGBuilder::EmitBranchForMergedCondition(const Value *Cond,
                                                  MachineBasicBlock *TBB,
                                                  MachineBasicBlock *FBB,
                                                  MachineBasicBlock *CurBB,
                                                  MachineBasicBlock *SwitchBB,
                                                  uint32_t TWeight,
                                                  uint32_t FWeight) {
  const BasicBlock *BB = CurBB->getBasicBlock();

  // If the leaf of the tree is a comparison, merge the condition into
  // the caseblock.
  if (const CmpInst *BOp = dyn_cast<CmpInst>(Cond)) {
    // The operands of the cmp have to be in this block.  We don't know
    // how to export them from some other block.  If this is the first block
    // of the sequence, no exporting is needed.
    if (CurBB == SwitchBB ||
        (isExportableFromCurrentBlock(BOp->getOperand(0), BB) &&
         isExportableFromCurrentBlock(BOp->getOperand(1), BB))) {
      ISD::CondCode Condition;
      if (const ICmpInst *IC = dyn_cast<ICmpInst>(Cond)) {
        Condition = getICmpCondCode(IC->getPredicate());
      } else if (const FCmpInst *FC = dyn_cast<FCmpInst>(Cond)) {
        Condition = getFCmpCondCode(FC->getPredicate());
        if (TM.Options.NoNaNsFPMath)
          Condition = getFCmpCodeWithoutNaN(Condition);
      } else {
        (void)Condition; // silence warning.
        llvm_unreachable("Unknown compare instruction");
      }

      CaseBlock CB(Condition, BOp->getOperand(0), BOp->getOperand(1), nullptr,
                   TBB, FBB, CurBB, TWeight, FWeight);
      SwitchCases.push_back(CB);
      return;
    }
  }

  // Create a CaseBlock record representing this branch.
  CaseBlock CB(ISD::SETEQ, Cond, ConstantInt::getTrue(*DAG.getContext()),
               nullptr, TBB, FBB, CurBB, TWeight, FWeight);
  SwitchCases.push_back(CB);
}

/// Scale down both weights to fit into uint32_t.
static void ScaleWeights(uint64_t &NewTrue, uint64_t &NewFalse) {
  uint64_t NewMax = (NewTrue > NewFalse) ? NewTrue : NewFalse;
  uint32_t Scale = (NewMax / UINT32_MAX) + 1;
  NewTrue = NewTrue / Scale;
  NewFalse = NewFalse / Scale;
}

/// FindMergedConditions - If Cond is an expression like (X & Y) or (X | Y),
/// recursively walk the and/or tree and emit a conditional branch for each
/// leaf; otherwise emit Cond itself as a branch.
void SelectionDAGBuilder::FindMergedConditions(const Value *Cond,
                                               MachineBasicBlock *TBB,
                                               MachineBasicBlock *FBB,
                                               MachineBasicBlock *CurBB,
                                               MachineBasicBlock *SwitchBB,
                                               unsigned Opc, uint32_t TWeight,
                                               uint32_t FWeight) {
  // If this node is not part of the or/and tree, emit it as a branch.
  const Instruction *BOp = dyn_cast<Instruction>(Cond);
  if (!BOp || !(isa<BinaryOperator>(BOp) || isa<CmpInst>(BOp)) ||
      (unsigned)BOp->getOpcode() != Opc || !BOp->hasOneUse() ||
      BOp->getParent() != CurBB->getBasicBlock() ||
      !InBlock(BOp->getOperand(0), CurBB->getBasicBlock()) ||
      !InBlock(BOp->getOperand(1), CurBB->getBasicBlock())) {
    EmitBranchForMergedCondition(Cond, TBB, FBB, CurBB, SwitchBB,
                                 TWeight, FWeight);
    return;
  }

  // Create TmpBB after CurBB.
  MachineFunction::iterator BBI = CurBB;
  MachineFunction &MF = DAG.getMachineFunction();
  MachineBasicBlock *TmpBB = MF.CreateMachineBasicBlock(CurBB->getBasicBlock());
  CurBB->getParent()->insert(++BBI, TmpBB);

  if (Opc == Instruction::Or) {
    // Codegen X | Y as:
    // BB1:
    //   jmp_if_X TBB
    //   jmp TmpBB
    // TmpBB:
    //   jmp_if_Y TBB
    //   jmp FBB
    //

    // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
    // The requirement is that
    //   TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB)
    //     = TrueProb for original BB.
    // Assuming the original weights are A and B, one choice is to set BB1's
    // weights to A and A+2B, and set TmpBB's weights to A and 2B. This choice
    // assumes that
    //   TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB.
    // Another choice is to assume TrueProb for BB1 equals to TrueProb for
    // TmpBB, but the math is more complicated.

    uint64_t NewTrueWeight = TWeight;
    uint64_t NewFalseWeight = (uint64_t)TWeight + 2 * (uint64_t)FWeight;
    ScaleWeights(NewTrueWeight, NewFalseWeight);
    // Emit the LHS condition.
    FindMergedConditions(BOp->getOperand(0), TBB, TmpBB, CurBB, SwitchBB, Opc,
                         NewTrueWeight, NewFalseWeight);

    NewTrueWeight = TWeight;
    NewFalseWeight = 2 * (uint64_t)FWeight;
    ScaleWeights(NewTrueWeight, NewFalseWeight);
    // Emit the RHS condition into TmpBB.
    FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, SwitchBB, Opc,
                         NewTrueWeight, NewFalseWeight);
  } else {
    assert(Opc == Instruction::And && "Unknown merge op!");
    // Codegen X & Y as:
    // BB1:
    //   jmp_if_X TmpBB
    //   jmp FBB
    // TmpBB:
    //   jmp_if_Y TBB
    //   jmp FBB
    //
    // This requires creation of TmpBB after CurBB.

    // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
    // The requirement is that
    //   FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB)
    //     = FalseProb for original BB.
    // Assuming the original weights are A and B, one choice is to set BB1's
    // weights to 2A+B and B, and set TmpBB's weights to 2A and B. This choice
    // assumes that
    //   FalseProb for BB1 == TrueProb for BB1 * FalseProb for TmpBB.

    uint64_t NewTrueWeight = 2 * (uint64_t)TWeight + (uint64_t)FWeight;
    uint64_t NewFalseWeight = FWeight;
    ScaleWeights(NewTrueWeight, NewFalseWeight);
    // Emit the LHS condition.
    FindMergedConditions(BOp->getOperand(0), TmpBB, FBB, CurBB, SwitchBB, Opc,
                         NewTrueWeight, NewFalseWeight);

    NewTrueWeight = 2 * (uint64_t)TWeight;
    NewFalseWeight = FWeight;
    ScaleWeights(NewTrueWeight, NewFalseWeight);
    // Emit the RHS condition into TmpBB.
    FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, SwitchBB, Opc,
                         NewTrueWeight, NewFalseWeight);
  }
}

/// If the set of cases should be emitted as a series of branches, return true.
/// If we should emit this as a bunch of and/or'd together conditions, return
/// false.
bool
SelectionDAGBuilder::ShouldEmitAsBranches(const std::vector<CaseBlock> &Cases) {
  if (Cases.size() != 2) return true;

  // If this is two comparisons of the same values or'd or and'd together, they
  // will get folded into a single comparison, so don't emit two blocks.
  if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
       Cases[0].CmpRHS == Cases[1].CmpRHS) ||
      (Cases[0].CmpRHS == Cases[1].CmpLHS &&
       Cases[0].CmpLHS == Cases[1].CmpRHS)) {
    return false;
  }

  // Handle: (X != null) | (Y != null) --> (X|Y) != 0
  // Handle: (X == null) & (Y == null) --> (X|Y) == 0
  if (Cases[0].CmpRHS == Cases[1].CmpRHS &&
      Cases[0].CC == Cases[1].CC &&
      isa<Constant>(Cases[0].CmpRHS) &&
      cast<Constant>(Cases[0].CmpRHS)->isNullValue()) {
    if (Cases[0].CC == ISD::SETEQ && Cases[0].TrueBB == Cases[1].ThisBB)
      return false;
    if (Cases[0].CC == ISD::SETNE && Cases[0].FalseBB == Cases[1].ThisBB)
      return false;
  }

  return true;
}

void SelectionDAGBuilder::visitBr(const BranchInst &I) {
  MachineBasicBlock *BrMBB = FuncInfo.MBB;

  // Update machine-CFG edges.
  MachineBasicBlock *Succ0MBB = FuncInfo.MBBMap[I.getSuccessor(0)];

  if (I.isUnconditional()) {
    // Update machine-CFG edges.
    BrMBB->addSuccessor(Succ0MBB);

    // If this is not a fall-through branch or optimizations are switched off,
    // emit the branch.
    if (Succ0MBB != NextBlock(BrMBB) || TM.getOptLevel() == CodeGenOpt::None)
      DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(),
                              MVT::Other, getControlRoot(),
                              DAG.getBasicBlock(Succ0MBB)));

    return;
  }

  // If this condition is one of the special cases we handle, do special stuff
  // now.
1558 const Value *CondVal = I.getCondition(); 1559 MachineBasicBlock *Succ1MBB = FuncInfo.MBBMap[I.getSuccessor(1)]; 1560 1561 // If this is a series of conditions that are or'd or and'd together, emit 1562 // this as a sequence of branches instead of setcc's with and/or operations. 1563 // As long as jumps are not expensive, this should improve performance. 1564 // For example, instead of something like: 1565 // cmp A, B 1566 // C = seteq 1567 // cmp D, E 1568 // F = setle 1569 // or C, F 1570 // jnz foo 1571 // Emit: 1572 // cmp A, B 1573 // je foo 1574 // cmp D, E 1575 // jle foo 1576 // 1577 if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(CondVal)) { 1578 if (!DAG.getTargetLoweringInfo().isJumpExpensive() && 1579 BOp->hasOneUse() && (BOp->getOpcode() == Instruction::And || 1580 BOp->getOpcode() == Instruction::Or)) { 1581 FindMergedConditions(BOp, Succ0MBB, Succ1MBB, BrMBB, BrMBB, 1582 BOp->getOpcode(), getEdgeWeight(BrMBB, Succ0MBB), 1583 getEdgeWeight(BrMBB, Succ1MBB)); 1584 // If the compares in later blocks need to use values not currently 1585 // exported from this block, export them now. This block should always 1586 // be the first entry. 1587 assert(SwitchCases[0].ThisBB == BrMBB && "Unexpected lowering!"); 1588 1589 // Allow some cases to be rejected. 1590 if (ShouldEmitAsBranches(SwitchCases)) { 1591 for (unsigned i = 1, e = SwitchCases.size(); i != e; ++i) { 1592 ExportFromCurrentBlock(SwitchCases[i].CmpLHS); 1593 ExportFromCurrentBlock(SwitchCases[i].CmpRHS); 1594 } 1595 1596 // Emit the branch for this block. 1597 visitSwitchCase(SwitchCases[0], BrMBB); 1598 SwitchCases.erase(SwitchCases.begin()); 1599 return; 1600 } 1601 1602 // Okay, we decided not to do this, remove any inserted MBB's and clear 1603 // SwitchCases. 1604 for (unsigned i = 1, e = SwitchCases.size(); i != e; ++i) 1605 FuncInfo.MF->erase(SwitchCases[i].ThisBB); 1606 1607 SwitchCases.clear(); 1608 } 1609 } 1610 1611 // Create a CaseBlock record representing this branch. 1612 CaseBlock CB(ISD::SETEQ, CondVal, ConstantInt::getTrue(*DAG.getContext()), 1613 nullptr, Succ0MBB, Succ1MBB, BrMBB); 1614 1615 // Use visitSwitchCase to actually insert the fast branch sequence for this 1616 // cond branch. 1617 visitSwitchCase(CB, BrMBB); 1618 } 1619 1620 /// visitSwitchCase - Emits the necessary code to represent a single node in 1621 /// the binary search tree resulting from lowering a switch instruction. 1622 void SelectionDAGBuilder::visitSwitchCase(CaseBlock &CB, 1623 MachineBasicBlock *SwitchBB) { 1624 SDValue Cond; 1625 SDValue CondLHS = getValue(CB.CmpLHS); 1626 SDLoc dl = getCurSDLoc(); 1627 1628 // Build the setcc now. 1629 if (!CB.CmpMHS) { 1630 // Fold "(X == true)" to X and "(X == false)" to !X to 1631 // handle common cases produced by branch lowering. 
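// Illustrative example (not from the original comments): for IR like
//   br i1 (icmp eq i1 %x, true), label %t, label %f
// the compare against true is redundant, so %x is used directly as the
// condition; a compare against false instead becomes (xor %x, 1), which is
// exactly the XOR node built below.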
1632 if (CB.CmpRHS == ConstantInt::getTrue(*DAG.getContext()) && 1633 CB.CC == ISD::SETEQ) 1634 Cond = CondLHS; 1635 else if (CB.CmpRHS == ConstantInt::getFalse(*DAG.getContext()) && 1636 CB.CC == ISD::SETEQ) { 1637 SDValue True = DAG.getConstant(1, dl, CondLHS.getValueType()); 1638 Cond = DAG.getNode(ISD::XOR, dl, CondLHS.getValueType(), CondLHS, True); 1639 } else 1640 Cond = DAG.getSetCC(dl, MVT::i1, CondLHS, getValue(CB.CmpRHS), CB.CC); 1641 } else { 1642 assert(CB.CC == ISD::SETLE && "Can handle only LE ranges now"); 1643 1644 const APInt& Low = cast<ConstantInt>(CB.CmpLHS)->getValue(); 1645 const APInt& High = cast<ConstantInt>(CB.CmpRHS)->getValue(); 1646 1647 SDValue CmpOp = getValue(CB.CmpMHS); 1648 EVT VT = CmpOp.getValueType(); 1649 1650 if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) { 1651 Cond = DAG.getSetCC(dl, MVT::i1, CmpOp, DAG.getConstant(High, dl, VT), 1652 ISD::SETLE); 1653 } else { 1654 SDValue SUB = DAG.getNode(ISD::SUB, dl, 1655 VT, CmpOp, DAG.getConstant(Low, dl, VT)); 1656 Cond = DAG.getSetCC(dl, MVT::i1, SUB, 1657 DAG.getConstant(High-Low, dl, VT), ISD::SETULE); 1658 } 1659 } 1660 1661 // Update successor info 1662 addSuccessorWithWeight(SwitchBB, CB.TrueBB, CB.TrueWeight); 1663 // TrueBB and FalseBB are always different unless the incoming IR is 1664 // degenerate. This only happens when running llc on weird IR. 1665 if (CB.TrueBB != CB.FalseBB) 1666 addSuccessorWithWeight(SwitchBB, CB.FalseBB, CB.FalseWeight); 1667 1668 // If the lhs block is the next block, invert the condition so that we can 1669 // fall through to the lhs instead of the rhs block. 1670 if (CB.TrueBB == NextBlock(SwitchBB)) { 1671 std::swap(CB.TrueBB, CB.FalseBB); 1672 SDValue True = DAG.getConstant(1, dl, Cond.getValueType()); 1673 Cond = DAG.getNode(ISD::XOR, dl, Cond.getValueType(), Cond, True); 1674 } 1675 1676 SDValue BrCond = DAG.getNode(ISD::BRCOND, dl, 1677 MVT::Other, getControlRoot(), Cond, 1678 DAG.getBasicBlock(CB.TrueBB)); 1679 1680 // Insert the false branch. Do this even if it's a fall through branch, 1681 // this makes it easier to do DAG optimizations which require inverting 1682 // the branch condition. 1683 BrCond = DAG.getNode(ISD::BR, dl, MVT::Other, BrCond, 1684 DAG.getBasicBlock(CB.FalseBB)); 1685 1686 DAG.setRoot(BrCond); 1687 } 1688 1689 /// visitJumpTable - Emit JumpTable node in the current MBB 1690 void SelectionDAGBuilder::visitJumpTable(JumpTable &JT) { 1691 // Emit the code for the jump table 1692 assert(JT.Reg != -1U && "Should lower JT Header first!"); 1693 EVT PTy = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()); 1694 SDValue Index = DAG.getCopyFromReg(getControlRoot(), getCurSDLoc(), 1695 JT.Reg, PTy); 1696 SDValue Table = DAG.getJumpTable(JT.JTI, PTy); 1697 SDValue BrJumpTable = DAG.getNode(ISD::BR_JT, getCurSDLoc(), 1698 MVT::Other, Index.getValue(1), 1699 Table, Index); 1700 DAG.setRoot(BrJumpTable); 1701 } 1702 1703 /// visitJumpTableHeader - This function emits necessary code to produce index 1704 /// in the JumpTable from switch case. 1705 void SelectionDAGBuilder::visitJumpTableHeader(JumpTable &JT, 1706 JumpTableHeader &JTH, 1707 MachineBasicBlock *SwitchBB) { 1708 SDLoc dl = getCurSDLoc(); 1709 1710 // Subtract the lowest switch case value from the value being switched on and 1711 // conditional branch to default mbb if the result is greater than the 1712 // difference between smallest and largest cases. 
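// Worked example (illustrative): for a switch whose cases span 10..14,
// JTH.First == 10 and JTH.Last == 14, so the code below effectively emits
//   Sub = SwitchVal - 10
//   brcond (setugt Sub, 4), DefaultMBB
// The unsigned compare also rejects values below 10, since they wrap to
// large unsigned numbers; Sub then doubles as the zero-based table index.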
1713 SDValue SwitchOp = getValue(JTH.SValue);
1714 EVT VT = SwitchOp.getValueType();
1715 SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, SwitchOp,
1716 DAG.getConstant(JTH.First, dl, VT));
1717
1718 // The SDNode we just created, which holds the value being switched on minus
1719 // the smallest case value, needs to be copied to a virtual register so it
1720 // can be used as an index into the jump table in a subsequent basic block.
1721 // This value may be smaller or larger than the target's pointer type, and
1722 // may therefore require extension or truncation.
1723 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1724 SwitchOp = DAG.getZExtOrTrunc(Sub, dl, TLI.getPointerTy(DAG.getDataLayout()));
1725
1726 unsigned JumpTableReg =
1727 FuncInfo.CreateReg(TLI.getPointerTy(DAG.getDataLayout()));
1728 SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), dl,
1729 JumpTableReg, SwitchOp);
1730 JT.Reg = JumpTableReg;
1731
1732 // Emit the range check for the jump table, and branch to the default block
1733 // for the switch statement if the value being switched on exceeds the largest
1734 // case in the switch.
1735 SDValue CMP = DAG.getSetCC(
1736 dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
1737 Sub.getValueType()),
1738 Sub, DAG.getConstant(JTH.Last - JTH.First, dl, VT), ISD::SETUGT);
1739
1740 SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
1741 MVT::Other, CopyTo, CMP,
1742 DAG.getBasicBlock(JT.Default));
1743
1744 // Avoid emitting unnecessary branches to the next block.
1745 if (JT.MBB != NextBlock(SwitchBB))
1746 BrCond = DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
1747 DAG.getBasicBlock(JT.MBB));
1748
1749 DAG.setRoot(BrCond);
1750 }
1751
1752 /// Codegen a new tail for a stack protector check ParentMBB which has had its
1753 /// tail spliced into a stack protector check success bb.
1754 ///
1755 /// For a high level explanation of how this fits into the stack protector
1756 /// generation see the comment on the declaration of class
1757 /// StackProtectorDescriptor.
1758 void SelectionDAGBuilder::visitSPDescriptorParent(StackProtectorDescriptor &SPD,
1759 MachineBasicBlock *ParentBB) {
1760
1761 // First create the loads to the guard/stack slot for the comparison.
1762 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1763 EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
1764
1765 MachineFrameInfo *MFI = ParentBB->getParent()->getFrameInfo();
1766 int FI = MFI->getStackProtectorIndex();
1767
1768 const Value *IRGuard = SPD.getGuard();
1769 SDValue GuardPtr = getValue(IRGuard);
1770 SDValue StackSlotPtr = DAG.getFrameIndex(FI, PtrTy);
1771
1772 unsigned Align = DL->getPrefTypeAlignment(IRGuard->getType());
1773
1774 SDValue Guard;
1775 SDLoc dl = getCurSDLoc();
1776
1777 // If GuardReg is set and useLoadStackGuardNode returns true, retrieve the
1778 // guard value from the virtual register holding the value. Otherwise, emit a
1779 // volatile load to retrieve the stack guard value.
1780 unsigned GuardReg = SPD.getGuardReg();
1781
1782 if (GuardReg && TLI.useLoadStackGuardNode())
1783 Guard = DAG.getCopyFromReg(DAG.getEntryNode(), dl, GuardReg,
1784 PtrTy);
1785 else
1786 Guard = DAG.getLoad(PtrTy, dl, DAG.getEntryNode(),
1787 GuardPtr, MachinePointerInfo(IRGuard, 0),
1788 true, false, false, Align);
1789
1790 SDValue StackSlot = DAG.getLoad(PtrTy, dl, DAG.getEntryNode(),
1791 StackSlotPtr,
1792 MachinePointerInfo::getFixedStack(FI),
1793 true, false, false, Align);
1794
1795 // Perform the comparison via a subtract/getsetcc.
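// The check below computes (Guard - StackSlot) != 0, which is equivalent to
// Guard != StackSlot; comparing the difference against zero mirrors the
// sub/setcc pattern used by the switch lowering headers in this file.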
1796 EVT VT = Guard.getValueType();
1797 SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, Guard, StackSlot);
1798
1799 SDValue Cmp = DAG.getSetCC(dl, TLI.getSetCCResultType(DAG.getDataLayout(),
1800 *DAG.getContext(),
1801 Sub.getValueType()),
1802 Sub, DAG.getConstant(0, dl, VT), ISD::SETNE);
1803
1804 // If the sub is not 0, then we know the guard and stack slot are not equal,
1805 // so branch to the failure MBB.
1806 SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
1807 MVT::Other, StackSlot.getOperand(0),
1808 Cmp, DAG.getBasicBlock(SPD.getFailureMBB()));
1809 // Otherwise branch to success MBB.
1810 SDValue Br = DAG.getNode(ISD::BR, dl,
1811 MVT::Other, BrCond,
1812 DAG.getBasicBlock(SPD.getSuccessMBB()));
1813
1814 DAG.setRoot(Br);
1815 }
1816
1817 /// Codegen the failure basic block for a stack protector check.
1818 ///
1819 /// A failure stack protector machine basic block consists simply of a call to
1820 /// __stack_chk_fail().
1821 ///
1822 /// For a high level explanation of how this fits into the stack protector
1823 /// generation see the comment on the declaration of class
1824 /// StackProtectorDescriptor.
1825 void
1826 SelectionDAGBuilder::visitSPDescriptorFailure(StackProtectorDescriptor &SPD) {
1827 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1828 SDValue Chain =
1829 TLI.makeLibCall(DAG, RTLIB::STACKPROTECTOR_CHECK_FAIL, MVT::isVoid,
1830 nullptr, 0, false, getCurSDLoc(), false, false).second;
1831 DAG.setRoot(Chain);
1832 }
1833
1834 /// visitBitTestHeader - This function emits necessary code to produce a value
1835 /// suitable for "bit tests".
1836 void SelectionDAGBuilder::visitBitTestHeader(BitTestBlock &B,
1837 MachineBasicBlock *SwitchBB) {
1838 SDLoc dl = getCurSDLoc();
1839
1840 // Subtract the minimum value.
1841 SDValue SwitchOp = getValue(B.SValue);
1842 EVT VT = SwitchOp.getValueType();
1843 SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, SwitchOp,
1844 DAG.getConstant(B.First, dl, VT));
1845
1846 // Check the range.
1847 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1848 SDValue RangeCmp = DAG.getSetCC(
1849 dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
1850 Sub.getValueType()),
1851 Sub, DAG.getConstant(B.Range, dl, VT), ISD::SETUGT);
1852
1853 // Determine the type of the test operands.
1854 bool UsePtrType = false;
1855 if (!TLI.isTypeLegal(VT))
1856 UsePtrType = true;
1857 else {
1858 for (unsigned i = 0, e = B.Cases.size(); i != e; ++i)
1859 if (!isUIntN(VT.getSizeInBits(), B.Cases[i].Mask)) {
1860 // Switch table case ranges are encoded into a series of masks.
1861 // Just use the pointer type; it's guaranteed to fit.
1862 UsePtrType = true;
1863 break;
1864 }
1865 }
1866 if (UsePtrType) {
1867 VT = TLI.getPointerTy(DAG.getDataLayout());
1868 Sub = DAG.getZExtOrTrunc(Sub, dl, VT);
1869 }
1870
1871 B.RegVT = VT.getSimpleVT();
1872 B.Reg = FuncInfo.CreateReg(B.RegVT);
1873 SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), dl, B.Reg, Sub);
1874
1875 MachineBasicBlock* MBB = B.Cases[0].ThisBB;
1876
1877 addSuccessorWithWeight(SwitchBB, B.Default);
1878 addSuccessorWithWeight(SwitchBB, MBB);
1879
1880 SDValue BrRange = DAG.getNode(ISD::BRCOND, dl,
1881 MVT::Other, CopyTo, RangeCmp,
1882 DAG.getBasicBlock(B.Default));
1883
1884 // Avoid emitting unnecessary branches to the next block.
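// Illustrative example: if B.Cases[0].ThisBB is laid out immediately after
// SwitchBB, the unconditional BR below would only branch to the fall-through
// block, so it is omitted.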
1885 if (MBB != NextBlock(SwitchBB)) 1886 BrRange = DAG.getNode(ISD::BR, dl, MVT::Other, BrRange, 1887 DAG.getBasicBlock(MBB)); 1888 1889 DAG.setRoot(BrRange); 1890 } 1891 1892 /// visitBitTestCase - this function produces one "bit test" 1893 void SelectionDAGBuilder::visitBitTestCase(BitTestBlock &BB, 1894 MachineBasicBlock* NextMBB, 1895 uint32_t BranchWeightToNext, 1896 unsigned Reg, 1897 BitTestCase &B, 1898 MachineBasicBlock *SwitchBB) { 1899 SDLoc dl = getCurSDLoc(); 1900 MVT VT = BB.RegVT; 1901 SDValue ShiftOp = DAG.getCopyFromReg(getControlRoot(), dl, Reg, VT); 1902 SDValue Cmp; 1903 unsigned PopCount = countPopulation(B.Mask); 1904 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 1905 if (PopCount == 1) { 1906 // Testing for a single bit; just compare the shift count with what it 1907 // would need to be to shift a 1 bit in that position. 1908 Cmp = DAG.getSetCC( 1909 dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT), 1910 ShiftOp, DAG.getConstant(countTrailingZeros(B.Mask), dl, VT), 1911 ISD::SETEQ); 1912 } else if (PopCount == BB.Range) { 1913 // There is only one zero bit in the range, test for it directly. 1914 Cmp = DAG.getSetCC( 1915 dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT), 1916 ShiftOp, DAG.getConstant(countTrailingOnes(B.Mask), dl, VT), 1917 ISD::SETNE); 1918 } else { 1919 // Make desired shift 1920 SDValue SwitchVal = DAG.getNode(ISD::SHL, dl, VT, 1921 DAG.getConstant(1, dl, VT), ShiftOp); 1922 1923 // Emit bit tests and jumps 1924 SDValue AndOp = DAG.getNode(ISD::AND, dl, 1925 VT, SwitchVal, DAG.getConstant(B.Mask, dl, VT)); 1926 Cmp = DAG.getSetCC( 1927 dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT), 1928 AndOp, DAG.getConstant(0, dl, VT), ISD::SETNE); 1929 } 1930 1931 // The branch weight from SwitchBB to B.TargetBB is B.ExtraWeight. 1932 addSuccessorWithWeight(SwitchBB, B.TargetBB, B.ExtraWeight); 1933 // The branch weight from SwitchBB to NextMBB is BranchWeightToNext. 1934 addSuccessorWithWeight(SwitchBB, NextMBB, BranchWeightToNext); 1935 1936 SDValue BrAnd = DAG.getNode(ISD::BRCOND, dl, 1937 MVT::Other, getControlRoot(), 1938 Cmp, DAG.getBasicBlock(B.TargetBB)); 1939 1940 // Avoid emitting unnecessary branches to the next block. 1941 if (NextMBB != NextBlock(SwitchBB)) 1942 BrAnd = DAG.getNode(ISD::BR, dl, MVT::Other, BrAnd, 1943 DAG.getBasicBlock(NextMBB)); 1944 1945 DAG.setRoot(BrAnd); 1946 } 1947 1948 void SelectionDAGBuilder::visitInvoke(const InvokeInst &I) { 1949 MachineBasicBlock *InvokeMBB = FuncInfo.MBB; 1950 1951 // Retrieve successors. 1952 MachineBasicBlock *Return = FuncInfo.MBBMap[I.getSuccessor(0)]; 1953 MachineBasicBlock *LandingPad = FuncInfo.MBBMap[I.getSuccessor(1)]; 1954 1955 const Value *Callee(I.getCalledValue()); 1956 const Function *Fn = dyn_cast<Function>(Callee); 1957 if (isa<InlineAsm>(Callee)) 1958 visitInlineAsm(&I); 1959 else if (Fn && Fn->isIntrinsic()) { 1960 switch (Fn->getIntrinsicID()) { 1961 default: 1962 llvm_unreachable("Cannot invoke this intrinsic"); 1963 case Intrinsic::donothing: 1964 // Ignore invokes to @llvm.donothing: jump directly to the next BB. 
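// Illustrative example: for
//   invoke void @llvm.donothing() to label %cont unwind label %lpad
// no call is emitted at all; only the unconditional branch to the normal
// successor (built after this switch) remains.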
1965 break; 1966 case Intrinsic::experimental_patchpoint_void: 1967 case Intrinsic::experimental_patchpoint_i64: 1968 visitPatchpoint(&I, LandingPad); 1969 break; 1970 case Intrinsic::experimental_gc_statepoint: 1971 LowerStatepoint(ImmutableStatepoint(&I), LandingPad); 1972 break; 1973 } 1974 } else 1975 LowerCallTo(&I, getValue(Callee), false, LandingPad); 1976 1977 // If the value of the invoke is used outside of its defining block, make it 1978 // available as a virtual register. 1979 // We already took care of the exported value for the statepoint instruction 1980 // during call to the LowerStatepoint. 1981 if (!isStatepoint(I)) { 1982 CopyToExportRegsIfNeeded(&I); 1983 } 1984 1985 // Update successor info 1986 addSuccessorWithWeight(InvokeMBB, Return); 1987 addSuccessorWithWeight(InvokeMBB, LandingPad); 1988 1989 // Drop into normal successor. 1990 DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), 1991 MVT::Other, getControlRoot(), 1992 DAG.getBasicBlock(Return))); 1993 } 1994 1995 void SelectionDAGBuilder::visitResume(const ResumeInst &RI) { 1996 llvm_unreachable("SelectionDAGBuilder shouldn't visit resume instructions!"); 1997 } 1998 1999 void SelectionDAGBuilder::visitLandingPad(const LandingPadInst &LP) { 2000 assert(FuncInfo.MBB->isLandingPad() && 2001 "Call to landingpad not in landing pad!"); 2002 2003 MachineBasicBlock *MBB = FuncInfo.MBB; 2004 MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI(); 2005 AddLandingPadInfo(LP, MMI, MBB); 2006 2007 // If there aren't registers to copy the values into (e.g., during SjLj 2008 // exceptions), then don't bother to create these DAG nodes. 2009 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 2010 if (TLI.getExceptionPointerRegister() == 0 && 2011 TLI.getExceptionSelectorRegister() == 0) 2012 return; 2013 2014 SmallVector<EVT, 2> ValueVTs; 2015 SDLoc dl = getCurSDLoc(); 2016 ComputeValueVTs(TLI, DAG.getDataLayout(), LP.getType(), ValueVTs); 2017 assert(ValueVTs.size() == 2 && "Only two-valued landingpads are supported"); 2018 2019 // Get the two live-in registers as SDValues. The physregs have already been 2020 // copied into virtual registers. 2021 SDValue Ops[2]; 2022 if (FuncInfo.ExceptionPointerVirtReg) { 2023 Ops[0] = DAG.getZExtOrTrunc( 2024 DAG.getCopyFromReg(DAG.getEntryNode(), dl, 2025 FuncInfo.ExceptionPointerVirtReg, 2026 TLI.getPointerTy(DAG.getDataLayout())), 2027 dl, ValueVTs[0]); 2028 } else { 2029 Ops[0] = DAG.getConstant(0, dl, TLI.getPointerTy(DAG.getDataLayout())); 2030 } 2031 Ops[1] = DAG.getZExtOrTrunc( 2032 DAG.getCopyFromReg(DAG.getEntryNode(), dl, 2033 FuncInfo.ExceptionSelectorVirtReg, 2034 TLI.getPointerTy(DAG.getDataLayout())), 2035 dl, ValueVTs[1]); 2036 2037 // Merge into one. 2038 SDValue Res = DAG.getNode(ISD::MERGE_VALUES, dl, 2039 DAG.getVTList(ValueVTs), Ops); 2040 setValue(&LP, Res); 2041 } 2042 2043 void SelectionDAGBuilder::sortAndRangeify(CaseClusterVector &Clusters) { 2044 #ifndef NDEBUG 2045 for (const CaseCluster &CC : Clusters) 2046 assert(CC.Low == CC.High && "Input clusters must be single-case"); 2047 #endif 2048 2049 std::sort(Clusters.begin(), Clusters.end(), 2050 [](const CaseCluster &a, const CaseCluster &b) { 2051 return a.Low->getValue().slt(b.Low->getValue()); 2052 }); 2053 2054 // Merge adjacent clusters with the same destination. 
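// Illustrative example: the sorted single-case clusters
//   {1 -> A}, {2 -> A}, {3 -> B}, {4 -> A}
// become {[1,2] -> A}, {[3,3] -> B}, {[4,4] -> A}: only numerically adjacent
// cases with the same successor are merged, and merged clusters accumulate
// their weights.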
2055 const unsigned N = Clusters.size(); 2056 unsigned DstIndex = 0; 2057 for (unsigned SrcIndex = 0; SrcIndex < N; ++SrcIndex) { 2058 CaseCluster &CC = Clusters[SrcIndex]; 2059 const ConstantInt *CaseVal = CC.Low; 2060 MachineBasicBlock *Succ = CC.MBB; 2061 2062 if (DstIndex != 0 && Clusters[DstIndex - 1].MBB == Succ && 2063 (CaseVal->getValue() - Clusters[DstIndex - 1].High->getValue()) == 1) { 2064 // If this case has the same successor and is a neighbour, merge it into 2065 // the previous cluster. 2066 Clusters[DstIndex - 1].High = CaseVal; 2067 Clusters[DstIndex - 1].Weight += CC.Weight; 2068 assert(Clusters[DstIndex - 1].Weight >= CC.Weight && "Weight overflow!"); 2069 } else { 2070 std::memmove(&Clusters[DstIndex++], &Clusters[SrcIndex], 2071 sizeof(Clusters[SrcIndex])); 2072 } 2073 } 2074 Clusters.resize(DstIndex); 2075 } 2076 2077 void SelectionDAGBuilder::UpdateSplitBlock(MachineBasicBlock *First, 2078 MachineBasicBlock *Last) { 2079 // Update JTCases. 2080 for (unsigned i = 0, e = JTCases.size(); i != e; ++i) 2081 if (JTCases[i].first.HeaderBB == First) 2082 JTCases[i].first.HeaderBB = Last; 2083 2084 // Update BitTestCases. 2085 for (unsigned i = 0, e = BitTestCases.size(); i != e; ++i) 2086 if (BitTestCases[i].Parent == First) 2087 BitTestCases[i].Parent = Last; 2088 } 2089 2090 void SelectionDAGBuilder::visitIndirectBr(const IndirectBrInst &I) { 2091 MachineBasicBlock *IndirectBrMBB = FuncInfo.MBB; 2092 2093 // Update machine-CFG edges with unique successors. 2094 SmallSet<BasicBlock*, 32> Done; 2095 for (unsigned i = 0, e = I.getNumSuccessors(); i != e; ++i) { 2096 BasicBlock *BB = I.getSuccessor(i); 2097 bool Inserted = Done.insert(BB).second; 2098 if (!Inserted) 2099 continue; 2100 2101 MachineBasicBlock *Succ = FuncInfo.MBBMap[BB]; 2102 addSuccessorWithWeight(IndirectBrMBB, Succ); 2103 } 2104 2105 DAG.setRoot(DAG.getNode(ISD::BRIND, getCurSDLoc(), 2106 MVT::Other, getControlRoot(), 2107 getValue(I.getAddress()))); 2108 } 2109 2110 void SelectionDAGBuilder::visitUnreachable(const UnreachableInst &I) { 2111 if (DAG.getTarget().Options.TrapUnreachable) 2112 DAG.setRoot(DAG.getNode(ISD::TRAP, getCurSDLoc(), MVT::Other, DAG.getRoot())); 2113 } 2114 2115 void SelectionDAGBuilder::visitFSub(const User &I) { 2116 // -0.0 - X --> fneg 2117 Type *Ty = I.getType(); 2118 if (isa<Constant>(I.getOperand(0)) && 2119 I.getOperand(0) == ConstantFP::getZeroValueForNegation(Ty)) { 2120 SDValue Op2 = getValue(I.getOperand(1)); 2121 setValue(&I, DAG.getNode(ISD::FNEG, getCurSDLoc(), 2122 Op2.getValueType(), Op2)); 2123 return; 2124 } 2125 2126 visitBinary(I, ISD::FSUB); 2127 } 2128 2129 void SelectionDAGBuilder::visitBinary(const User &I, unsigned OpCode) { 2130 SDValue Op1 = getValue(I.getOperand(0)); 2131 SDValue Op2 = getValue(I.getOperand(1)); 2132 2133 bool nuw = false; 2134 bool nsw = false; 2135 bool exact = false; 2136 FastMathFlags FMF; 2137 2138 if (const OverflowingBinaryOperator *OFBinOp = 2139 dyn_cast<const OverflowingBinaryOperator>(&I)) { 2140 nuw = OFBinOp->hasNoUnsignedWrap(); 2141 nsw = OFBinOp->hasNoSignedWrap(); 2142 } 2143 if (const PossiblyExactOperator *ExactOp = 2144 dyn_cast<const PossiblyExactOperator>(&I)) 2145 exact = ExactOp->isExact(); 2146 if (const FPMathOperator *FPOp = dyn_cast<const FPMathOperator>(&I)) 2147 FMF = FPOp->getFastMathFlags(); 2148 2149 SDNodeFlags Flags; 2150 Flags.setExact(exact); 2151 Flags.setNoSignedWrap(nsw); 2152 Flags.setNoUnsignedWrap(nuw); 2153 if (EnableFMFInDAG) { 2154 Flags.setAllowReciprocal(FMF.allowReciprocal()); 2155 
Flags.setNoInfs(FMF.noInfs()); 2156 Flags.setNoNaNs(FMF.noNaNs()); 2157 Flags.setNoSignedZeros(FMF.noSignedZeros()); 2158 Flags.setUnsafeAlgebra(FMF.unsafeAlgebra()); 2159 } 2160 SDValue BinNodeValue = DAG.getNode(OpCode, getCurSDLoc(), Op1.getValueType(), 2161 Op1, Op2, &Flags); 2162 setValue(&I, BinNodeValue); 2163 } 2164 2165 void SelectionDAGBuilder::visitShift(const User &I, unsigned Opcode) { 2166 SDValue Op1 = getValue(I.getOperand(0)); 2167 SDValue Op2 = getValue(I.getOperand(1)); 2168 2169 EVT ShiftTy = DAG.getTargetLoweringInfo().getShiftAmountTy( 2170 Op2.getValueType(), DAG.getDataLayout()); 2171 2172 // Coerce the shift amount to the right type if we can. 2173 if (!I.getType()->isVectorTy() && Op2.getValueType() != ShiftTy) { 2174 unsigned ShiftSize = ShiftTy.getSizeInBits(); 2175 unsigned Op2Size = Op2.getValueType().getSizeInBits(); 2176 SDLoc DL = getCurSDLoc(); 2177 2178 // If the operand is smaller than the shift count type, promote it. 2179 if (ShiftSize > Op2Size) 2180 Op2 = DAG.getNode(ISD::ZERO_EXTEND, DL, ShiftTy, Op2); 2181 2182 // If the operand is larger than the shift count type but the shift 2183 // count type has enough bits to represent any shift value, truncate 2184 // it now. This is a common case and it exposes the truncate to 2185 // optimization early. 2186 else if (ShiftSize >= Log2_32_Ceil(Op2.getValueType().getSizeInBits())) 2187 Op2 = DAG.getNode(ISD::TRUNCATE, DL, ShiftTy, Op2); 2188 // Otherwise we'll need to temporarily settle for some other convenient 2189 // type. Type legalization will make adjustments once the shiftee is split. 2190 else 2191 Op2 = DAG.getZExtOrTrunc(Op2, DL, MVT::i32); 2192 } 2193 2194 bool nuw = false; 2195 bool nsw = false; 2196 bool exact = false; 2197 2198 if (Opcode == ISD::SRL || Opcode == ISD::SRA || Opcode == ISD::SHL) { 2199 2200 if (const OverflowingBinaryOperator *OFBinOp = 2201 dyn_cast<const OverflowingBinaryOperator>(&I)) { 2202 nuw = OFBinOp->hasNoUnsignedWrap(); 2203 nsw = OFBinOp->hasNoSignedWrap(); 2204 } 2205 if (const PossiblyExactOperator *ExactOp = 2206 dyn_cast<const PossiblyExactOperator>(&I)) 2207 exact = ExactOp->isExact(); 2208 } 2209 SDNodeFlags Flags; 2210 Flags.setExact(exact); 2211 Flags.setNoSignedWrap(nsw); 2212 Flags.setNoUnsignedWrap(nuw); 2213 SDValue Res = DAG.getNode(Opcode, getCurSDLoc(), Op1.getValueType(), Op1, Op2, 2214 &Flags); 2215 setValue(&I, Res); 2216 } 2217 2218 void SelectionDAGBuilder::visitSDiv(const User &I) { 2219 SDValue Op1 = getValue(I.getOperand(0)); 2220 SDValue Op2 = getValue(I.getOperand(1)); 2221 2222 SDNodeFlags Flags; 2223 Flags.setExact(isa<PossiblyExactOperator>(&I) && 2224 cast<PossiblyExactOperator>(&I)->isExact()); 2225 setValue(&I, DAG.getNode(ISD::SDIV, getCurSDLoc(), Op1.getValueType(), Op1, 2226 Op2, &Flags)); 2227 } 2228 2229 void SelectionDAGBuilder::visitICmp(const User &I) { 2230 ICmpInst::Predicate predicate = ICmpInst::BAD_ICMP_PREDICATE; 2231 if (const ICmpInst *IC = dyn_cast<ICmpInst>(&I)) 2232 predicate = IC->getPredicate(); 2233 else if (const ConstantExpr *IC = dyn_cast<ConstantExpr>(&I)) 2234 predicate = ICmpInst::Predicate(IC->getPredicate()); 2235 SDValue Op1 = getValue(I.getOperand(0)); 2236 SDValue Op2 = getValue(I.getOperand(1)); 2237 ISD::CondCode Opcode = getICmpCondCode(predicate); 2238 2239 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(), 2240 I.getType()); 2241 setValue(&I, DAG.getSetCC(getCurSDLoc(), DestVT, Op1, Op2, Opcode)); 2242 } 2243 2244 void SelectionDAGBuilder::visitFCmp(const User &I) { 2245 
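// As in visitICmp above, the User may be either a genuine FCmpInst or a
// ConstantExpr performing an fcmp, so the predicate is recovered from
// whichever form is present.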
FCmpInst::Predicate predicate = FCmpInst::BAD_FCMP_PREDICATE; 2246 if (const FCmpInst *FC = dyn_cast<FCmpInst>(&I)) 2247 predicate = FC->getPredicate(); 2248 else if (const ConstantExpr *FC = dyn_cast<ConstantExpr>(&I)) 2249 predicate = FCmpInst::Predicate(FC->getPredicate()); 2250 SDValue Op1 = getValue(I.getOperand(0)); 2251 SDValue Op2 = getValue(I.getOperand(1)); 2252 ISD::CondCode Condition = getFCmpCondCode(predicate); 2253 if (TM.Options.NoNaNsFPMath) 2254 Condition = getFCmpCodeWithoutNaN(Condition); 2255 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(), 2256 I.getType()); 2257 setValue(&I, DAG.getSetCC(getCurSDLoc(), DestVT, Op1, Op2, Condition)); 2258 } 2259 2260 void SelectionDAGBuilder::visitSelect(const User &I) { 2261 SmallVector<EVT, 4> ValueVTs; 2262 ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(), I.getType(), 2263 ValueVTs); 2264 unsigned NumValues = ValueVTs.size(); 2265 if (NumValues == 0) return; 2266 2267 SmallVector<SDValue, 4> Values(NumValues); 2268 SDValue Cond = getValue(I.getOperand(0)); 2269 SDValue LHSVal = getValue(I.getOperand(1)); 2270 SDValue RHSVal = getValue(I.getOperand(2)); 2271 auto BaseOps = {Cond}; 2272 ISD::NodeType OpCode = Cond.getValueType().isVector() ? 2273 ISD::VSELECT : ISD::SELECT; 2274 2275 // Min/max matching is only viable if all output VTs are the same. 2276 if (std::equal(ValueVTs.begin(), ValueVTs.end(), ValueVTs.begin())) { 2277 Value *LHS, *RHS; 2278 SelectPatternFlavor SPF = matchSelectPattern(const_cast<User*>(&I), LHS, RHS); 2279 ISD::NodeType Opc = ISD::DELETED_NODE; 2280 switch (SPF) { 2281 case SPF_UMAX: Opc = ISD::UMAX; break; 2282 case SPF_UMIN: Opc = ISD::UMIN; break; 2283 case SPF_SMAX: Opc = ISD::SMAX; break; 2284 case SPF_SMIN: Opc = ISD::SMIN; break; 2285 default: break; 2286 } 2287 2288 EVT VT = ValueVTs[0]; 2289 LLVMContext &Ctx = *DAG.getContext(); 2290 auto &TLI = DAG.getTargetLoweringInfo(); 2291 while (TLI.getTypeAction(Ctx, VT) == TargetLoweringBase::TypeSplitVector) 2292 VT = TLI.getTypeToTransformTo(Ctx, VT); 2293 2294 if (Opc != ISD::DELETED_NODE && TLI.isOperationLegalOrCustom(Opc, VT) && 2295 // If the underlying comparison instruction is used by any other instruction, 2296 // the consumed instructions won't be destroyed, so it is not profitable 2297 // to convert to a min/max. 2298 cast<SelectInst>(&I)->getCondition()->hasOneUse()) { 2299 OpCode = Opc; 2300 LHSVal = getValue(LHS); 2301 RHSVal = getValue(RHS); 2302 BaseOps = {}; 2303 } 2304 } 2305 2306 for (unsigned i = 0; i != NumValues; ++i) { 2307 SmallVector<SDValue, 3> Ops(BaseOps.begin(), BaseOps.end()); 2308 Ops.push_back(SDValue(LHSVal.getNode(), LHSVal.getResNo() + i)); 2309 Ops.push_back(SDValue(RHSVal.getNode(), RHSVal.getResNo() + i)); 2310 Values[i] = DAG.getNode(OpCode, getCurSDLoc(), 2311 LHSVal.getNode()->getValueType(LHSVal.getResNo()+i), 2312 Ops); 2313 } 2314 2315 setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(), 2316 DAG.getVTList(ValueVTs), Values)); 2317 } 2318 2319 void SelectionDAGBuilder::visitTrunc(const User &I) { 2320 // TruncInst cannot be a no-op cast because sizeof(src) > sizeof(dest). 2321 SDValue N = getValue(I.getOperand(0)); 2322 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(), 2323 I.getType()); 2324 setValue(&I, DAG.getNode(ISD::TRUNCATE, getCurSDLoc(), DestVT, N)); 2325 } 2326 2327 void SelectionDAGBuilder::visitZExt(const User &I) { 2328 // ZExt cannot be a no-op cast because sizeof(src) < sizeof(dest). 
2329 // ZExt also can't be a cast to bool for same reason. So, nothing much to do 2330 SDValue N = getValue(I.getOperand(0)); 2331 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(), 2332 I.getType()); 2333 setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, getCurSDLoc(), DestVT, N)); 2334 } 2335 2336 void SelectionDAGBuilder::visitSExt(const User &I) { 2337 // SExt cannot be a no-op cast because sizeof(src) < sizeof(dest). 2338 // SExt also can't be a cast to bool for same reason. So, nothing much to do 2339 SDValue N = getValue(I.getOperand(0)); 2340 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(), 2341 I.getType()); 2342 setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, getCurSDLoc(), DestVT, N)); 2343 } 2344 2345 void SelectionDAGBuilder::visitFPTrunc(const User &I) { 2346 // FPTrunc is never a no-op cast, no need to check 2347 SDValue N = getValue(I.getOperand(0)); 2348 SDLoc dl = getCurSDLoc(); 2349 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 2350 EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType()); 2351 setValue(&I, DAG.getNode(ISD::FP_ROUND, dl, DestVT, N, 2352 DAG.getTargetConstant( 2353 0, dl, TLI.getPointerTy(DAG.getDataLayout())))); 2354 } 2355 2356 void SelectionDAGBuilder::visitFPExt(const User &I) { 2357 // FPExt is never a no-op cast, no need to check 2358 SDValue N = getValue(I.getOperand(0)); 2359 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(), 2360 I.getType()); 2361 setValue(&I, DAG.getNode(ISD::FP_EXTEND, getCurSDLoc(), DestVT, N)); 2362 } 2363 2364 void SelectionDAGBuilder::visitFPToUI(const User &I) { 2365 // FPToUI is never a no-op cast, no need to check 2366 SDValue N = getValue(I.getOperand(0)); 2367 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(), 2368 I.getType()); 2369 setValue(&I, DAG.getNode(ISD::FP_TO_UINT, getCurSDLoc(), DestVT, N)); 2370 } 2371 2372 void SelectionDAGBuilder::visitFPToSI(const User &I) { 2373 // FPToSI is never a no-op cast, no need to check 2374 SDValue N = getValue(I.getOperand(0)); 2375 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(), 2376 I.getType()); 2377 setValue(&I, DAG.getNode(ISD::FP_TO_SINT, getCurSDLoc(), DestVT, N)); 2378 } 2379 2380 void SelectionDAGBuilder::visitUIToFP(const User &I) { 2381 // UIToFP is never a no-op cast, no need to check 2382 SDValue N = getValue(I.getOperand(0)); 2383 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(), 2384 I.getType()); 2385 setValue(&I, DAG.getNode(ISD::UINT_TO_FP, getCurSDLoc(), DestVT, N)); 2386 } 2387 2388 void SelectionDAGBuilder::visitSIToFP(const User &I) { 2389 // SIToFP is never a no-op cast, no need to check 2390 SDValue N = getValue(I.getOperand(0)); 2391 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(), 2392 I.getType()); 2393 setValue(&I, DAG.getNode(ISD::SINT_TO_FP, getCurSDLoc(), DestVT, N)); 2394 } 2395 2396 void SelectionDAGBuilder::visitPtrToInt(const User &I) { 2397 // What to do depends on the size of the integer and the size of the pointer. 2398 // We can either truncate, zero extend, or no-op, accordingly. 2399 SDValue N = getValue(I.getOperand(0)); 2400 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(), 2401 I.getType()); 2402 setValue(&I, DAG.getZExtOrTrunc(N, getCurSDLoc(), DestVT)); 2403 } 2404 2405 void SelectionDAGBuilder::visitIntToPtr(const User &I) { 2406 // What to do depends on the size of the integer and the size of the pointer. 
2407 // We can either truncate, zero extend, or no-op, accordingly.
2408 SDValue N = getValue(I.getOperand(0));
2409 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
2410 I.getType());
2411 setValue(&I, DAG.getZExtOrTrunc(N, getCurSDLoc(), DestVT));
2412 }
2413
2414 void SelectionDAGBuilder::visitBitCast(const User &I) {
2415 SDValue N = getValue(I.getOperand(0));
2416 SDLoc dl = getCurSDLoc();
2417 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
2418 I.getType());
2419
2420 // BitCast assures us that source and destination are the same size so this is
2421 // either a BITCAST or a no-op.
2422 if (DestVT != N.getValueType())
2423 setValue(&I, DAG.getNode(ISD::BITCAST, dl,
2424 DestVT, N)); // convert types.
2425 // Check if the original LLVM IR Operand was a ConstantInt, because getValue()
2426 // might fold any kind of constant expression to an integer constant and that
2427 // is not what we are looking for. Only recognize a bitcast of a genuine
2428 // constant integer as an opaque constant.
2429 else if (ConstantInt *C = dyn_cast<ConstantInt>(I.getOperand(0)))
2430 setValue(&I, DAG.getConstant(C->getValue(), dl, DestVT, /*isTarget=*/false,
2431 /*isOpaque*/true));
2432 else
2433 setValue(&I, N); // noop cast.
2434 }
2435
2436 void SelectionDAGBuilder::visitAddrSpaceCast(const User &I) {
2437 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2438 const Value *SV = I.getOperand(0);
2439 SDValue N = getValue(SV);
2440 EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
2441
2442 unsigned SrcAS = SV->getType()->getPointerAddressSpace();
2443 unsigned DestAS = I.getType()->getPointerAddressSpace();
2444
2445 if (!TLI.isNoopAddrSpaceCast(SrcAS, DestAS))
2446 N = DAG.getAddrSpaceCast(getCurSDLoc(), DestVT, N, SrcAS, DestAS);
2447
2448 setValue(&I, N);
2449 }
2450
2451 void SelectionDAGBuilder::visitInsertElement(const User &I) {
2452 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2453 SDValue InVec = getValue(I.getOperand(0));
2454 SDValue InVal = getValue(I.getOperand(1));
2455 SDValue InIdx = DAG.getSExtOrTrunc(getValue(I.getOperand(2)), getCurSDLoc(),
2456 TLI.getVectorIdxTy(DAG.getDataLayout()));
2457 setValue(&I, DAG.getNode(ISD::INSERT_VECTOR_ELT, getCurSDLoc(),
2458 TLI.getValueType(DAG.getDataLayout(), I.getType()),
2459 InVec, InVal, InIdx));
2460 }
2461
2462 void SelectionDAGBuilder::visitExtractElement(const User &I) {
2463 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2464 SDValue InVec = getValue(I.getOperand(0));
2465 SDValue InIdx = DAG.getSExtOrTrunc(getValue(I.getOperand(1)), getCurSDLoc(),
2466 TLI.getVectorIdxTy(DAG.getDataLayout()));
2467 setValue(&I, DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurSDLoc(),
2468 TLI.getValueType(DAG.getDataLayout(), I.getType()),
2469 InVec, InIdx));
2470 }
2471
2472 // Utility for visitShuffleVector - Return true if every element in Mask,
2473 // beginning at position Pos and ending at Pos+Size, falls within the
2474 // specified sequential range [Low, Low+Size), or is undef.
2475 static bool isSequentialInRange(const SmallVectorImpl<int> &Mask, 2476 unsigned Pos, unsigned Size, int Low) { 2477 for (unsigned i = Pos, e = Pos+Size; i != e; ++i, ++Low) 2478 if (Mask[i] >= 0 && Mask[i] != Low) 2479 return false; 2480 return true; 2481 } 2482 2483 void SelectionDAGBuilder::visitShuffleVector(const User &I) { 2484 SDValue Src1 = getValue(I.getOperand(0)); 2485 SDValue Src2 = getValue(I.getOperand(1)); 2486 2487 SmallVector<int, 8> Mask; 2488 ShuffleVectorInst::getShuffleMask(cast<Constant>(I.getOperand(2)), Mask); 2489 unsigned MaskNumElts = Mask.size(); 2490 2491 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 2492 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType()); 2493 EVT SrcVT = Src1.getValueType(); 2494 unsigned SrcNumElts = SrcVT.getVectorNumElements(); 2495 2496 if (SrcNumElts == MaskNumElts) { 2497 setValue(&I, DAG.getVectorShuffle(VT, getCurSDLoc(), Src1, Src2, 2498 &Mask[0])); 2499 return; 2500 } 2501 2502 // Normalize the shuffle vector since mask and vector length don't match. 2503 if (SrcNumElts < MaskNumElts && MaskNumElts % SrcNumElts == 0) { 2504 // Mask is longer than the source vectors and is a multiple of the source 2505 // vectors. We can use concatenate vector to make the mask and vectors 2506 // lengths match. 2507 if (SrcNumElts*2 == MaskNumElts) { 2508 // First check for Src1 in low and Src2 in high 2509 if (isSequentialInRange(Mask, 0, SrcNumElts, 0) && 2510 isSequentialInRange(Mask, SrcNumElts, SrcNumElts, SrcNumElts)) { 2511 // The shuffle is concatenating two vectors together. 2512 setValue(&I, DAG.getNode(ISD::CONCAT_VECTORS, getCurSDLoc(), 2513 VT, Src1, Src2)); 2514 return; 2515 } 2516 // Then check for Src2 in low and Src1 in high 2517 if (isSequentialInRange(Mask, 0, SrcNumElts, SrcNumElts) && 2518 isSequentialInRange(Mask, SrcNumElts, SrcNumElts, 0)) { 2519 // The shuffle is concatenating two vectors together. 2520 setValue(&I, DAG.getNode(ISD::CONCAT_VECTORS, getCurSDLoc(), 2521 VT, Src2, Src1)); 2522 return; 2523 } 2524 } 2525 2526 // Pad both vectors with undefs to make them the same length as the mask. 2527 unsigned NumConcat = MaskNumElts / SrcNumElts; 2528 bool Src1U = Src1.getOpcode() == ISD::UNDEF; 2529 bool Src2U = Src2.getOpcode() == ISD::UNDEF; 2530 SDValue UndefVal = DAG.getUNDEF(SrcVT); 2531 2532 SmallVector<SDValue, 8> MOps1(NumConcat, UndefVal); 2533 SmallVector<SDValue, 8> MOps2(NumConcat, UndefVal); 2534 MOps1[0] = Src1; 2535 MOps2[0] = Src2; 2536 2537 Src1 = Src1U ? DAG.getUNDEF(VT) : DAG.getNode(ISD::CONCAT_VECTORS, 2538 getCurSDLoc(), VT, MOps1); 2539 Src2 = Src2U ? DAG.getUNDEF(VT) : DAG.getNode(ISD::CONCAT_VECTORS, 2540 getCurSDLoc(), VT, MOps2); 2541 2542 // Readjust mask for new input vector length. 2543 SmallVector<int, 8> MappedOps; 2544 for (unsigned i = 0; i != MaskNumElts; ++i) { 2545 int Idx = Mask[i]; 2546 if (Idx >= (int)SrcNumElts) 2547 Idx -= SrcNumElts - MaskNumElts; 2548 MappedOps.push_back(Idx); 2549 } 2550 2551 setValue(&I, DAG.getVectorShuffle(VT, getCurSDLoc(), Src1, Src2, 2552 &MappedOps[0])); 2553 return; 2554 } 2555 2556 if (SrcNumElts > MaskNumElts) { 2557 // Analyze the access pattern of the vector to see if we can extract 2558 // two subvectors and do the shuffle. The analysis is done by calculating 2559 // the range of elements the mask access on both vectors. 
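// Illustrative example: with two 8-element sources and the 4-element mask
// <1,2,9,10>, input 0 is accessed in the range [1,2] and input 1 in [1,2]
// (after subtracting 8). Both ranges fit in a mask-length window starting at
// a multiple of the mask length, so each input can be narrowed with
// EXTRACT_SUBVECTOR before emitting a 4-element shuffle.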
2560 int MinRange[2] = { static_cast<int>(SrcNumElts),
2561 static_cast<int>(SrcNumElts)};
2562 int MaxRange[2] = {-1, -1};
2563
2564 for (unsigned i = 0; i != MaskNumElts; ++i) {
2565 int Idx = Mask[i];
2566 unsigned Input = 0;
2567 if (Idx < 0)
2568 continue;
2569
2570 if (Idx >= (int)SrcNumElts) {
2571 Input = 1;
2572 Idx -= SrcNumElts;
2573 }
2574 if (Idx > MaxRange[Input])
2575 MaxRange[Input] = Idx;
2576 if (Idx < MinRange[Input])
2577 MinRange[Input] = Idx;
2578 }
2579
2580 // Check if the access is smaller than the vector size and whether we can
2581 // find a reasonable extract index.
2582 int RangeUse[2] = { -1, -1 }; // 0 = Unused, 1 = Extract, -1 = Cannot
2583 // extract.
2584 int StartIdx[2]; // StartIdx to extract from
2585 for (unsigned Input = 0; Input < 2; ++Input) {
2586 if (MinRange[Input] >= (int)SrcNumElts && MaxRange[Input] < 0) {
2587 RangeUse[Input] = 0; // Unused
2588 StartIdx[Input] = 0;
2589 continue;
2590 }
2591
2592 // Find a good start index that is a multiple of the mask length. Then
2593 // see if the rest of the elements are in range.
2594 StartIdx[Input] = (MinRange[Input]/MaskNumElts)*MaskNumElts;
2595 if (MaxRange[Input] - StartIdx[Input] < (int)MaskNumElts &&
2596 StartIdx[Input] + MaskNumElts <= SrcNumElts)
2597 RangeUse[Input] = 1; // Extract from a multiple of the mask length.
2598 }
2599
2600 if (RangeUse[0] == 0 && RangeUse[1] == 0) {
2601 setValue(&I, DAG.getUNDEF(VT)); // Vectors are not used.
2602 return;
2603 }
2604 if (RangeUse[0] >= 0 && RangeUse[1] >= 0) {
2605 // Extract appropriate subvector and generate a vector shuffle
2606 for (unsigned Input = 0; Input < 2; ++Input) {
2607 SDValue &Src = Input == 0 ? Src1 : Src2;
2608 if (RangeUse[Input] == 0)
2609 Src = DAG.getUNDEF(VT);
2610 else {
2611 SDLoc dl = getCurSDLoc();
2612 Src = DAG.getNode(
2613 ISD::EXTRACT_SUBVECTOR, dl, VT, Src,
2614 DAG.getConstant(StartIdx[Input], dl,
2615 TLI.getVectorIdxTy(DAG.getDataLayout())));
2616 }
2617 }
2618
2619 // Calculate new mask.
2620 SmallVector<int, 8> MappedOps;
2621 for (unsigned i = 0; i != MaskNumElts; ++i) {
2622 int Idx = Mask[i];
2623 if (Idx >= 0) {
2624 if (Idx < (int)SrcNumElts)
2625 Idx -= StartIdx[0];
2626 else
2627 Idx -= SrcNumElts + StartIdx[1] - MaskNumElts;
2628 }
2629 MappedOps.push_back(Idx);
2630 }
2631
2632 setValue(&I, DAG.getVectorShuffle(VT, getCurSDLoc(), Src1, Src2,
2633 &MappedOps[0]));
2634 return;
2635 }
2636 }
2637
2638 // We can't use either concat vectors or extract subvectors, so fall back
2639 // to replacing the shuffle with per-element extracts from the inputs,
2640 // followed by a build vector of the extracted elements.
2641 EVT EltVT = VT.getVectorElementType();
2642 EVT IdxVT = TLI.getVectorIdxTy(DAG.getDataLayout());
2643 SDLoc dl = getCurSDLoc();
2644 SmallVector<SDValue,8> Ops;
2645 for (unsigned i = 0; i != MaskNumElts; ++i) {
2646 int Idx = Mask[i];
2647 SDValue Res;
2648
2649 if (Idx < 0) {
2650 Res = DAG.getUNDEF(EltVT);
2651 } else {
2652 SDValue &Src = Idx < (int)SrcNumElts ?
Src1 : Src2;
2653 if (Idx >= (int)SrcNumElts) Idx -= SrcNumElts;
2654
2655 Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
2656 EltVT, Src, DAG.getConstant(Idx, dl, IdxVT));
2657 }
2658
2659 Ops.push_back(Res);
2660 }
2661
2662 setValue(&I, DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops));
2663 }
2664
2665 void SelectionDAGBuilder::visitInsertValue(const InsertValueInst &I) {
2666 const Value *Op0 = I.getOperand(0);
2667 const Value *Op1 = I.getOperand(1);
2668 Type *AggTy = I.getType();
2669 Type *ValTy = Op1->getType();
2670 bool IntoUndef = isa<UndefValue>(Op0);
2671 bool FromUndef = isa<UndefValue>(Op1);
2672
2673 unsigned LinearIndex = ComputeLinearIndex(AggTy, I.getIndices());
2674
2675 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2676 SmallVector<EVT, 4> AggValueVTs;
2677 ComputeValueVTs(TLI, DAG.getDataLayout(), AggTy, AggValueVTs);
2678 SmallVector<EVT, 4> ValValueVTs;
2679 ComputeValueVTs(TLI, DAG.getDataLayout(), ValTy, ValValueVTs);
2680
2681 unsigned NumAggValues = AggValueVTs.size();
2682 unsigned NumValValues = ValValueVTs.size();
2683 SmallVector<SDValue, 4> Values(NumAggValues);
2684
2685 // Ignore an insertvalue that produces an empty object
2686 if (!NumAggValues) {
2687 setValue(&I, DAG.getUNDEF(MVT(MVT::Other)));
2688 return;
2689 }
2690
2691 SDValue Agg = getValue(Op0);
2692 unsigned i = 0;
2693 // Copy the beginning value(s) from the original aggregate.
2694 for (; i != LinearIndex; ++i)
2695 Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
2696 SDValue(Agg.getNode(), Agg.getResNo() + i);
2697 // Copy values from the inserted value(s).
2698 if (NumValValues) {
2699 SDValue Val = getValue(Op1);
2700 for (; i != LinearIndex + NumValValues; ++i)
2701 Values[i] = FromUndef ? DAG.getUNDEF(AggValueVTs[i]) :
2702 SDValue(Val.getNode(), Val.getResNo() + i - LinearIndex);
2703 }
2704 // Copy remaining value(s) from the original aggregate.
2705 for (; i != NumAggValues; ++i)
2706 Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
2707 SDValue(Agg.getNode(), Agg.getResNo() + i);
2708
2709 setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
2710 DAG.getVTList(AggValueVTs), Values));
2711 }
2712
2713 void SelectionDAGBuilder::visitExtractValue(const ExtractValueInst &I) {
2714 const Value *Op0 = I.getOperand(0);
2715 Type *AggTy = Op0->getType();
2716 Type *ValTy = I.getType();
2717 bool OutOfUndef = isa<UndefValue>(Op0);
2718
2719 unsigned LinearIndex = ComputeLinearIndex(AggTy, I.getIndices());
2720
2721 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2722 SmallVector<EVT, 4> ValValueVTs;
2723 ComputeValueVTs(TLI, DAG.getDataLayout(), ValTy, ValValueVTs);
2724
2725 unsigned NumValValues = ValValueVTs.size();
2726
2727 // Ignore an extractvalue that produces an empty object
2728 if (!NumValValues) {
2729 setValue(&I, DAG.getUNDEF(MVT(MVT::Other)));
2730 return;
2731 }
2732
2733 SmallVector<SDValue, 4> Values(NumValValues);
2734
2735 SDValue Agg = getValue(Op0);
2736 // Copy out the selected value(s).
2737 for (unsigned i = LinearIndex; i != LinearIndex + NumValValues; ++i)
2738 Values[i - LinearIndex] =
2739 OutOfUndef ?
2740 DAG.getUNDEF(Agg.getNode()->getValueType(Agg.getResNo() + i)) :
2741 SDValue(Agg.getNode(), Agg.getResNo() + i);
2742
2743 setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
2744 DAG.getVTList(ValValueVTs), Values));
2745 }
2746
2747 void SelectionDAGBuilder::visitGetElementPtr(const User &I) {
2748 Value *Op0 = I.getOperand(0);
2749 // Note that the pointer operand may be a vector of pointers.
Take the scalar 2750 // element which holds a pointer. 2751 Type *Ty = Op0->getType()->getScalarType(); 2752 unsigned AS = Ty->getPointerAddressSpace(); 2753 SDValue N = getValue(Op0); 2754 SDLoc dl = getCurSDLoc(); 2755 2756 // Normalize Vector GEP - all scalar operands should be converted to the 2757 // splat vector. 2758 unsigned VectorWidth = I.getType()->isVectorTy() ? 2759 cast<VectorType>(I.getType())->getVectorNumElements() : 0; 2760 2761 if (VectorWidth && !N.getValueType().isVector()) { 2762 MVT VT = MVT::getVectorVT(N.getValueType().getSimpleVT(), VectorWidth); 2763 SmallVector<SDValue, 16> Ops(VectorWidth, N); 2764 N = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops); 2765 } 2766 for (GetElementPtrInst::const_op_iterator OI = I.op_begin()+1, E = I.op_end(); 2767 OI != E; ++OI) { 2768 const Value *Idx = *OI; 2769 if (StructType *StTy = dyn_cast<StructType>(Ty)) { 2770 unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue(); 2771 if (Field) { 2772 // N = N + Offset 2773 uint64_t Offset = DL->getStructLayout(StTy)->getElementOffset(Field); 2774 N = DAG.getNode(ISD::ADD, dl, N.getValueType(), N, 2775 DAG.getConstant(Offset, dl, N.getValueType())); 2776 } 2777 2778 Ty = StTy->getElementType(Field); 2779 } else { 2780 Ty = cast<SequentialType>(Ty)->getElementType(); 2781 MVT PtrTy = 2782 DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout(), AS); 2783 unsigned PtrSize = PtrTy.getSizeInBits(); 2784 APInt ElementSize(PtrSize, DL->getTypeAllocSize(Ty)); 2785 2786 // If this is a scalar constant or a splat vector of constants, 2787 // handle it quickly. 2788 const auto *CI = dyn_cast<ConstantInt>(Idx); 2789 if (!CI && isa<ConstantDataVector>(Idx) && 2790 cast<ConstantDataVector>(Idx)->getSplatValue()) 2791 CI = cast<ConstantInt>(cast<ConstantDataVector>(Idx)->getSplatValue()); 2792 2793 if (CI) { 2794 if (CI->isZero()) 2795 continue; 2796 APInt Offs = ElementSize * CI->getValue().sextOrTrunc(PtrSize); 2797 SDValue OffsVal = VectorWidth ? 2798 DAG.getConstant(Offs, dl, MVT::getVectorVT(PtrTy, VectorWidth)) : 2799 DAG.getConstant(Offs, dl, PtrTy); 2800 N = DAG.getNode(ISD::ADD, dl, N.getValueType(), N, OffsVal); 2801 continue; 2802 } 2803 2804 // N = N + Idx * ElementSize; 2805 SDValue IdxN = getValue(Idx); 2806 2807 if (!IdxN.getValueType().isVector() && VectorWidth) { 2808 MVT VT = MVT::getVectorVT(IdxN.getValueType().getSimpleVT(), VectorWidth); 2809 SmallVector<SDValue, 16> Ops(VectorWidth, IdxN); 2810 IdxN = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops); 2811 } 2812 // If the index is smaller or larger than intptr_t, truncate or extend 2813 // it. 2814 IdxN = DAG.getSExtOrTrunc(IdxN, dl, N.getValueType()); 2815 2816 // If this is a multiply by a power of two, turn it into a shl 2817 // immediately. This is a very common case. 2818 if (ElementSize != 1) { 2819 if (ElementSize.isPowerOf2()) { 2820 unsigned Amt = ElementSize.logBase2(); 2821 IdxN = DAG.getNode(ISD::SHL, dl, 2822 N.getValueType(), IdxN, 2823 DAG.getConstant(Amt, dl, IdxN.getValueType())); 2824 } else { 2825 SDValue Scale = DAG.getConstant(ElementSize, dl, IdxN.getValueType()); 2826 IdxN = DAG.getNode(ISD::MUL, dl, 2827 N.getValueType(), IdxN, Scale); 2828 } 2829 } 2830 2831 N = DAG.getNode(ISD::ADD, dl, 2832 N.getValueType(), N, IdxN); 2833 } 2834 } 2835 2836 setValue(&I, N); 2837 } 2838 2839 void SelectionDAGBuilder::visitAlloca(const AllocaInst &I) { 2840 // If this is a fixed sized alloca in the entry block of the function, 2841 // allocate it statically on the stack. 
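// Illustrative example: "%buf = alloca [64 x i8]" in the entry block was
// already given a fixed frame index, so nothing is emitted for it here; only
// variable-sized allocas, or allocas outside the entry block, reach the
// DYNAMIC_STACKALLOC lowering below.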
2842 if (FuncInfo.StaticAllocaMap.count(&I))
2843 return; // getValue will auto-populate this.
2844
2845 SDLoc dl = getCurSDLoc();
2846 Type *Ty = I.getAllocatedType();
2847 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2848 auto &DL = DAG.getDataLayout();
2849 uint64_t TySize = DL.getTypeAllocSize(Ty);
2850 unsigned Align =
2851 std::max((unsigned)DL.getPrefTypeAlignment(Ty), I.getAlignment());
2852
2853 SDValue AllocSize = getValue(I.getArraySize());
2854
2855 EVT IntPtr = TLI.getPointerTy(DAG.getDataLayout());
2856 if (AllocSize.getValueType() != IntPtr)
2857 AllocSize = DAG.getZExtOrTrunc(AllocSize, dl, IntPtr);
2858
2859 AllocSize = DAG.getNode(ISD::MUL, dl, IntPtr,
2860 AllocSize,
2861 DAG.getConstant(TySize, dl, IntPtr));
2862
2863 // Handle alignment. If the requested alignment is less than or equal to
2864 // the stack alignment, ignore it. If the requested alignment is greater
2865 // than the stack alignment, we note this in the DYNAMIC_STACKALLOC node.
2866 unsigned StackAlign =
2867 DAG.getSubtarget().getFrameLowering()->getStackAlignment();
2868 if (Align <= StackAlign)
2869 Align = 0;
2870
2871 // Round the size of the allocation up to the stack alignment size
2872 // by adding SA-1 to the size.
2873 AllocSize = DAG.getNode(ISD::ADD, dl,
2874 AllocSize.getValueType(), AllocSize,
2875 DAG.getIntPtrConstant(StackAlign - 1, dl));
2876
2877 // Mask out the low bits for alignment purposes.
2878 AllocSize = DAG.getNode(ISD::AND, dl,
2879 AllocSize.getValueType(), AllocSize,
2880 DAG.getIntPtrConstant(~(uint64_t)(StackAlign - 1),
2881 dl));
2882
2883 SDValue Ops[] = { getRoot(), AllocSize, DAG.getIntPtrConstant(Align, dl) };
2884 SDVTList VTs = DAG.getVTList(AllocSize.getValueType(), MVT::Other);
2885 SDValue DSA = DAG.getNode(ISD::DYNAMIC_STACKALLOC, dl, VTs, Ops);
2886 setValue(&I, DSA);
2887 DAG.setRoot(DSA.getValue(1));
2888
2889 assert(FuncInfo.MF->getFrameInfo()->hasVarSizedObjects());
2890 }
2891
2892 void SelectionDAGBuilder::visitLoad(const LoadInst &I) {
2893 if (I.isAtomic())
2894 return visitAtomicLoad(I);
2895
2896 const Value *SV = I.getOperand(0);
2897 SDValue Ptr = getValue(SV);
2898
2899 Type *Ty = I.getType();
2900
2901 bool isVolatile = I.isVolatile();
2902 bool isNonTemporal = I.getMetadata(LLVMContext::MD_nontemporal) != nullptr;
2903
2904 // The IR notion of invariant_load only guarantees that all *non-faulting*
2905 // invariant loads result in the same value. The MI notion of invariant load
2906 // guarantees that the load can be legally moved to any location within its
2907 // containing function. The MI notion of invariant_load is stronger than the
2908 // IR notion of invariant_load -- an MI invariant_load is an IR invariant_load
2909 // with a guarantee that the location being loaded from is dereferenceable
2910 // throughout the function's lifetime.
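// Illustrative consequence: a load tagged !invariant.load may still fault if
// its pointer is not dereferenceable, so the MI-level invariant flag is only
// set below when isDereferenceablePointer also holds.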
2911 2912 bool isInvariant = I.getMetadata(LLVMContext::MD_invariant_load) != nullptr && 2913 isDereferenceablePointer(SV, *DAG.getTarget().getDataLayout()); 2914 unsigned Alignment = I.getAlignment(); 2915 2916 AAMDNodes AAInfo; 2917 I.getAAMetadata(AAInfo); 2918 const MDNode *Ranges = I.getMetadata(LLVMContext::MD_range); 2919 2920 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 2921 SmallVector<EVT, 4> ValueVTs; 2922 SmallVector<uint64_t, 4> Offsets; 2923 ComputeValueVTs(TLI, DAG.getDataLayout(), Ty, ValueVTs, &Offsets); 2924 unsigned NumValues = ValueVTs.size(); 2925 if (NumValues == 0) 2926 return; 2927 2928 SDValue Root; 2929 bool ConstantMemory = false; 2930 if (isVolatile || NumValues > MaxParallelChains) 2931 // Serialize volatile loads with other side effects. 2932 Root = getRoot(); 2933 else if (AA->pointsToConstantMemory( 2934 MemoryLocation(SV, AA->getTypeStoreSize(Ty), AAInfo))) { 2935 // Do not serialize (non-volatile) loads of constant memory with anything. 2936 Root = DAG.getEntryNode(); 2937 ConstantMemory = true; 2938 } else { 2939 // Do not serialize non-volatile loads against each other. 2940 Root = DAG.getRoot(); 2941 } 2942 2943 SDLoc dl = getCurSDLoc(); 2944 2945 if (isVolatile) 2946 Root = TLI.prepareVolatileOrAtomicLoad(Root, dl, DAG); 2947 2948 SmallVector<SDValue, 4> Values(NumValues); 2949 SmallVector<SDValue, 4> Chains(std::min(MaxParallelChains, NumValues)); 2950 EVT PtrVT = Ptr.getValueType(); 2951 unsigned ChainI = 0; 2952 for (unsigned i = 0; i != NumValues; ++i, ++ChainI) { 2953 // Serializing loads here may result in excessive register pressure, and 2954 // TokenFactor places arbitrary choke points on the scheduler. SD scheduling 2955 // could recover a bit by hoisting nodes upward in the chain by recognizing 2956 // they are side-effect free or do not alias. The optimizer should really 2957 // avoid this case by converting large object/array copies to llvm.memcpy 2958 // (MaxParallelChains should always remain as failsafe). 2959 if (ChainI == MaxParallelChains) { 2960 assert(PendingLoads.empty() && "PendingLoads must be serialized first"); 2961 SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 2962 makeArrayRef(Chains.data(), ChainI)); 2963 Root = Chain; 2964 ChainI = 0; 2965 } 2966 SDValue A = DAG.getNode(ISD::ADD, dl, 2967 PtrVT, Ptr, 2968 DAG.getConstant(Offsets[i], dl, PtrVT)); 2969 SDValue L = DAG.getLoad(ValueVTs[i], dl, Root, 2970 A, MachinePointerInfo(SV, Offsets[i]), isVolatile, 2971 isNonTemporal, isInvariant, Alignment, AAInfo, 2972 Ranges); 2973 2974 Values[i] = L; 2975 Chains[ChainI] = L.getValue(1); 2976 } 2977 2978 if (!ConstantMemory) { 2979 SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 2980 makeArrayRef(Chains.data(), ChainI)); 2981 if (isVolatile) 2982 DAG.setRoot(Chain); 2983 else 2984 PendingLoads.push_back(Chain); 2985 } 2986 2987 setValue(&I, DAG.getNode(ISD::MERGE_VALUES, dl, 2988 DAG.getVTList(ValueVTs), Values)); 2989 } 2990 2991 void SelectionDAGBuilder::visitStore(const StoreInst &I) { 2992 if (I.isAtomic()) 2993 return visitAtomicStore(I); 2994 2995 const Value *SrcV = I.getOperand(0); 2996 const Value *PtrV = I.getOperand(1); 2997 2998 SmallVector<EVT, 4> ValueVTs; 2999 SmallVector<uint64_t, 4> Offsets; 3000 ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(), 3001 SrcV->getType(), ValueVTs, &Offsets); 3002 unsigned NumValues = ValueVTs.size(); 3003 if (NumValues == 0) 3004 return; 3005 3006 // Get the lowered operands. 
Note that we do this after
3007 // checking if NumValues is zero, because with zero values
3008 // the operands won't have values in the map.
3009 SDValue Src = getValue(SrcV);
3010 SDValue Ptr = getValue(PtrV);
3011
3012 SDValue Root = getRoot();
3013 SmallVector<SDValue, 4> Chains(std::min(MaxParallelChains, NumValues));
3014 EVT PtrVT = Ptr.getValueType();
3015 bool isVolatile = I.isVolatile();
3016 bool isNonTemporal = I.getMetadata(LLVMContext::MD_nontemporal) != nullptr;
3017 unsigned Alignment = I.getAlignment();
3018 SDLoc dl = getCurSDLoc();
3019
3020 AAMDNodes AAInfo;
3021 I.getAAMetadata(AAInfo);
3022
3023 unsigned ChainI = 0;
3024 for (unsigned i = 0; i != NumValues; ++i, ++ChainI) {
3025 // See visitLoad comments.
3026 if (ChainI == MaxParallelChains) {
3027 SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
3028 makeArrayRef(Chains.data(), ChainI));
3029 Root = Chain;
3030 ChainI = 0;
3031 }
3032 SDValue Add = DAG.getNode(ISD::ADD, dl, PtrVT, Ptr,
3033 DAG.getConstant(Offsets[i], dl, PtrVT));
3034 SDValue St = DAG.getStore(Root, dl,
3035 SDValue(Src.getNode(), Src.getResNo() + i),
3036 Add, MachinePointerInfo(PtrV, Offsets[i]),
3037 isVolatile, isNonTemporal, Alignment, AAInfo);
3038 Chains[ChainI] = St;
3039 }
3040
3041 SDValue StoreNode = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
3042 makeArrayRef(Chains.data(), ChainI));
3043 DAG.setRoot(StoreNode);
3044 }
3045
3046 void SelectionDAGBuilder::visitMaskedStore(const CallInst &I) {
3047 SDLoc sdl = getCurSDLoc();
3048
3049 // llvm.masked.store.*(Src0, Ptr, alignment, Mask)
3050 Value *PtrOperand = I.getArgOperand(1);
3051 SDValue Ptr = getValue(PtrOperand);
3052 SDValue Src0 = getValue(I.getArgOperand(0));
3053 SDValue Mask = getValue(I.getArgOperand(3));
3054 EVT VT = Src0.getValueType();
3055 unsigned Alignment = (cast<ConstantInt>(I.getArgOperand(2)))->getZExtValue();
3056 if (!Alignment)
3057 Alignment = DAG.getEVTAlignment(VT);
3058
3059 AAMDNodes AAInfo;
3060 I.getAAMetadata(AAInfo);
3061
3062 MachineMemOperand *MMO =
3063 DAG.getMachineFunction().
3064 getMachineMemOperand(MachinePointerInfo(PtrOperand),
3065 MachineMemOperand::MOStore, VT.getStoreSize(),
3066 Alignment, AAInfo);
3067 SDValue StoreNode = DAG.getMaskedStore(getRoot(), sdl, Src0, Ptr, Mask, VT,
3068 MMO, false);
3069 DAG.setRoot(StoreNode);
3070 setValue(&I, StoreNode);
3071 }
3072
3073 // Gather/scatter receive a vector of pointers.
3074 // This vector of pointers may be representable as a base pointer plus a 3075 // vector of indices; whether it is depends on the GEP and on the instruction 3076 // preceding the GEP that computes the indices. 3077 static bool getUniformBase(Value *& Ptr, SDValue& Base, SDValue& Index, 3078 SelectionDAGBuilder* SDB) { 3079 3080 assert(Ptr->getType()->isVectorTy() && "Unexpected pointer type"); 3081 GetElementPtrInst *Gep = dyn_cast<GetElementPtrInst>(Ptr); 3082 if (!Gep || Gep->getNumOperands() > 2) 3083 return false; 3084 ShuffleVectorInst *ShuffleInst = 3085 dyn_cast<ShuffleVectorInst>(Gep->getPointerOperand()); 3086 if (!ShuffleInst || !ShuffleInst->getMask()->isNullValue() || 3087 cast<Instruction>(ShuffleInst->getOperand(0))->getOpcode() != 3088 Instruction::InsertElement) 3089 return false; 3090 3091 Ptr = cast<InsertElementInst>(ShuffleInst->getOperand(0))->getOperand(1); 3092 3093 SelectionDAG& DAG = SDB->DAG; 3094 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 3095 // Check if the Ptr is inside the current basic block. 3096 // If not, look for the shuffle instruction. 3097 if (SDB->findValue(Ptr)) 3098 Base = SDB->getValue(Ptr); 3099 else if (SDB->findValue(ShuffleInst)) { 3100 SDValue ShuffleNode = SDB->getValue(ShuffleInst); 3101 SDLoc sdl = ShuffleNode; 3102 Base = DAG.getNode( 3103 ISD::EXTRACT_VECTOR_ELT, sdl, 3104 ShuffleNode.getValueType().getScalarType(), ShuffleNode, 3105 DAG.getConstant(0, sdl, TLI.getVectorIdxTy(DAG.getDataLayout()))); 3106 SDB->setValue(Ptr, Base); 3107 } 3108 else 3109 return false; 3110 3111 Value *IndexVal = Gep->getOperand(1); 3112 if (SDB->findValue(IndexVal)) { 3113 Index = SDB->getValue(IndexVal); 3114 3115 if (SExtInst* Sext = dyn_cast<SExtInst>(IndexVal)) { 3116 IndexVal = Sext->getOperand(0); 3117 if (SDB->findValue(IndexVal)) 3118 Index = SDB->getValue(IndexVal); 3119 } 3120 return true; 3121 } 3122 return false; 3123 } 3124 3125 void SelectionDAGBuilder::visitMaskedScatter(const CallInst &I) { 3126 SDLoc sdl = getCurSDLoc(); 3127 3128 // llvm.masked.scatter.*(Src0, Ptrs, alignment, Mask) 3129 Value *Ptr = I.getArgOperand(1); 3130 SDValue Src0 = getValue(I.getArgOperand(0)); 3131 SDValue Mask = getValue(I.getArgOperand(3)); 3132 EVT VT = Src0.getValueType(); 3133 unsigned Alignment = (cast<ConstantInt>(I.getArgOperand(2)))->getZExtValue(); 3134 if (!Alignment) 3135 Alignment = DAG.getEVTAlignment(VT); 3136 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 3137 3138 AAMDNodes AAInfo; 3139 I.getAAMetadata(AAInfo); 3140 3141 SDValue Base; 3142 SDValue Index; 3143 Value *BasePtr = Ptr; 3144 bool UniformBase = getUniformBase(BasePtr, Base, Index, this); 3145 3146 Value *MemOpBasePtr = UniformBase ? BasePtr : nullptr; 3147 MachineMemOperand *MMO = DAG.getMachineFunction().
3148 getMachineMemOperand(MachinePointerInfo(MemOpBasePtr), 3149 MachineMemOperand::MOStore, VT.getStoreSize(), 3150 Alignment, AAInfo); 3151 if (!UniformBase) { 3152 Base = DAG.getTargetConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout())); 3153 Index = getValue(Ptr); 3154 } 3155 SDValue Ops[] = { getRoot(), Src0, Mask, Base, Index }; 3156 SDValue Scatter = DAG.getMaskedScatter(DAG.getVTList(MVT::Other), VT, sdl, 3157 Ops, MMO); 3158 DAG.setRoot(Scatter); 3159 setValue(&I, Scatter); 3160 } 3161 3162 void SelectionDAGBuilder::visitMaskedLoad(const CallInst &I) { 3163 SDLoc sdl = getCurSDLoc(); 3164 3165 // @llvm.masked.load.*(Ptr, alignment, Mask, Src0) 3166 Value *PtrOperand = I.getArgOperand(0); 3167 SDValue Ptr = getValue(PtrOperand); 3168 SDValue Src0 = getValue(I.getArgOperand(3)); 3169 SDValue Mask = getValue(I.getArgOperand(2)); 3170 3171 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 3172 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType()); 3173 unsigned Alignment = (cast<ConstantInt>(I.getArgOperand(1)))->getZExtValue(); 3174 if (!Alignment) 3175 Alignment = DAG.getEVTAlignment(VT); 3176 3177 AAMDNodes AAInfo; 3178 I.getAAMetadata(AAInfo); 3179 const MDNode *Ranges = I.getMetadata(LLVMContext::MD_range); 3180 3181 SDValue InChain = DAG.getRoot(); 3182 if (AA->pointsToConstantMemory(MemoryLocation( 3183 PtrOperand, AA->getTypeStoreSize(I.getType()), AAInfo))) { 3184 // Do not serialize (non-volatile) loads of constant memory with anything. 3185 InChain = DAG.getEntryNode(); 3186 } 3187 3188 MachineMemOperand *MMO = 3189 DAG.getMachineFunction(). 3190 getMachineMemOperand(MachinePointerInfo(PtrOperand), 3191 MachineMemOperand::MOLoad, VT.getStoreSize(), 3192 Alignment, AAInfo, Ranges); 3193 3194 SDValue Load = DAG.getMaskedLoad(VT, sdl, InChain, Ptr, Mask, Src0, VT, MMO, 3195 ISD::NON_EXTLOAD); 3196 SDValue OutChain = Load.getValue(1); 3197 DAG.setRoot(OutChain); 3198 setValue(&I, Load); 3199 } 3200 3201 void SelectionDAGBuilder::visitMaskedGather(const CallInst &I) { 3202 SDLoc sdl = getCurSDLoc(); 3203 3204 // @llvm.masked.gather.*(Ptrs, alignment, Mask, Src0) 3205 Value *Ptr = I.getArgOperand(0); 3206 SDValue Src0 = getValue(I.getArgOperand(3)); 3207 SDValue Mask = getValue(I.getArgOperand(2)); 3208 3209 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 3210 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType()); 3211 unsigned Alignment = (cast<ConstantInt>(I.getArgOperand(1)))->getZExtValue(); 3212 if (!Alignment) 3213 Alignment = DAG.getEVTAlignment(VT); 3214 3215 AAMDNodes AAInfo; 3216 I.getAAMetadata(AAInfo); 3217 const MDNode *Ranges = I.getMetadata(LLVMContext::MD_range); 3218 3219 SDValue Root = DAG.getRoot(); 3220 SDValue Base; 3221 SDValue Index; 3222 Value *BasePtr = Ptr; 3223 bool UniformBase = getUniformBase(BasePtr, Base, Index, this); 3224 bool ConstantMemory = false; 3225 if (UniformBase && 3226 AA->pointsToConstantMemory( 3227 MemoryLocation(BasePtr, AA->getTypeStoreSize(I.getType()), AAInfo))) { 3228 // Do not serialize (non-volatile) loads of constant memory with anything. 3229 Root = DAG.getEntryNode(); 3230 ConstantMemory = true; 3231 } 3232 3233 MachineMemOperand *MMO = 3234 DAG.getMachineFunction(). 3235 getMachineMemOperand(MachinePointerInfo(UniformBase ? 
BasePtr : nullptr), 3236 MachineMemOperand::MOLoad, VT.getStoreSize(), 3237 Alignment, AAInfo, Ranges); 3238 3239 if (!UniformBase) { 3240 Base = DAG.getTargetConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout())); 3241 Index = getValue(Ptr); 3242 } 3243 SDValue Ops[] = { Root, Src0, Mask, Base, Index }; 3244 SDValue Gather = DAG.getMaskedGather(DAG.getVTList(VT, MVT::Other), VT, sdl, 3245 Ops, MMO); 3246 3247 SDValue OutChain = Gather.getValue(1); 3248 if (!ConstantMemory) 3249 PendingLoads.push_back(OutChain); 3250 setValue(&I, Gather); 3251 } 3252 3253 void SelectionDAGBuilder::visitAtomicCmpXchg(const AtomicCmpXchgInst &I) { 3254 SDLoc dl = getCurSDLoc(); 3255 AtomicOrdering SuccessOrder = I.getSuccessOrdering(); 3256 AtomicOrdering FailureOrder = I.getFailureOrdering(); 3257 SynchronizationScope Scope = I.getSynchScope(); 3258 3259 SDValue InChain = getRoot(); 3260 3261 MVT MemVT = getValue(I.getCompareOperand()).getSimpleValueType(); 3262 SDVTList VTs = DAG.getVTList(MemVT, MVT::i1, MVT::Other); 3263 SDValue L = DAG.getAtomicCmpSwap( 3264 ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, dl, MemVT, VTs, InChain, 3265 getValue(I.getPointerOperand()), getValue(I.getCompareOperand()), 3266 getValue(I.getNewValOperand()), MachinePointerInfo(I.getPointerOperand()), 3267 /*Alignment=*/ 0, SuccessOrder, FailureOrder, Scope); 3268 3269 SDValue OutChain = L.getValue(2); 3270 3271 setValue(&I, L); 3272 DAG.setRoot(OutChain); 3273 } 3274 3275 void SelectionDAGBuilder::visitAtomicRMW(const AtomicRMWInst &I) { 3276 SDLoc dl = getCurSDLoc(); 3277 ISD::NodeType NT; 3278 switch (I.getOperation()) { 3279 default: llvm_unreachable("Unknown atomicrmw operation"); 3280 case AtomicRMWInst::Xchg: NT = ISD::ATOMIC_SWAP; break; 3281 case AtomicRMWInst::Add: NT = ISD::ATOMIC_LOAD_ADD; break; 3282 case AtomicRMWInst::Sub: NT = ISD::ATOMIC_LOAD_SUB; break; 3283 case AtomicRMWInst::And: NT = ISD::ATOMIC_LOAD_AND; break; 3284 case AtomicRMWInst::Nand: NT = ISD::ATOMIC_LOAD_NAND; break; 3285 case AtomicRMWInst::Or: NT = ISD::ATOMIC_LOAD_OR; break; 3286 case AtomicRMWInst::Xor: NT = ISD::ATOMIC_LOAD_XOR; break; 3287 case AtomicRMWInst::Max: NT = ISD::ATOMIC_LOAD_MAX; break; 3288 case AtomicRMWInst::Min: NT = ISD::ATOMIC_LOAD_MIN; break; 3289 case AtomicRMWInst::UMax: NT = ISD::ATOMIC_LOAD_UMAX; break; 3290 case AtomicRMWInst::UMin: NT = ISD::ATOMIC_LOAD_UMIN; break; 3291 } 3292 AtomicOrdering Order = I.getOrdering(); 3293 SynchronizationScope Scope = I.getSynchScope(); 3294 3295 SDValue InChain = getRoot(); 3296 3297 SDValue L = 3298 DAG.getAtomic(NT, dl, 3299 getValue(I.getValOperand()).getSimpleValueType(), 3300 InChain, 3301 getValue(I.getPointerOperand()), 3302 getValue(I.getValOperand()), 3303 I.getPointerOperand(), 3304 /* Alignment=*/ 0, Order, Scope); 3305 3306 SDValue OutChain = L.getValue(1); 3307 3308 setValue(&I, L); 3309 DAG.setRoot(OutChain); 3310 } 3311 3312 void SelectionDAGBuilder::visitFence(const FenceInst &I) { 3313 SDLoc dl = getCurSDLoc(); 3314 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 3315 SDValue Ops[3]; 3316 Ops[0] = getRoot(); 3317 Ops[1] = DAG.getConstant(I.getOrdering(), dl, 3318 TLI.getPointerTy(DAG.getDataLayout())); 3319 Ops[2] = DAG.getConstant(I.getSynchScope(), dl, 3320 TLI.getPointerTy(DAG.getDataLayout())); 3321 DAG.setRoot(DAG.getNode(ISD::ATOMIC_FENCE, dl, MVT::Other, Ops)); 3322 } 3323 3324 void SelectionDAGBuilder::visitAtomicLoad(const LoadInst &I) { 3325 SDLoc dl = getCurSDLoc(); 3326 AtomicOrdering Order = I.getOrdering(); 3327 SynchronizationScope Scope = 
I.getSynchScope(); 3328 3329 SDValue InChain = getRoot(); 3330 3331 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 3332 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType()); 3333 3334 if (I.getAlignment() < VT.getSizeInBits() / 8) 3335 report_fatal_error("Cannot generate unaligned atomic load"); 3336 3337 MachineMemOperand *MMO = 3338 DAG.getMachineFunction(). 3339 getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()), 3340 MachineMemOperand::MOVolatile | 3341 MachineMemOperand::MOLoad, 3342 VT.getStoreSize(), 3343 I.getAlignment() ? I.getAlignment() : 3344 DAG.getEVTAlignment(VT)); 3345 3346 InChain = TLI.prepareVolatileOrAtomicLoad(InChain, dl, DAG); 3347 SDValue L = 3348 DAG.getAtomic(ISD::ATOMIC_LOAD, dl, VT, VT, InChain, 3349 getValue(I.getPointerOperand()), MMO, 3350 Order, Scope); 3351 3352 SDValue OutChain = L.getValue(1); 3353 3354 setValue(&I, L); 3355 DAG.setRoot(OutChain); 3356 } 3357 3358 void SelectionDAGBuilder::visitAtomicStore(const StoreInst &I) { 3359 SDLoc dl = getCurSDLoc(); 3360 3361 AtomicOrdering Order = I.getOrdering(); 3362 SynchronizationScope Scope = I.getSynchScope(); 3363 3364 SDValue InChain = getRoot(); 3365 3366 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 3367 EVT VT = 3368 TLI.getValueType(DAG.getDataLayout(), I.getValueOperand()->getType()); 3369 3370 if (I.getAlignment() < VT.getSizeInBits() / 8) 3371 report_fatal_error("Cannot generate unaligned atomic store"); 3372 3373 SDValue OutChain = 3374 DAG.getAtomic(ISD::ATOMIC_STORE, dl, VT, 3375 InChain, 3376 getValue(I.getPointerOperand()), 3377 getValue(I.getValueOperand()), 3378 I.getPointerOperand(), I.getAlignment(), 3379 Order, Scope); 3380 3381 DAG.setRoot(OutChain); 3382 } 3383 3384 /// visitTargetIntrinsic - Lower a call of a target intrinsic to an INTRINSIC 3385 /// node. 3386 void SelectionDAGBuilder::visitTargetIntrinsic(const CallInst &I, 3387 unsigned Intrinsic) { 3388 bool HasChain = !I.doesNotAccessMemory(); 3389 bool OnlyLoad = HasChain && I.onlyReadsMemory(); 3390 3391 // Build the operand list. 3392 SmallVector<SDValue, 8> Ops; 3393 if (HasChain) { // If this intrinsic has side-effects, chainify it. 3394 if (OnlyLoad) { 3395 // We don't need to serialize loads against other loads. 3396 Ops.push_back(DAG.getRoot()); 3397 } else { 3398 Ops.push_back(getRoot()); 3399 } 3400 } 3401 3402 // Info is set by getTgtMemIntrinsic. 3403 TargetLowering::IntrinsicInfo Info; 3404 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 3405 bool IsTgtIntrinsic = TLI.getTgtMemIntrinsic(Info, I, Intrinsic); 3406 3407 // Add the intrinsic ID as an integer operand if it's not a target intrinsic. 3408 if (!IsTgtIntrinsic || Info.opc == ISD::INTRINSIC_VOID || 3409 Info.opc == ISD::INTRINSIC_W_CHAIN) 3410 Ops.push_back(DAG.getTargetConstant(Intrinsic, getCurSDLoc(), 3411 TLI.getPointerTy(DAG.getDataLayout()))); 3412 3413 // Add all operands of the call to the operand list. 3414 for (unsigned i = 0, e = I.getNumArgOperands(); i != e; ++i) { 3415 SDValue Op = getValue(I.getArgOperand(i)); 3416 Ops.push_back(Op); 3417 } 3418 3419 SmallVector<EVT, 4> ValueVTs; 3420 ComputeValueVTs(TLI, DAG.getDataLayout(), I.getType(), ValueVTs); 3421 3422 if (HasChain) 3423 ValueVTs.push_back(MVT::Other); 3424 3425 SDVTList VTs = DAG.getVTList(ValueVTs); 3426 3427 // Create the node.
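// A memory-touching target intrinsic is built below as a MemIntrinsicNode so it carries a MachineMemOperand; otherwise the opcode is chosen by whether the call is chained and whether it produces a result.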
3428 SDValue Result; 3429 if (IsTgtIntrinsic) { 3430 // This is a target intrinsic that touches memory. 3431 Result = DAG.getMemIntrinsicNode(Info.opc, getCurSDLoc(), 3432 VTs, Ops, Info.memVT, 3433 MachinePointerInfo(Info.ptrVal, Info.offset), 3434 Info.align, Info.vol, 3435 Info.readMem, Info.writeMem, Info.size); 3436 } else if (!HasChain) { 3437 Result = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, getCurSDLoc(), VTs, Ops); 3438 } else if (!I.getType()->isVoidTy()) { 3439 Result = DAG.getNode(ISD::INTRINSIC_W_CHAIN, getCurSDLoc(), VTs, Ops); 3440 } else { 3441 Result = DAG.getNode(ISD::INTRINSIC_VOID, getCurSDLoc(), VTs, Ops); 3442 } 3443 3444 if (HasChain) { 3445 SDValue Chain = Result.getValue(Result.getNode()->getNumValues()-1); 3446 if (OnlyLoad) 3447 PendingLoads.push_back(Chain); 3448 else 3449 DAG.setRoot(Chain); 3450 } 3451 3452 if (!I.getType()->isVoidTy()) { 3453 if (VectorType *PTy = dyn_cast<VectorType>(I.getType())) { 3454 EVT VT = TLI.getValueType(DAG.getDataLayout(), PTy); 3455 Result = DAG.getNode(ISD::BITCAST, getCurSDLoc(), VT, Result); 3456 } 3457 3458 setValue(&I, Result); 3459 } 3460 } 3461 3462 /// GetSignificand - Get the significand and build it into a floating-point 3463 /// number with exponent of 1: 3464 /// 3465 /// Op = (Op & 0x007fffff) | 0x3f800000; 3466 /// 3467 /// where Op is the i32 bit pattern of the floating-point value. 3468 static SDValue 3469 GetSignificand(SelectionDAG &DAG, SDValue Op, SDLoc dl) { 3470 SDValue t1 = DAG.getNode(ISD::AND, dl, MVT::i32, Op, 3471 DAG.getConstant(0x007fffff, dl, MVT::i32)); 3472 SDValue t2 = DAG.getNode(ISD::OR, dl, MVT::i32, t1, 3473 DAG.getConstant(0x3f800000, dl, MVT::i32)); 3474 return DAG.getNode(ISD::BITCAST, dl, MVT::f32, t2); 3475 } 3476 3477 /// GetExponent - Get the exponent: 3478 /// 3479 /// (float)(int)(((Op & 0x7f800000) >> 23) - 127); 3480 /// 3481 /// where Op is the i32 bit pattern of the floating-point value. 3482 static SDValue 3483 GetExponent(SelectionDAG &DAG, SDValue Op, const TargetLowering &TLI, 3484 SDLoc dl) { 3485 SDValue t0 = DAG.getNode(ISD::AND, dl, MVT::i32, Op, 3486 DAG.getConstant(0x7f800000, dl, MVT::i32)); 3487 SDValue t1 = DAG.getNode( 3488 ISD::SRL, dl, MVT::i32, t0, 3489 DAG.getConstant(23, dl, TLI.getPointerTy(DAG.getDataLayout()))); 3490 SDValue t2 = DAG.getNode(ISD::SUB, dl, MVT::i32, t1, 3491 DAG.getConstant(127, dl, MVT::i32)); 3492 return DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, t2); 3493 } 3494 3495 /// getF32Constant - Get a 32-bit floating point constant.
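/// For example, getF32Constant(DAG, 0x3f800000, dl) yields 1.0f and getF32Constant(DAG, 0x40000000, dl) yields 2.0f; the i32 bits are reinterpreted as an IEEE-754 single.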
3496 static SDValue 3497 getF32Constant(SelectionDAG &DAG, unsigned Flt, SDLoc dl) { 3498 return DAG.getConstantFP(APFloat(APFloat::IEEEsingle, APInt(32, Flt)), dl, 3499 MVT::f32); 3500 } 3501 3502 static SDValue getLimitedPrecisionExp2(SDValue t0, SDLoc dl, 3503 SelectionDAG &DAG) { 3504 // IntegerPartOfX = (int32_t)t0; 3505 SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, t0); 3506 3507 // FractionalPartOfX = t0 - (float)IntegerPartOfX; 3508 SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, IntegerPartOfX); 3509 SDValue X = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0, t1); 3510 3511 // IntegerPartOfX <<= 23; 3512 IntegerPartOfX = DAG.getNode( 3513 ISD::SHL, dl, MVT::i32, IntegerPartOfX, 3514 DAG.getConstant(23, dl, DAG.getTargetLoweringInfo().getPointerTy( 3515 DAG.getDataLayout()))); 3516 3517 SDValue TwoToFractionalPartOfX; 3518 if (LimitFloatPrecision <= 6) { 3519 // For floating-point precision of 6: 3520 // 3521 // TwoToFractionalPartOfX = 3522 // 0.997535578f + 3523 // (0.735607626f + 0.252464424f * x) * x; 3524 // 3525 // error 0.0144103317, which is 6 bits 3526 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X, 3527 getF32Constant(DAG, 0x3e814304, dl)); 3528 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2, 3529 getF32Constant(DAG, 0x3f3c50c8, dl)); 3530 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X); 3531 TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t4, 3532 getF32Constant(DAG, 0x3f7f5e7e, dl)); 3533 } else if (LimitFloatPrecision <= 12) { 3534 // For floating-point precision of 12: 3535 // 3536 // TwoToFractionalPartOfX = 3537 // 0.999892986f + 3538 // (0.696457318f + 3539 // (0.224338339f + 0.792043434e-1f * x) * x) * x; 3540 // 3541 // error 0.000107046256, which is 13 to 14 bits 3542 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X, 3543 getF32Constant(DAG, 0x3da235e3, dl)); 3544 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2, 3545 getF32Constant(DAG, 0x3e65b8f3, dl)); 3546 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X); 3547 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4, 3548 getF32Constant(DAG, 0x3f324b07, dl)); 3549 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X); 3550 TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t6, 3551 getF32Constant(DAG, 0x3f7ff8fd, dl)); 3552 } else { // LimitFloatPrecision <= 18 3553 // For floating-point precision of 18: 3554 // 3555 // TwoToFractionalPartOfX = 3556 // 0.999999982f + 3557 // (0.693148872f + 3558 // (0.240227044f + 3559 // (0.554906021e-1f + 3560 // (0.961591928e-2f + 3561 // (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x; 3562 // error 2.47208000*10^(-7), which is better than 18 bits 3563 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X, 3564 getF32Constant(DAG, 0x3924b03e, dl)); 3565 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2, 3566 getF32Constant(DAG, 0x3ab24b87, dl)); 3567 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X); 3568 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4, 3569 getF32Constant(DAG, 0x3c1d8c17, dl)); 3570 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X); 3571 SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6, 3572 getF32Constant(DAG, 0x3d634a1d, dl)); 3573 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X); 3574 SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8, 3575 getF32Constant(DAG, 0x3e75fe14, dl)); 3576 SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X); 3577 SDValue t11 = DAG.getNode(ISD::FADD, dl, MVT::f32, t10, 3578 getF32Constant(DAG,
0x3f317234, dl)); 3579 SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X); 3580 TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t12, 3581 getF32Constant(DAG, 0x3f800000, dl)); 3582 } 3583 3584 // Add the exponent into the result in integer domain. 3585 SDValue t13 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, TwoToFractionalPartOfX); 3586 return DAG.getNode(ISD::BITCAST, dl, MVT::f32, 3587 DAG.getNode(ISD::ADD, dl, MVT::i32, t13, IntegerPartOfX)); 3588 } 3589 3590 /// expandExp - Lower an exp intrinsic. Handles the special sequences for 3591 /// limited-precision mode. 3592 static SDValue expandExp(SDLoc dl, SDValue Op, SelectionDAG &DAG, 3593 const TargetLowering &TLI) { 3594 if (Op.getValueType() == MVT::f32 && 3595 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) { 3596 3597 // Put the exponent in the right bit position for later addition to the 3598 // final result: 3599 // 3600 // #define LOG2OFe 1.4426950f 3601 // t0 = Op * LOG2OFe 3602 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, Op, 3603 getF32Constant(DAG, 0x3fb8aa3b, dl)); 3604 return getLimitedPrecisionExp2(t0, dl, DAG); 3605 } 3606 3607 // No special expansion. 3608 return DAG.getNode(ISD::FEXP, dl, Op.getValueType(), Op); 3609 } 3610 3611 /// expandLog - Lower a log intrinsic. Handles the special sequences for 3612 /// limited-precision mode. 3613 static SDValue expandLog(SDLoc dl, SDValue Op, SelectionDAG &DAG, 3614 const TargetLowering &TLI) { 3615 if (Op.getValueType() == MVT::f32 && 3616 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) { 3617 SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op); 3618 3619 // Scale the exponent by log(2) [0.69314718f]. 3620 SDValue Exp = GetExponent(DAG, Op1, TLI, dl); 3621 SDValue LogOfExponent = DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp, 3622 getF32Constant(DAG, 0x3f317218, dl)); 3623 3624 // Get the significand and build it into a floating-point number with 3625 // exponent of 1. 
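// For example, for Op = 10.5f (bit pattern 0x41280000) this gives Exp = 3.0f and X = 1.3125f (0x3fa80000), so log(10.5) = 3*log(2) + log(1.3125).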
3626 SDValue X = GetSignificand(DAG, Op1, dl); 3627 3628 SDValue LogOfMantissa; 3629 if (LimitFloatPrecision <= 6) { 3630 // For floating-point precision of 6: 3631 // 3632 // LogOfMantissa = 3633 // -1.1609546f + 3634 // (1.4034025f - 0.23903021f * x) * x; 3635 // 3636 // error 0.0034276066, which is better than 8 bits 3637 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X, 3638 getF32Constant(DAG, 0xbe74c456, dl)); 3639 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0, 3640 getF32Constant(DAG, 0x3fb3a2b1, dl)); 3641 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X); 3642 LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2, 3643 getF32Constant(DAG, 0x3f949a29, dl)); 3644 } else if (LimitFloatPrecision <= 12) { 3645 // For floating-point precision of 12: 3646 // 3647 // LogOfMantissa = 3648 // -1.7417939f + 3649 // (2.8212026f + 3650 // (-1.4699568f + 3651 // (0.44717955f - 0.56570851e-1f * x) * x) * x) * x; 3652 // 3653 // error 0.000061011436, which is 14 bits 3654 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X, 3655 getF32Constant(DAG, 0xbd67b6d6, dl)); 3656 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0, 3657 getF32Constant(DAG, 0x3ee4f4b8, dl)); 3658 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X); 3659 SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2, 3660 getF32Constant(DAG, 0x3fbc278b, dl)); 3661 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X); 3662 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4, 3663 getF32Constant(DAG, 0x40348e95, dl)); 3664 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X); 3665 LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6, 3666 getF32Constant(DAG, 0x3fdef31a, dl)); 3667 } else { // LimitFloatPrecision <= 18 3668 // For floating-point precision of 18: 3669 // 3670 // LogOfMantissa = 3671 // -2.1072184f + 3672 // (4.2372794f + 3673 // (-3.7029485f + 3674 // (2.2781945f + 3675 // (-0.87823314f + 3676 // (0.19073739f - 0.17809712e-1f * x) * x) * x) * x) * x)*x; 3677 // 3678 // error 0.0000023660568, which is better than 18 bits 3679 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X, 3680 getF32Constant(DAG, 0xbc91e5ac, dl)); 3681 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0, 3682 getF32Constant(DAG, 0x3e4350aa, dl)); 3683 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X); 3684 SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2, 3685 getF32Constant(DAG, 0x3f60d3e3, dl)); 3686 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X); 3687 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4, 3688 getF32Constant(DAG, 0x4011cdf0, dl)); 3689 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X); 3690 SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6, 3691 getF32Constant(DAG, 0x406cfd1c, dl)); 3692 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X); 3693 SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8, 3694 getF32Constant(DAG, 0x408797cb, dl)); 3695 SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X); 3696 LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10, 3697 getF32Constant(DAG, 0x4006dcab, dl)); 3698 } 3699 3700 return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, LogOfMantissa); 3701 } 3702 3703 // No special expansion. 3704 return DAG.getNode(ISD::FLOG, dl, Op.getValueType(), Op); 3705 } 3706 3707 /// expandLog2 - Lower a log2 intrinsic. Handles the special sequences for 3708 /// limited-precision mode.
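/// This path applies only to f32 operands when -limit-float-precision is in [1,18]; otherwise a plain ISD::FLOG2 node is emitted.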
3709 static SDValue expandLog2(SDLoc dl, SDValue Op, SelectionDAG &DAG, 3710 const TargetLowering &TLI) { 3711 if (Op.getValueType() == MVT::f32 && 3712 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) { 3713 SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op); 3714 3715 // Get the exponent. 3716 SDValue LogOfExponent = GetExponent(DAG, Op1, TLI, dl); 3717 3718 // Get the significand and build it into a floating-point number with 3719 // exponent of 1. 3720 SDValue X = GetSignificand(DAG, Op1, dl); 3721 3722 // Different possible minimax approximations of significand in 3723 // floating-point for various degrees of accuracy over [1,2]. 3724 SDValue Log2ofMantissa; 3725 if (LimitFloatPrecision <= 6) { 3726 // For floating-point precision of 6: 3727 // 3728 // Log2ofMantissa = -1.6749035f + (2.0246817f - .34484768f * x) * x; 3729 // 3730 // error 0.0049451742, which is more than 7 bits 3731 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X, 3732 getF32Constant(DAG, 0xbeb08fe0, dl)); 3733 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0, 3734 getF32Constant(DAG, 0x40019463, dl)); 3735 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X); 3736 Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2, 3737 getF32Constant(DAG, 0x3fd6633d, dl)); 3738 } else if (LimitFloatPrecision <= 12) { 3739 // For floating-point precision of 12: 3740 // 3741 // Log2ofMantissa = 3742 // -2.51285454f + 3743 // (4.07009056f + 3744 // (-2.12067489f + 3745 // (.645142248f - 0.816157886e-1f * x) * x) * x) * x; 3746 // 3747 // error 0.0000876136000, which is better than 13 bits 3748 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X, 3749 getF32Constant(DAG, 0xbda7262e, dl)); 3750 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0, 3751 getF32Constant(DAG, 0x3f25280b, dl)); 3752 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X); 3753 SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2, 3754 getF32Constant(DAG, 0x4007b923, dl)); 3755 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X); 3756 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4, 3757 getF32Constant(DAG, 0x40823e2f, dl)); 3758 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X); 3759 Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6, 3760 getF32Constant(DAG, 0x4020d29c, dl)); 3761 } else { // LimitFloatPrecision <= 18 3762 // For floating-point precision of 18: 3763 // 3764 // Log2ofMantissa = 3765 // -3.0400495f + 3766 // (6.1129976f + 3767 // (-5.3420409f + 3768 // (3.2865683f + 3769 // (-1.2669343f + 3770 // (0.27515199f - 3771 // 0.25691327e-1f * x) * x) * x) * x) * x) * x; 3772 // 3773 // error 0.0000018516, which is better than 18 bits 3774 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X, 3775 getF32Constant(DAG, 0xbcd2769e, dl)); 3776 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0, 3777 getF32Constant(DAG, 0x3e8ce0b9, dl)); 3778 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X); 3779 SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2, 3780 getF32Constant(DAG, 0x3fa22ae7, dl)); 3781 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X); 3782 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4, 3783 getF32Constant(DAG, 0x40525723, dl)); 3784 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X); 3785 SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6, 3786 getF32Constant(DAG, 0x40aaf200, dl)); 3787 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X); 3788 SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8, 3789 getF32Constant(DAG, 0x40c39dad, dl)); 3790 SDValue t10 
= DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X); 3791 Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10, 3792 getF32Constant(DAG, 0x4042902c, dl)); 3793 } 3794 3795 return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, Log2ofMantissa); 3796 } 3797 3798 // No special expansion. 3799 return DAG.getNode(ISD::FLOG2, dl, Op.getValueType(), Op); 3800 } 3801 3802 /// expandLog10 - Lower a log10 intrinsic. Handles the special sequences for 3803 /// limited-precision mode. 3804 static SDValue expandLog10(SDLoc dl, SDValue Op, SelectionDAG &DAG, 3805 const TargetLowering &TLI) { 3806 if (Op.getValueType() == MVT::f32 && 3807 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) { 3808 SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op); 3809 3810 // Scale the exponent by log10(2) [0.30102999f]. 3811 SDValue Exp = GetExponent(DAG, Op1, TLI, dl); 3812 SDValue LogOfExponent = DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp, 3813 getF32Constant(DAG, 0x3e9a209a, dl)); 3814 3815 // Get the significand and build it into a floating-point number with 3816 // exponent of 1. 3817 SDValue X = GetSignificand(DAG, Op1, dl); 3818 3819 SDValue Log10ofMantissa; 3820 if (LimitFloatPrecision <= 6) { 3821 // For floating-point precision of 6: 3822 // 3823 // Log10ofMantissa = 3824 // -0.50419619f + 3825 // (0.60948995f - 0.10380950f * x) * x; 3826 // 3827 // error 0.0014886165, which is 6 bits 3828 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X, 3829 getF32Constant(DAG, 0xbdd49a13, dl)); 3830 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0, 3831 getF32Constant(DAG, 0x3f1c0789, dl)); 3832 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X); 3833 Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2, 3834 getF32Constant(DAG, 0x3f011300, dl)); 3835 } else if (LimitFloatPrecision <= 12) { 3836 // For floating-point precision of 12: 3837 // 3838 // Log10ofMantissa = 3839 // -0.64831180f + 3840 // (0.91751397f + 3841 // (-0.31664806f + 0.47637168e-1f * x) * x) * x; 3842 // 3843 // error 0.00019228036, which is better than 12 bits 3844 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X, 3845 getF32Constant(DAG, 0x3d431f31, dl)); 3846 SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0, 3847 getF32Constant(DAG, 0x3ea21fb2, dl)); 3848 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X); 3849 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2, 3850 getF32Constant(DAG, 0x3f6ae232, dl)); 3851 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X); 3852 Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4, 3853 getF32Constant(DAG, 0x3f25f7c3, dl)); 3854 } else { // LimitFloatPrecision <= 18 3855 // For floating-point precision of 18: 3856 // 3857 // Log10ofMantissa = 3858 // -0.84299375f + 3859 // (1.5327582f + 3860 // (-1.0688956f + 3861 // (0.49102474f + 3862 // (-0.12539807f + 0.13508273e-1f * x) * x) * x) * x) * x; 3863 // 3864 // error 0.0000037995730, which is better than 18 bits 3865 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X, 3866 getF32Constant(DAG, 0x3c5d51ce, dl)); 3867 SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0, 3868 getF32Constant(DAG, 0x3e00685a, dl)); 3869 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X); 3870 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2, 3871 getF32Constant(DAG, 0x3efb6798, dl)); 3872 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X); 3873 SDValue t5 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4, 3874 getF32Constant(DAG, 0x3f88d192, dl)); 3875 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X); 3876 SDValue t7 = 
DAG.getNode(ISD::FADD, dl, MVT::f32, t6, 3877 getF32Constant(DAG, 0x3fc4316c, dl)); 3878 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X); 3879 Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t8, 3880 getF32Constant(DAG, 0x3f57ce70, dl)); 3881 } 3882 3883 return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, Log10ofMantissa); 3884 } 3885 3886 // No special expansion. 3887 return DAG.getNode(ISD::FLOG10, dl, Op.getValueType(), Op); 3888 } 3889 3890 /// expandExp2 - Lower an exp2 intrinsic. Handles the special sequences for 3891 /// limited-precision mode. 3892 static SDValue expandExp2(SDLoc dl, SDValue Op, SelectionDAG &DAG, 3893 const TargetLowering &TLI) { 3894 if (Op.getValueType() == MVT::f32 && 3895 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) 3896 return getLimitedPrecisionExp2(Op, dl, DAG); 3897 3898 // No special expansion. 3899 return DAG.getNode(ISD::FEXP2, dl, Op.getValueType(), Op); 3900 } 3901 3902 /// expandPow - Lower a pow intrinsic. Handles the special sequences for 3903 /// limited-precision mode when the base is exactly 10.0f. 3904 static SDValue expandPow(SDLoc dl, SDValue LHS, SDValue RHS, 3905 SelectionDAG &DAG, const TargetLowering &TLI) { 3906 bool IsExp10 = false; 3907 if (LHS.getValueType() == MVT::f32 && RHS.getValueType() == MVT::f32 && 3908 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) { 3909 if (ConstantFPSDNode *LHSC = dyn_cast<ConstantFPSDNode>(LHS)) { 3910 APFloat Ten(10.0f); 3911 IsExp10 = LHSC->isExactlyValue(Ten); 3912 } 3913 } 3914 3915 if (IsExp10) { 3916 // Put the exponent in the right bit position for later addition to the 3917 // final result: 3918 // 3919 // #define LOG2OF10 3.3219281f 3920 // t0 = Op * LOG2OF10; 3921 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, RHS, 3922 getF32Constant(DAG, 0x40549a78, dl)); 3923 return getLimitedPrecisionExp2(t0, dl, DAG); 3924 } 3925 3926 // No special expansion. 3927 return DAG.getNode(ISD::FPOW, dl, LHS.getValueType(), LHS, RHS); 3928 } 3929 3930 3931 /// ExpandPowI - Expand a llvm.powi intrinsic. 3932 static SDValue ExpandPowI(SDLoc DL, SDValue LHS, SDValue RHS, 3933 SelectionDAG &DAG) { 3934 // If RHS is a constant, we can expand this out to a multiplication tree, 3935 // otherwise we end up lowering to a call to __powidf2 (for example). When 3936 // optimizing for size, we only want to do this if the expansion would produce 3937 // a small number of multiplies; when not optimizing for size, we always do the full expansion. 3938 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) { 3939 // Get the exponent as a positive value. 3940 unsigned Val = RHSC->getSExtValue(); 3941 if ((int)Val < 0) Val = -Val; 3942 3943 // powi(x, 0) -> 1.0 3944 if (Val == 0) 3945 return DAG.getConstantFP(1.0, DL, LHS.getValueType()); 3946 3947 const Function *F = DAG.getMachineFunction().getFunction(); 3948 if (!F->hasFnAttribute(Attribute::OptimizeForSize) || 3949 // If optimizing for size, don't insert too many multiplies. This 3950 // inserts up to 5 multiplies. 3951 countPopulation(Val) + Log2_32(Val) < 7) { 3952 // We use the simple binary decomposition method to generate the multiply 3953 // sequence. There are more optimal ways to do this (for example, 3954 // powi(x,15) generates one more multiply than it should), but this has 3955 // the benefit of being both really simple and much better than a libcall.
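// For example, powi(x, 6) has Val = 0b110, so the loop below emits x2 = x*x, x4 = x2*x2 and Res = x2*x4; one further square is generated but goes unused.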
3956 SDValue Res; // Logically starts equal to 1.0 3957 SDValue CurSquare = LHS; 3958 while (Val) { 3959 if (Val & 1) { 3960 if (Res.getNode()) 3961 Res = DAG.getNode(ISD::FMUL, DL, Res.getValueType(), Res, CurSquare); 3962 else 3963 Res = CurSquare; // 1.0*CurSquare. 3964 } 3965 3966 CurSquare = DAG.getNode(ISD::FMUL, DL, CurSquare.getValueType(), 3967 CurSquare, CurSquare); 3968 Val >>= 1; 3969 } 3970 3971 // If the original was negative, invert the result, producing 1/(x*x*x). 3972 if (RHSC->getSExtValue() < 0) 3973 Res = DAG.getNode(ISD::FDIV, DL, LHS.getValueType(), 3974 DAG.getConstantFP(1.0, DL, LHS.getValueType()), Res); 3975 return Res; 3976 } 3977 } 3978 3979 // Otherwise, expand to a libcall. 3980 return DAG.getNode(ISD::FPOWI, DL, LHS.getValueType(), LHS, RHS); 3981 } 3982 3983 // getTruncatedArgReg - Find the underlying register used for a truncated 3984 // argument. 3985 static unsigned getTruncatedArgReg(const SDValue &N) { 3986 if (N.getOpcode() != ISD::TRUNCATE) 3987 return 0; 3988 3989 const SDValue &Ext = N.getOperand(0); 3990 if (Ext.getOpcode() == ISD::AssertZext || 3991 Ext.getOpcode() == ISD::AssertSext) { 3992 const SDValue &CFR = Ext.getOperand(0); 3993 if (CFR.getOpcode() == ISD::CopyFromReg) 3994 return cast<RegisterSDNode>(CFR.getOperand(1))->getReg(); 3995 if (CFR.getOpcode() == ISD::TRUNCATE) 3996 return getTruncatedArgReg(CFR); 3997 } 3998 return 0; 3999 } 4000 4001 /// EmitFuncArgumentDbgValue - If the DbgValueInst is a dbg_value of a function 4002 /// argument, create the corresponding DBG_VALUE machine instruction for it now. 4003 /// At the end of instruction selection, they will be inserted into the entry BB. 4004 bool SelectionDAGBuilder::EmitFuncArgumentDbgValue( 4005 const Value *V, DILocalVariable *Variable, DIExpression *Expr, 4006 DILocation *DL, int64_t Offset, bool IsIndirect, const SDValue &N) { 4007 const Argument *Arg = dyn_cast<Argument>(V); 4008 if (!Arg) 4009 return false; 4010 4011 MachineFunction &MF = DAG.getMachineFunction(); 4012 const TargetInstrInfo *TII = DAG.getSubtarget().getInstrInfo(); 4013 4014 // Ignore inlined function arguments here. 4015 // 4016 // FIXME: Should we be checking DL->inlinedAt() to determine this? 4017 if (!Variable->getScope()->getSubprogram()->describes(MF.getFunction())) 4018 return false; 4019 4020 Optional<MachineOperand> Op; 4021 // Some arguments' frame indices are recorded during argument lowering. 4022 if (int FI = FuncInfo.getArgumentFrameIndex(Arg)) 4023 Op = MachineOperand::CreateFI(FI); 4024 4025 if (!Op && N.getNode()) { 4026 unsigned Reg; 4027 if (N.getOpcode() == ISD::CopyFromReg) 4028 Reg = cast<RegisterSDNode>(N.getOperand(1))->getReg(); 4029 else 4030 Reg = getTruncatedArgReg(N); 4031 if (Reg && TargetRegisterInfo::isVirtualRegister(Reg)) { 4032 MachineRegisterInfo &RegInfo = MF.getRegInfo(); 4033 unsigned PR = RegInfo.getLiveInPhysReg(Reg); 4034 if (PR) 4035 Reg = PR; 4036 } 4037 if (Reg) 4038 Op = MachineOperand::CreateReg(Reg, false); 4039 } 4040 4041 if (!Op) { 4042 // Check if ValueMap has reg number. 4043 DenseMap<const Value *, unsigned>::iterator VMI = FuncInfo.ValueMap.find(V); 4044 if (VMI != FuncInfo.ValueMap.end()) 4045 Op = MachineOperand::CreateReg(VMI->second, false); 4046 } 4047 4048 if (!Op && N.getNode()) 4049 // Check if frame index is available.
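// (e.g. the argument was passed in memory and N is a load whose address is a frame index, in which case the DBG_VALUE can refer to that stack slot directly.)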
4050 if (LoadSDNode *LNode = dyn_cast<LoadSDNode>(N.getNode())) 4051 if (FrameIndexSDNode *FINode = 4052 dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode())) 4053 Op = MachineOperand::CreateFI(FINode->getIndex()); 4054 4055 if (!Op) 4056 return false; 4057 4058 assert(Variable->isValidLocationForIntrinsic(DL) && 4059 "Expected inlined-at fields to agree"); 4060 if (Op->isReg()) 4061 FuncInfo.ArgDbgValues.push_back( 4062 BuildMI(MF, DL, TII->get(TargetOpcode::DBG_VALUE), IsIndirect, 4063 Op->getReg(), Offset, Variable, Expr)); 4064 else 4065 FuncInfo.ArgDbgValues.push_back( 4066 BuildMI(MF, DL, TII->get(TargetOpcode::DBG_VALUE)) 4067 .addOperand(*Op) 4068 .addImm(Offset) 4069 .addMetadata(Variable) 4070 .addMetadata(Expr)); 4071 4072 return true; 4073 } 4074 4075 // VisualStudio defines setjmp as _setjmp 4076 #if defined(_MSC_VER) && defined(setjmp) && \ 4077 !defined(setjmp_undefined_for_msvc) 4078 # pragma push_macro("setjmp") 4079 # undef setjmp 4080 # define setjmp_undefined_for_msvc 4081 #endif 4082 4083 /// visitIntrinsicCall - Lower the call to the specified intrinsic function. If 4084 /// we want to emit this as a call to a named external function, return the name 4085 /// otherwise lower it and return null. 4086 const char * 4087 SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) { 4088 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 4089 SDLoc sdl = getCurSDLoc(); 4090 DebugLoc dl = getCurDebugLoc(); 4091 SDValue Res; 4092 4093 switch (Intrinsic) { 4094 default: 4095 // By default, turn this into a target intrinsic node. 4096 visitTargetIntrinsic(I, Intrinsic); 4097 return nullptr; 4098 case Intrinsic::vastart: visitVAStart(I); return nullptr; 4099 case Intrinsic::vaend: visitVAEnd(I); return nullptr; 4100 case Intrinsic::vacopy: visitVACopy(I); return nullptr; 4101 case Intrinsic::returnaddress: 4102 setValue(&I, DAG.getNode(ISD::RETURNADDR, sdl, 4103 TLI.getPointerTy(DAG.getDataLayout()), 4104 getValue(I.getArgOperand(0)))); 4105 return nullptr; 4106 case Intrinsic::frameaddress: 4107 setValue(&I, DAG.getNode(ISD::FRAMEADDR, sdl, 4108 TLI.getPointerTy(DAG.getDataLayout()), 4109 getValue(I.getArgOperand(0)))); 4110 return nullptr; 4111 case Intrinsic::read_register: { 4112 Value *Reg = I.getArgOperand(0); 4113 SDValue Chain = getRoot(); 4114 SDValue RegName = 4115 DAG.getMDNode(cast<MDNode>(cast<MetadataAsValue>(Reg)->getMetadata())); 4116 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType()); 4117 Res = DAG.getNode(ISD::READ_REGISTER, sdl, 4118 DAG.getVTList(VT, MVT::Other), Chain, RegName); 4119 setValue(&I, Res); 4120 DAG.setRoot(Res.getValue(1)); 4121 return nullptr; 4122 } 4123 case Intrinsic::write_register: { 4124 Value *Reg = I.getArgOperand(0); 4125 Value *RegValue = I.getArgOperand(1); 4126 SDValue Chain = getRoot(); 4127 SDValue RegName = 4128 DAG.getMDNode(cast<MDNode>(cast<MetadataAsValue>(Reg)->getMetadata())); 4129 DAG.setRoot(DAG.getNode(ISD::WRITE_REGISTER, sdl, MVT::Other, Chain, 4130 RegName, getValue(RegValue))); 4131 return nullptr; 4132 } 4133 case Intrinsic::setjmp: 4134 return &"_setjmp"[!TLI.usesUnderscoreSetJmp()]; 4135 case Intrinsic::longjmp: 4136 return &"_longjmp"[!TLI.usesUnderscoreLongJmp()]; 4137 case Intrinsic::memcpy: { 4138 // FIXME: this definition of "user defined address space" is x86-specific 4139 // Assert for address < 256 since we support only user defined address 4140 // spaces. 
4141 assert(cast<PointerType>(I.getArgOperand(0)->getType())->getAddressSpace() 4142 < 256 && 4143 cast<PointerType>(I.getArgOperand(1)->getType())->getAddressSpace() 4144 < 256 && 4145 "Unknown address space"); 4146 SDValue Op1 = getValue(I.getArgOperand(0)); 4147 SDValue Op2 = getValue(I.getArgOperand(1)); 4148 SDValue Op3 = getValue(I.getArgOperand(2)); 4149 unsigned Align = cast<ConstantInt>(I.getArgOperand(3))->getZExtValue(); 4150 if (!Align) 4151 Align = 1; // @llvm.memcpy defines 0 and 1 to both mean no alignment. 4152 bool isVol = cast<ConstantInt>(I.getArgOperand(4))->getZExtValue(); 4153 bool isTC = I.isTailCall() && isInTailCallPosition(&I, DAG.getTarget()); 4154 SDValue MC = DAG.getMemcpy(getRoot(), sdl, Op1, Op2, Op3, Align, isVol, 4155 false, isTC, 4156 MachinePointerInfo(I.getArgOperand(0)), 4157 MachinePointerInfo(I.getArgOperand(1))); 4158 updateDAGForMaybeTailCall(MC); 4159 return nullptr; 4160 } 4161 case Intrinsic::memset: { 4162 // FIXME: this definition of "user defined address space" is x86-specific 4163 // Assert for address < 256 since we support only user defined address 4164 // spaces. 4165 assert(cast<PointerType>(I.getArgOperand(0)->getType())->getAddressSpace() 4166 < 256 && 4167 "Unknown address space"); 4168 SDValue Op1 = getValue(I.getArgOperand(0)); 4169 SDValue Op2 = getValue(I.getArgOperand(1)); 4170 SDValue Op3 = getValue(I.getArgOperand(2)); 4171 unsigned Align = cast<ConstantInt>(I.getArgOperand(3))->getZExtValue(); 4172 if (!Align) 4173 Align = 1; // @llvm.memset defines 0 and 1 to both mean no alignment. 4174 bool isVol = cast<ConstantInt>(I.getArgOperand(4))->getZExtValue(); 4175 bool isTC = I.isTailCall() && isInTailCallPosition(&I, DAG.getTarget()); 4176 SDValue MS = DAG.getMemset(getRoot(), sdl, Op1, Op2, Op3, Align, isVol, 4177 isTC, MachinePointerInfo(I.getArgOperand(0))); 4178 updateDAGForMaybeTailCall(MS); 4179 return nullptr; 4180 } 4181 case Intrinsic::memmove: { 4182 // FIXME: this definition of "user defined address space" is x86-specific 4183 // Assert for address < 256 since we support only user defined address 4184 // spaces. 4185 assert(cast<PointerType>(I.getArgOperand(0)->getType())->getAddressSpace() 4186 < 256 && 4187 cast<PointerType>(I.getArgOperand(1)->getType())->getAddressSpace() 4188 < 256 && 4189 "Unknown address space"); 4190 SDValue Op1 = getValue(I.getArgOperand(0)); 4191 SDValue Op2 = getValue(I.getArgOperand(1)); 4192 SDValue Op3 = getValue(I.getArgOperand(2)); 4193 unsigned Align = cast<ConstantInt>(I.getArgOperand(3))->getZExtValue(); 4194 if (!Align) 4195 Align = 1; // @llvm.memmove defines 0 and 1 to both mean no alignment. 4196 bool isVol = cast<ConstantInt>(I.getArgOperand(4))->getZExtValue(); 4197 bool isTC = I.isTailCall() && isInTailCallPosition(&I, DAG.getTarget()); 4198 SDValue MM = DAG.getMemmove(getRoot(), sdl, Op1, Op2, Op3, Align, isVol, 4199 isTC, MachinePointerInfo(I.getArgOperand(0)), 4200 MachinePointerInfo(I.getArgOperand(1))); 4201 updateDAGForMaybeTailCall(MM); 4202 return nullptr; 4203 } 4204 case Intrinsic::dbg_declare: { 4205 const DbgDeclareInst &DI = cast<DbgDeclareInst>(I); 4206 DILocalVariable *Variable = DI.getVariable(); 4207 DIExpression *Expression = DI.getExpression(); 4208 const Value *Address = DI.getAddress(); 4209 assert(Variable && "Missing variable"); 4210 if (!Address) { 4211 DEBUG(dbgs() << "Dropping debug info for " << DI << "\n"); 4212 return nullptr; 4213 } 4214 4215 // Check if address has undef value. 
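// (We also give up if the address is an unused non-argument value, since no SDValue will ever be created for it.)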
4216 if (isa<UndefValue>(Address) || 4217 (Address->use_empty() && !isa<Argument>(Address))) { 4218 DEBUG(dbgs() << "Dropping debug info for " << DI << "\n"); 4219 return nullptr; 4220 } 4221 4222 SDValue &N = NodeMap[Address]; 4223 if (!N.getNode() && isa<Argument>(Address)) 4224 // Check unused arguments map. 4225 N = UnusedArgNodeMap[Address]; 4226 SDDbgValue *SDV; 4227 if (N.getNode()) { 4228 if (const BitCastInst *BCI = dyn_cast<BitCastInst>(Address)) 4229 Address = BCI->getOperand(0); 4230 // Parameters are handled specially. 4231 bool isParameter = Variable->getTag() == dwarf::DW_TAG_arg_variable || 4232 isa<Argument>(Address); 4233 4234 const AllocaInst *AI = dyn_cast<AllocaInst>(Address); 4235 4236 if (isParameter && !AI) { 4237 FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(N.getNode()); 4238 if (FINode) 4239 // Byval parameter. We have a frame index at this point. 4240 SDV = DAG.getFrameIndexDbgValue( 4241 Variable, Expression, FINode->getIndex(), 0, dl, SDNodeOrder); 4242 else { 4243 // Address is an argument, so try to emit its dbg value using 4244 // virtual register info from the FuncInfo.ValueMap. 4245 EmitFuncArgumentDbgValue(Address, Variable, Expression, dl, 0, false, 4246 N); 4247 return nullptr; 4248 } 4249 } else if (AI) 4250 SDV = DAG.getDbgValue(Variable, Expression, N.getNode(), N.getResNo(), 4251 true, 0, dl, SDNodeOrder); 4252 else { 4253 // Can't do anything with other non-AI cases yet. 4254 DEBUG(dbgs() << "Dropping debug info for " << DI << "\n"); 4255 DEBUG(dbgs() << "non-AllocaInst issue for Address: \n\t"); 4256 DEBUG(Address->dump()); 4257 return nullptr; 4258 } 4259 DAG.AddDbgValue(SDV, N.getNode(), isParameter); 4260 } else { 4261 // If Address is an argument then try to emit its dbg value using 4262 // virtual register info from the FuncInfo.ValueMap. 4263 if (!EmitFuncArgumentDbgValue(Address, Variable, Expression, dl, 0, false, 4264 N)) { 4265 // If the variable is pinned by an alloca in a dominating BB then 4266 // use the StaticAllocaMap. 4267 if (const AllocaInst *AI = dyn_cast<AllocaInst>(Address)) { 4268 if (AI->getParent() != DI.getParent()) { 4269 DenseMap<const AllocaInst*, int>::iterator SI = 4270 FuncInfo.StaticAllocaMap.find(AI); 4271 if (SI != FuncInfo.StaticAllocaMap.end()) { 4272 SDV = DAG.getFrameIndexDbgValue(Variable, Expression, SI->second, 4273 0, dl, SDNodeOrder); 4274 DAG.AddDbgValue(SDV, nullptr, false); 4275 return nullptr; 4276 } 4277 } 4278 } 4279 DEBUG(dbgs() << "Dropping debug info for " << DI << "\n"); 4280 } 4281 } 4282 return nullptr; 4283 } 4284 case Intrinsic::dbg_value: { 4285 const DbgValueInst &DI = cast<DbgValueInst>(I); 4286 assert(DI.getVariable() && "Missing variable"); 4287 4288 DILocalVariable *Variable = DI.getVariable(); 4289 DIExpression *Expression = DI.getExpression(); 4290 uint64_t Offset = DI.getOffset(); 4291 const Value *V = DI.getValue(); 4292 if (!V) 4293 return nullptr; 4294 4295 SDDbgValue *SDV; 4296 if (isa<ConstantInt>(V) || isa<ConstantFP>(V) || isa<UndefValue>(V)) { 4297 SDV = DAG.getConstantDbgValue(Variable, Expression, V, Offset, dl, 4298 SDNodeOrder); 4299 DAG.AddDbgValue(SDV, nullptr, false); 4300 } else { 4301 // Do not use getValue() in here; we don't want to generate code at 4302 // this point if it hasn't been done yet. 4303 SDValue N = NodeMap[V]; 4304 if (!N.getNode() && isa<Argument>(V)) 4305 // Check unused arguments map. 4306 N = UnusedArgNodeMap[V]; 4307 if (N.getNode()) { 4308 // A dbg.value for an alloca is always indirect.
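// (The SDValue computed for an alloca is the address of its stack slot, not the variable's contents; a nonzero offset likewise forces an indirect location.)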
4309 bool IsIndirect = isa<AllocaInst>(V) || Offset != 0; 4310 if (!EmitFuncArgumentDbgValue(V, Variable, Expression, dl, Offset, 4311 IsIndirect, N)) { 4312 SDV = DAG.getDbgValue(Variable, Expression, N.getNode(), N.getResNo(), 4313 IsIndirect, Offset, dl, SDNodeOrder); 4314 DAG.AddDbgValue(SDV, N.getNode(), false); 4315 } 4316 } else if (!V->use_empty() ) { 4317 // Do not call getValue(V) yet, as we don't want to generate code. 4318 // Remember it for later. 4319 DanglingDebugInfo DDI(&DI, dl, SDNodeOrder); 4320 DanglingDebugInfoMap[V] = DDI; 4321 } else { 4322 // We may expand this to cover more cases. One case where we have no 4323 // data available is an unreferenced parameter. 4324 DEBUG(dbgs() << "Dropping debug info for " << DI << "\n"); 4325 } 4326 } 4327 4328 // Build a debug info table entry. 4329 if (const BitCastInst *BCI = dyn_cast<BitCastInst>(V)) 4330 V = BCI->getOperand(0); 4331 const AllocaInst *AI = dyn_cast<AllocaInst>(V); 4332 // Don't handle byval struct arguments or VLAs, for example. 4333 if (!AI) { 4334 DEBUG(dbgs() << "Dropping debug location info for:\n " << DI << "\n"); 4335 DEBUG(dbgs() << " Last seen at:\n " << *V << "\n"); 4336 return nullptr; 4337 } 4338 DenseMap<const AllocaInst*, int>::iterator SI = 4339 FuncInfo.StaticAllocaMap.find(AI); 4340 if (SI == FuncInfo.StaticAllocaMap.end()) 4341 return nullptr; // VLAs. 4342 return nullptr; 4343 } 4344 4345 case Intrinsic::eh_typeid_for: { 4346 // Find the type id for the given typeinfo. 4347 GlobalValue *GV = ExtractTypeInfo(I.getArgOperand(0)); 4348 unsigned TypeID = DAG.getMachineFunction().getMMI().getTypeIDFor(GV); 4349 Res = DAG.getConstant(TypeID, sdl, MVT::i32); 4350 setValue(&I, Res); 4351 return nullptr; 4352 } 4353 4354 case Intrinsic::eh_return_i32: 4355 case Intrinsic::eh_return_i64: 4356 DAG.getMachineFunction().getMMI().setCallsEHReturn(true); 4357 DAG.setRoot(DAG.getNode(ISD::EH_RETURN, sdl, 4358 MVT::Other, 4359 getControlRoot(), 4360 getValue(I.getArgOperand(0)), 4361 getValue(I.getArgOperand(1)))); 4362 return nullptr; 4363 case Intrinsic::eh_unwind_init: 4364 DAG.getMachineFunction().getMMI().setCallsUnwindInit(true); 4365 return nullptr; 4366 case Intrinsic::eh_dwarf_cfa: { 4367 SDValue CfaArg = DAG.getSExtOrTrunc(getValue(I.getArgOperand(0)), sdl, 4368 TLI.getPointerTy(DAG.getDataLayout())); 4369 SDValue Offset = DAG.getNode(ISD::ADD, sdl, 4370 CfaArg.getValueType(), 4371 DAG.getNode(ISD::FRAME_TO_ARGS_OFFSET, sdl, 4372 CfaArg.getValueType()), 4373 CfaArg); 4374 SDValue FA = DAG.getNode( 4375 ISD::FRAMEADDR, sdl, TLI.getPointerTy(DAG.getDataLayout()), 4376 DAG.getConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout()))); 4377 setValue(&I, DAG.getNode(ISD::ADD, sdl, FA.getValueType(), 4378 FA, Offset)); 4379 return nullptr; 4380 } 4381 case Intrinsic::eh_sjlj_callsite: { 4382 MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI(); 4383 ConstantInt *CI = dyn_cast<ConstantInt>(I.getArgOperand(0)); 4384 assert(CI && "Non-constant call site value in eh.sjlj.callsite!"); 4385 assert(MMI.getCurrentCallSite() == 0 && "Overlapping call sites!"); 4386 4387 MMI.setCurrentCallSite(CI->getZExtValue()); 4388 return nullptr; 4389 } 4390 case Intrinsic::eh_sjlj_functioncontext: { 4391 // Get and store the index of the function context. 
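// (The function context alloca itself is created earlier by the SjLj EH preparation pass; here we only record its frame index so the target's EH lowering can find it.)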
4392 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); 4393 AllocaInst *FnCtx = 4394 cast<AllocaInst>(I.getArgOperand(0)->stripPointerCasts()); 4395 int FI = FuncInfo.StaticAllocaMap[FnCtx]; 4396 MFI->setFunctionContextIndex(FI); 4397 return nullptr; 4398 } 4399 case Intrinsic::eh_sjlj_setjmp: { 4400 SDValue Ops[2]; 4401 Ops[0] = getRoot(); 4402 Ops[1] = getValue(I.getArgOperand(0)); 4403 SDValue Op = DAG.getNode(ISD::EH_SJLJ_SETJMP, sdl, 4404 DAG.getVTList(MVT::i32, MVT::Other), Ops); 4405 setValue(&I, Op.getValue(0)); 4406 DAG.setRoot(Op.getValue(1)); 4407 return nullptr; 4408 } 4409 case Intrinsic::eh_sjlj_longjmp: { 4410 DAG.setRoot(DAG.getNode(ISD::EH_SJLJ_LONGJMP, sdl, MVT::Other, 4411 getRoot(), getValue(I.getArgOperand(0)))); 4412 return nullptr; 4413 } 4414 4415 case Intrinsic::masked_gather: 4416 visitMaskedGather(I); 4417 return nullptr; 4418 case Intrinsic::masked_load: 4419 visitMaskedLoad(I); 4420 return nullptr; 4421 case Intrinsic::masked_scatter: 4422 visitMaskedScatter(I); 4423 return nullptr; 4424 case Intrinsic::masked_store: 4425 visitMaskedStore(I); 4426 return nullptr; 4427 case Intrinsic::x86_mmx_pslli_w: 4428 case Intrinsic::x86_mmx_pslli_d: 4429 case Intrinsic::x86_mmx_pslli_q: 4430 case Intrinsic::x86_mmx_psrli_w: 4431 case Intrinsic::x86_mmx_psrli_d: 4432 case Intrinsic::x86_mmx_psrli_q: 4433 case Intrinsic::x86_mmx_psrai_w: 4434 case Intrinsic::x86_mmx_psrai_d: { 4435 SDValue ShAmt = getValue(I.getArgOperand(1)); 4436 if (isa<ConstantSDNode>(ShAmt)) { 4437 visitTargetIntrinsic(I, Intrinsic); 4438 return nullptr; 4439 } 4440 unsigned NewIntrinsic = 0; 4441 EVT ShAmtVT = MVT::v2i32; 4442 switch (Intrinsic) { 4443 case Intrinsic::x86_mmx_pslli_w: 4444 NewIntrinsic = Intrinsic::x86_mmx_psll_w; 4445 break; 4446 case Intrinsic::x86_mmx_pslli_d: 4447 NewIntrinsic = Intrinsic::x86_mmx_psll_d; 4448 break; 4449 case Intrinsic::x86_mmx_pslli_q: 4450 NewIntrinsic = Intrinsic::x86_mmx_psll_q; 4451 break; 4452 case Intrinsic::x86_mmx_psrli_w: 4453 NewIntrinsic = Intrinsic::x86_mmx_psrl_w; 4454 break; 4455 case Intrinsic::x86_mmx_psrli_d: 4456 NewIntrinsic = Intrinsic::x86_mmx_psrl_d; 4457 break; 4458 case Intrinsic::x86_mmx_psrli_q: 4459 NewIntrinsic = Intrinsic::x86_mmx_psrl_q; 4460 break; 4461 case Intrinsic::x86_mmx_psrai_w: 4462 NewIntrinsic = Intrinsic::x86_mmx_psra_w; 4463 break; 4464 case Intrinsic::x86_mmx_psrai_d: 4465 NewIntrinsic = Intrinsic::x86_mmx_psra_d; 4466 break; 4467 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here. 4468 } 4469 4470 // The vector shift intrinsics with scalar shift amounts use 32-bit shift 4471 // amounts, but the sse2/mmx shift instructions read 64 bits. Set the upper 4472 // 32 bits to zero. 4473 // We must do this early because v2i32 is not a legal type.
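// For example, x86_mmx_pslli_w(%v, i32 %amt) is rewritten here as, roughly, x86_mmx_psll_w(%v, bitcast(<2 x i32> <%amt, 0> to x86_mmx)).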
4474 SDValue ShOps[2]; 4475 ShOps[0] = ShAmt; 4476 ShOps[1] = DAG.getConstant(0, sdl, MVT::i32); 4477 ShAmt = DAG.getNode(ISD::BUILD_VECTOR, sdl, ShAmtVT, ShOps); 4478 EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType()); 4479 ShAmt = DAG.getNode(ISD::BITCAST, sdl, DestVT, ShAmt); 4480 Res = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, sdl, DestVT, 4481 DAG.getConstant(NewIntrinsic, sdl, MVT::i32), 4482 getValue(I.getArgOperand(0)), ShAmt); 4483 setValue(&I, Res); 4484 return nullptr; 4485 } 4486 case Intrinsic::convertff: 4487 case Intrinsic::convertfsi: 4488 case Intrinsic::convertfui: 4489 case Intrinsic::convertsif: 4490 case Intrinsic::convertuif: 4491 case Intrinsic::convertss: 4492 case Intrinsic::convertsu: 4493 case Intrinsic::convertus: 4494 case Intrinsic::convertuu: { 4495 ISD::CvtCode Code = ISD::CVT_INVALID; 4496 switch (Intrinsic) { 4497 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here. 4498 case Intrinsic::convertff: Code = ISD::CVT_FF; break; 4499 case Intrinsic::convertfsi: Code = ISD::CVT_FS; break; 4500 case Intrinsic::convertfui: Code = ISD::CVT_FU; break; 4501 case Intrinsic::convertsif: Code = ISD::CVT_SF; break; 4502 case Intrinsic::convertuif: Code = ISD::CVT_UF; break; 4503 case Intrinsic::convertss: Code = ISD::CVT_SS; break; 4504 case Intrinsic::convertsu: Code = ISD::CVT_SU; break; 4505 case Intrinsic::convertus: Code = ISD::CVT_US; break; 4506 case Intrinsic::convertuu: Code = ISD::CVT_UU; break; 4507 } 4508 EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType()); 4509 const Value *Op1 = I.getArgOperand(0); 4510 Res = DAG.getConvertRndSat(DestVT, sdl, getValue(Op1), 4511 DAG.getValueType(DestVT), 4512 DAG.getValueType(getValue(Op1).getValueType()), 4513 getValue(I.getArgOperand(1)), 4514 getValue(I.getArgOperand(2)), 4515 Code); 4516 setValue(&I, Res); 4517 return nullptr; 4518 } 4519 case Intrinsic::powi: 4520 setValue(&I, ExpandPowI(sdl, getValue(I.getArgOperand(0)), 4521 getValue(I.getArgOperand(1)), DAG)); 4522 return nullptr; 4523 case Intrinsic::log: 4524 setValue(&I, expandLog(sdl, getValue(I.getArgOperand(0)), DAG, TLI)); 4525 return nullptr; 4526 case Intrinsic::log2: 4527 setValue(&I, expandLog2(sdl, getValue(I.getArgOperand(0)), DAG, TLI)); 4528 return nullptr; 4529 case Intrinsic::log10: 4530 setValue(&I, expandLog10(sdl, getValue(I.getArgOperand(0)), DAG, TLI)); 4531 return nullptr; 4532 case Intrinsic::exp: 4533 setValue(&I, expandExp(sdl, getValue(I.getArgOperand(0)), DAG, TLI)); 4534 return nullptr; 4535 case Intrinsic::exp2: 4536 setValue(&I, expandExp2(sdl, getValue(I.getArgOperand(0)), DAG, TLI)); 4537 return nullptr; 4538 case Intrinsic::pow: 4539 setValue(&I, expandPow(sdl, getValue(I.getArgOperand(0)), 4540 getValue(I.getArgOperand(1)), DAG, TLI)); 4541 return nullptr; 4542 case Intrinsic::sqrt: 4543 case Intrinsic::fabs: 4544 case Intrinsic::sin: 4545 case Intrinsic::cos: 4546 case Intrinsic::floor: 4547 case Intrinsic::ceil: 4548 case Intrinsic::trunc: 4549 case Intrinsic::rint: 4550 case Intrinsic::nearbyint: 4551 case Intrinsic::round: { 4552 unsigned Opcode; 4553 switch (Intrinsic) { 4554 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here. 
4555 case Intrinsic::sqrt: Opcode = ISD::FSQRT; break; 4556 case Intrinsic::fabs: Opcode = ISD::FABS; break; 4557 case Intrinsic::sin: Opcode = ISD::FSIN; break; 4558 case Intrinsic::cos: Opcode = ISD::FCOS; break; 4559 case Intrinsic::floor: Opcode = ISD::FFLOOR; break; 4560 case Intrinsic::ceil: Opcode = ISD::FCEIL; break; 4561 case Intrinsic::trunc: Opcode = ISD::FTRUNC; break; 4562 case Intrinsic::rint: Opcode = ISD::FRINT; break; 4563 case Intrinsic::nearbyint: Opcode = ISD::FNEARBYINT; break; 4564 case Intrinsic::round: Opcode = ISD::FROUND; break; 4565 } 4566 4567 setValue(&I, DAG.getNode(Opcode, sdl, 4568 getValue(I.getArgOperand(0)).getValueType(), 4569 getValue(I.getArgOperand(0)))); 4570 return nullptr; 4571 } 4572 case Intrinsic::minnum: 4573 setValue(&I, DAG.getNode(ISD::FMINNUM, sdl, 4574 getValue(I.getArgOperand(0)).getValueType(), 4575 getValue(I.getArgOperand(0)), 4576 getValue(I.getArgOperand(1)))); 4577 return nullptr; 4578 case Intrinsic::maxnum: 4579 setValue(&I, DAG.getNode(ISD::FMAXNUM, sdl, 4580 getValue(I.getArgOperand(0)).getValueType(), 4581 getValue(I.getArgOperand(0)), 4582 getValue(I.getArgOperand(1)))); 4583 return nullptr; 4584 case Intrinsic::copysign: 4585 setValue(&I, DAG.getNode(ISD::FCOPYSIGN, sdl, 4586 getValue(I.getArgOperand(0)).getValueType(), 4587 getValue(I.getArgOperand(0)), 4588 getValue(I.getArgOperand(1)))); 4589 return nullptr; 4590 case Intrinsic::fma: 4591 setValue(&I, DAG.getNode(ISD::FMA, sdl, 4592 getValue(I.getArgOperand(0)).getValueType(), 4593 getValue(I.getArgOperand(0)), 4594 getValue(I.getArgOperand(1)), 4595 getValue(I.getArgOperand(2)))); 4596 return nullptr; 4597 case Intrinsic::fmuladd: { 4598 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType()); 4599 if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict && 4600 TLI.isFMAFasterThanFMulAndFAdd(VT)) { 4601 setValue(&I, DAG.getNode(ISD::FMA, sdl, 4602 getValue(I.getArgOperand(0)).getValueType(), 4603 getValue(I.getArgOperand(0)), 4604 getValue(I.getArgOperand(1)), 4605 getValue(I.getArgOperand(2)))); 4606 } else { 4607 SDValue Mul = DAG.getNode(ISD::FMUL, sdl, 4608 getValue(I.getArgOperand(0)).getValueType(), 4609 getValue(I.getArgOperand(0)), 4610 getValue(I.getArgOperand(1))); 4611 SDValue Add = DAG.getNode(ISD::FADD, sdl, 4612 getValue(I.getArgOperand(0)).getValueType(), 4613 Mul, 4614 getValue(I.getArgOperand(2))); 4615 setValue(&I, Add); 4616 } 4617 return nullptr; 4618 } 4619 case Intrinsic::convert_to_fp16: 4620 setValue(&I, DAG.getNode(ISD::BITCAST, sdl, MVT::i16, 4621 DAG.getNode(ISD::FP_ROUND, sdl, MVT::f16, 4622 getValue(I.getArgOperand(0)), 4623 DAG.getTargetConstant(0, sdl, 4624 MVT::i32)))); 4625 return nullptr; 4626 case Intrinsic::convert_from_fp16: 4627 setValue(&I, DAG.getNode(ISD::FP_EXTEND, sdl, 4628 TLI.getValueType(DAG.getDataLayout(), I.getType()), 4629 DAG.getNode(ISD::BITCAST, sdl, MVT::f16, 4630 getValue(I.getArgOperand(0))))); 4631 return nullptr; 4632 case Intrinsic::pcmarker: { 4633 SDValue Tmp = getValue(I.getArgOperand(0)); 4634 DAG.setRoot(DAG.getNode(ISD::PCMARKER, sdl, MVT::Other, getRoot(), Tmp)); 4635 return nullptr; 4636 } 4637 case Intrinsic::readcyclecounter: { 4638 SDValue Op = getRoot(); 4639 Res = DAG.getNode(ISD::READCYCLECOUNTER, sdl, 4640 DAG.getVTList(MVT::i64, MVT::Other), Op); 4641 setValue(&I, Res); 4642 DAG.setRoot(Res.getValue(1)); 4643 return nullptr; 4644 } 4645 case Intrinsic::bswap: 4646 setValue(&I, DAG.getNode(ISD::BSWAP, sdl, 4647 getValue(I.getArgOperand(0)).getValueType(), 4648 getValue(I.getArgOperand(0)))); 4649 
return nullptr; 4650 case Intrinsic::cttz: { 4651 SDValue Arg = getValue(I.getArgOperand(0)); 4652 ConstantInt *CI = cast<ConstantInt>(I.getArgOperand(1)); 4653 EVT Ty = Arg.getValueType(); 4654 setValue(&I, DAG.getNode(CI->isZero() ? ISD::CTTZ : ISD::CTTZ_ZERO_UNDEF, 4655 sdl, Ty, Arg)); 4656 return nullptr; 4657 } 4658 case Intrinsic::ctlz: { 4659 SDValue Arg = getValue(I.getArgOperand(0)); 4660 ConstantInt *CI = cast<ConstantInt>(I.getArgOperand(1)); 4661 EVT Ty = Arg.getValueType(); 4662 setValue(&I, DAG.getNode(CI->isZero() ? ISD::CTLZ : ISD::CTLZ_ZERO_UNDEF, 4663 sdl, Ty, Arg)); 4664 return nullptr; 4665 } 4666 case Intrinsic::ctpop: { 4667 SDValue Arg = getValue(I.getArgOperand(0)); 4668 EVT Ty = Arg.getValueType(); 4669 setValue(&I, DAG.getNode(ISD::CTPOP, sdl, Ty, Arg)); 4670 return nullptr; 4671 } 4672 case Intrinsic::stacksave: { 4673 SDValue Op = getRoot(); 4674 Res = DAG.getNode( 4675 ISD::STACKSAVE, sdl, 4676 DAG.getVTList(TLI.getPointerTy(DAG.getDataLayout()), MVT::Other), Op); 4677 setValue(&I, Res); 4678 DAG.setRoot(Res.getValue(1)); 4679 return nullptr; 4680 } 4681 case Intrinsic::stackrestore: { 4682 Res = getValue(I.getArgOperand(0)); 4683 DAG.setRoot(DAG.getNode(ISD::STACKRESTORE, sdl, MVT::Other, getRoot(), Res)); 4684 return nullptr; 4685 } 4686 case Intrinsic::stackprotector: { 4687 // Emit code into the DAG to store the stack guard onto the stack. 4688 MachineFunction &MF = DAG.getMachineFunction(); 4689 MachineFrameInfo *MFI = MF.getFrameInfo(); 4690 EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout()); 4691 SDValue Src, Chain = getRoot(); 4692 const Value *Ptr = cast<LoadInst>(I.getArgOperand(0))->getPointerOperand(); 4693 const GlobalVariable *GV = dyn_cast<GlobalVariable>(Ptr); 4694 4695 // See if Ptr is a bitcast. If it is, look through it and see if we can get 4696 // global variable __stack_chk_guard. 4697 if (!GV) 4698 if (const Operator *BC = dyn_cast<Operator>(Ptr)) 4699 if (BC->getOpcode() == Instruction::BitCast) 4700 GV = dyn_cast<GlobalVariable>(BC->getOperand(0)); 4701 4702 if (GV && TLI.useLoadStackGuardNode()) { 4703 // Emit a LOAD_STACK_GUARD node. 4704 MachineSDNode *Node = DAG.getMachineNode(TargetOpcode::LOAD_STACK_GUARD, 4705 sdl, PtrTy, Chain); 4706 MachinePointerInfo MPInfo(GV); 4707 MachineInstr::mmo_iterator MemRefs = MF.allocateMemRefsArray(1); 4708 unsigned Flags = MachineMemOperand::MOLoad | 4709 MachineMemOperand::MOInvariant; 4710 *MemRefs = MF.getMachineMemOperand(MPInfo, Flags, 4711 PtrTy.getSizeInBits() / 8, 4712 DAG.getEVTAlignment(PtrTy)); 4713 Node->setMemRefs(MemRefs, MemRefs + 1); 4714 4715 // Copy the guard value to a virtual register so that it can be 4716 // retrieved in the epilogue. 4717 Src = SDValue(Node, 0); 4718 const TargetRegisterClass *RC = 4719 TLI.getRegClassFor(Src.getSimpleValueType()); 4720 unsigned Reg = MF.getRegInfo().createVirtualRegister(RC); 4721 4722 SPDescriptor.setGuardReg(Reg); 4723 Chain = DAG.getCopyToReg(Chain, sdl, Reg, Src); 4724 } else { 4725 Src = getValue(I.getArgOperand(0)); // The guard's value. 4726 } 4727 4728 AllocaInst *Slot = cast<AllocaInst>(I.getArgOperand(1)); 4729 4730 int FI = FuncInfo.StaticAllocaMap[Slot]; 4731 MFI->setStackProtectorIndex(FI); 4732 4733 SDValue FIN = DAG.getFrameIndex(FI, PtrTy); 4734 4735 // Store the stack protector onto the stack. 
4736 Res = DAG.getStore(Chain, sdl, Src, FIN, 4737 MachinePointerInfo::getFixedStack(FI), 4738 true, false, 0); 4739 setValue(&I, Res); 4740 DAG.setRoot(Res); 4741 return nullptr; 4742 } 4743 case Intrinsic::objectsize: { 4744 // If we don't know by now, we're never going to know. 4745 ConstantInt *CI = dyn_cast<ConstantInt>(I.getArgOperand(1)); 4746 4747 assert(CI && "Non-constant type in __builtin_object_size?"); 4748 4749 SDValue Arg = getValue(I.getCalledValue()); 4750 EVT Ty = Arg.getValueType(); 4751 4752 if (CI->isZero()) 4753 Res = DAG.getConstant(-1ULL, sdl, Ty); 4754 else 4755 Res = DAG.getConstant(0, sdl, Ty); 4756 4757 setValue(&I, Res); 4758 return nullptr; 4759 } 4760 case Intrinsic::annotation: 4761 case Intrinsic::ptr_annotation: 4762 // Drop the intrinsic, but forward the value 4763 setValue(&I, getValue(I.getOperand(0))); 4764 return nullptr; 4765 case Intrinsic::assume: 4766 case Intrinsic::var_annotation: 4767 // Discard annotate attributes and assumptions 4768 return nullptr; 4769 4770 case Intrinsic::init_trampoline: { 4771 const Function *F = cast<Function>(I.getArgOperand(1)->stripPointerCasts()); 4772 4773 SDValue Ops[6]; 4774 Ops[0] = getRoot(); 4775 Ops[1] = getValue(I.getArgOperand(0)); 4776 Ops[2] = getValue(I.getArgOperand(1)); 4777 Ops[3] = getValue(I.getArgOperand(2)); 4778 Ops[4] = DAG.getSrcValue(I.getArgOperand(0)); 4779 Ops[5] = DAG.getSrcValue(F); 4780 4781 Res = DAG.getNode(ISD::INIT_TRAMPOLINE, sdl, MVT::Other, Ops); 4782 4783 DAG.setRoot(Res); 4784 return nullptr; 4785 } 4786 case Intrinsic::adjust_trampoline: { 4787 setValue(&I, DAG.getNode(ISD::ADJUST_TRAMPOLINE, sdl, 4788 TLI.getPointerTy(DAG.getDataLayout()), 4789 getValue(I.getArgOperand(0)))); 4790 return nullptr; 4791 } 4792 case Intrinsic::gcroot: 4793 if (GFI) { 4794 const Value *Alloca = I.getArgOperand(0)->stripPointerCasts(); 4795 const Constant *TypeMap = cast<Constant>(I.getArgOperand(1)); 4796 4797 FrameIndexSDNode *FI = cast<FrameIndexSDNode>(getValue(Alloca).getNode()); 4798 GFI->addStackRoot(FI->getIndex(), TypeMap); 4799 } 4800 return nullptr; 4801 case Intrinsic::gcread: 4802 case Intrinsic::gcwrite: 4803 llvm_unreachable("GC failed to lower gcread/gcwrite intrinsics!"); 4804 case Intrinsic::flt_rounds: 4805 setValue(&I, DAG.getNode(ISD::FLT_ROUNDS_, sdl, MVT::i32)); 4806 return nullptr; 4807 4808 case Intrinsic::expect: { 4809 // Just replace __builtin_expect(exp, c) with EXP. 4810 setValue(&I, getValue(I.getArgOperand(0))); 4811 return nullptr; 4812 } 4813 4814 case Intrinsic::debugtrap: 4815 case Intrinsic::trap: { 4816 StringRef TrapFuncName = 4817 I.getAttributes() 4818 .getAttribute(AttributeSet::FunctionIndex, "trap-func-name") 4819 .getValueAsString(); 4820 if (TrapFuncName.empty()) { 4821 ISD::NodeType Op = (Intrinsic == Intrinsic::trap) ? 
4822 ISD::TRAP : ISD::DEBUGTRAP; 4823 DAG.setRoot(DAG.getNode(Op, sdl,MVT::Other, getRoot())); 4824 return nullptr; 4825 } 4826 TargetLowering::ArgListTy Args; 4827 4828 TargetLowering::CallLoweringInfo CLI(DAG); 4829 CLI.setDebugLoc(sdl).setChain(getRoot()).setCallee( 4830 CallingConv::C, I.getType(), 4831 DAG.getExternalSymbol(TrapFuncName.data(), 4832 TLI.getPointerTy(DAG.getDataLayout())), 4833 std::move(Args), 0); 4834 4835 std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI); 4836 DAG.setRoot(Result.second); 4837 return nullptr; 4838 } 4839 4840 case Intrinsic::uadd_with_overflow: 4841 case Intrinsic::sadd_with_overflow: 4842 case Intrinsic::usub_with_overflow: 4843 case Intrinsic::ssub_with_overflow: 4844 case Intrinsic::umul_with_overflow: 4845 case Intrinsic::smul_with_overflow: { 4846 ISD::NodeType Op; 4847 switch (Intrinsic) { 4848 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here. 4849 case Intrinsic::uadd_with_overflow: Op = ISD::UADDO; break; 4850 case Intrinsic::sadd_with_overflow: Op = ISD::SADDO; break; 4851 case Intrinsic::usub_with_overflow: Op = ISD::USUBO; break; 4852 case Intrinsic::ssub_with_overflow: Op = ISD::SSUBO; break; 4853 case Intrinsic::umul_with_overflow: Op = ISD::UMULO; break; 4854 case Intrinsic::smul_with_overflow: Op = ISD::SMULO; break; 4855 } 4856 SDValue Op1 = getValue(I.getArgOperand(0)); 4857 SDValue Op2 = getValue(I.getArgOperand(1)); 4858 4859 SDVTList VTs = DAG.getVTList(Op1.getValueType(), MVT::i1); 4860 setValue(&I, DAG.getNode(Op, sdl, VTs, Op1, Op2)); 4861 return nullptr; 4862 } 4863 case Intrinsic::prefetch: { 4864 SDValue Ops[5]; 4865 unsigned rw = cast<ConstantInt>(I.getArgOperand(1))->getZExtValue(); 4866 Ops[0] = getRoot(); 4867 Ops[1] = getValue(I.getArgOperand(0)); 4868 Ops[2] = getValue(I.getArgOperand(1)); 4869 Ops[3] = getValue(I.getArgOperand(2)); 4870 Ops[4] = getValue(I.getArgOperand(3)); 4871 DAG.setRoot(DAG.getMemIntrinsicNode(ISD::PREFETCH, sdl, 4872 DAG.getVTList(MVT::Other), Ops, 4873 EVT::getIntegerVT(*Context, 8), 4874 MachinePointerInfo(I.getArgOperand(0)), 4875 0, /* align */ 4876 false, /* volatile */ 4877 rw==0, /* read */ 4878 rw==1)); /* write */ 4879 return nullptr; 4880 } 4881 case Intrinsic::lifetime_start: 4882 case Intrinsic::lifetime_end: { 4883 bool IsStart = (Intrinsic == Intrinsic::lifetime_start); 4884 // Stack coloring is not enabled in O0, discard region information. 4885 if (TM.getOptLevel() == CodeGenOpt::None) 4886 return nullptr; 4887 4888 SmallVector<Value *, 4> Allocas; 4889 GetUnderlyingObjects(I.getArgOperand(1), Allocas, *DL); 4890 4891 for (SmallVectorImpl<Value*>::iterator Object = Allocas.begin(), 4892 E = Allocas.end(); Object != E; ++Object) { 4893 AllocaInst *LifetimeObject = dyn_cast_or_null<AllocaInst>(*Object); 4894 4895 // Could not find an Alloca. 4896 if (!LifetimeObject) 4897 continue; 4898 4899 // First check that the Alloca is static, otherwise it won't have a 4900 // valid frame index. 4901 auto SI = FuncInfo.StaticAllocaMap.find(LifetimeObject); 4902 if (SI == FuncInfo.StaticAllocaMap.end()) 4903 return nullptr; 4904 4905 int FI = SI->second; 4906 4907 SDValue Ops[2]; 4908 Ops[0] = getRoot(); 4909 Ops[1] = 4910 DAG.getFrameIndex(FI, TLI.getPointerTy(DAG.getDataLayout()), true); 4911 unsigned Opcode = (IsStart ? ISD::LIFETIME_START : ISD::LIFETIME_END); 4912 4913 Res = DAG.getNode(Opcode, sdl, MVT::Other, Ops); 4914 DAG.setRoot(Res); 4915 } 4916 return nullptr; 4917 } 4918 case Intrinsic::invariant_start: 4919 // Discard region information. 
4920 setValue(&I, DAG.getUNDEF(TLI.getPointerTy(DAG.getDataLayout()))); 4921 return nullptr; 4922 case Intrinsic::invariant_end: 4923 // Discard region information. 4924 return nullptr; 4925 case Intrinsic::stackprotectorcheck: { 4926 // Do not actually emit anything for this basic block. Instead we initialize 4927 // the stack protector descriptor and export the guard variable so we can 4928 // access it in FinishBasicBlock. 4929 const BasicBlock *BB = I.getParent(); 4930 SPDescriptor.initialize(BB, FuncInfo.MBBMap[BB], I); 4931 ExportFromCurrentBlock(SPDescriptor.getGuard()); 4932 4933 // Flush our exports since we are going to process a terminator. 4934 (void)getControlRoot(); 4935 return nullptr; 4936 } 4937 case Intrinsic::clear_cache: 4938 return TLI.getClearCacheBuiltinName(); 4939 case Intrinsic::eh_actions: 4940 setValue(&I, DAG.getUNDEF(TLI.getPointerTy(DAG.getDataLayout()))); 4941 return nullptr; 4942 case Intrinsic::donothing: 4943 // ignore 4944 return nullptr; 4945 case Intrinsic::experimental_stackmap: { 4946 visitStackmap(I); 4947 return nullptr; 4948 } 4949 case Intrinsic::experimental_patchpoint_void: 4950 case Intrinsic::experimental_patchpoint_i64: { 4951 visitPatchpoint(&I); 4952 return nullptr; 4953 } 4954 case Intrinsic::experimental_gc_statepoint: { 4955 visitStatepoint(I); 4956 return nullptr; 4957 } 4958 case Intrinsic::experimental_gc_result_int: 4959 case Intrinsic::experimental_gc_result_float: 4960 case Intrinsic::experimental_gc_result_ptr: 4961 case Intrinsic::experimental_gc_result: { 4962 visitGCResult(I); 4963 return nullptr; 4964 } 4965 case Intrinsic::experimental_gc_relocate: { 4966 visitGCRelocate(I); 4967 return nullptr; 4968 } 4969 case Intrinsic::instrprof_increment: 4970 llvm_unreachable("instrprof failed to lower an increment"); 4971 4972 case Intrinsic::localescape: { 4973 MachineFunction &MF = DAG.getMachineFunction(); 4974 const TargetInstrInfo *TII = DAG.getSubtarget().getInstrInfo(); 4975 4976 // Directly emit some LOCAL_ESCAPE machine instrs. Label assignment emission 4977 // is the same on all targets. 4978 for (unsigned Idx = 0, E = I.getNumArgOperands(); Idx < E; ++Idx) { 4979 Value *Arg = I.getArgOperand(Idx)->stripPointerCasts(); 4980 if (isa<ConstantPointerNull>(Arg)) 4981 continue; // Skip null pointers. They represent a hole in index space. 4982 AllocaInst *Slot = cast<AllocaInst>(Arg); 4983 assert(FuncInfo.StaticAllocaMap.count(Slot) && 4984 "can only escape static allocas"); 4985 int FI = FuncInfo.StaticAllocaMap[Slot]; 4986 MCSymbol *FrameAllocSym = 4987 MF.getMMI().getContext().getOrCreateFrameAllocSymbol( 4988 GlobalValue::getRealLinkageName(MF.getName()), Idx); 4989 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, dl, 4990 TII->get(TargetOpcode::LOCAL_ESCAPE)) 4991 .addSym(FrameAllocSym) 4992 .addFrameIndex(FI); 4993 } 4994 4995 return nullptr; 4996 } 4997 4998 case Intrinsic::localrecover: { 4999 // i8* @llvm.localrecover(i8* %fn, i8* %fp, i32 %idx) 5000 MachineFunction &MF = DAG.getMachineFunction(); 5001 MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout(), 0); 5002 5003 // Get the symbol that defines the frame offset. 
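// Illustrative note (assumption about the exact spelling): for a parent
// function @f and index 0, getOrCreateFrameAllocSymbol is expected to
// produce a symbol along the lines of "<private-prefix>f$frame_escape_0",
// matching the symbol emitted for llvm.localescape in the LOCAL_ESCAPE
// case above.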
5004 auto *Fn = cast<Function>(I.getArgOperand(0)->stripPointerCasts()); 5005 auto *Idx = cast<ConstantInt>(I.getArgOperand(2)); 5006 unsigned IdxVal = unsigned(Idx->getLimitedValue(INT_MAX)); 5007 MCSymbol *FrameAllocSym = 5008 MF.getMMI().getContext().getOrCreateFrameAllocSymbol( 5009 GlobalValue::getRealLinkageName(Fn->getName()), IdxVal); 5010 5011 // Create a MCSymbol for the label to avoid any target lowering 5012 // that would make this PC relative. 5013 SDValue OffsetSym = DAG.getMCSymbol(FrameAllocSym, PtrVT); 5014 SDValue OffsetVal = 5015 DAG.getNode(ISD::LOCAL_RECOVER, sdl, PtrVT, OffsetSym); 5016 5017 // Add the offset to the FP. 5018 Value *FP = I.getArgOperand(1); 5019 SDValue FPVal = getValue(FP); 5020 SDValue Add = DAG.getNode(ISD::ADD, sdl, PtrVT, FPVal, OffsetVal); 5021 setValue(&I, Add); 5022 5023 return nullptr; 5024 } 5025 case Intrinsic::eh_begincatch: 5026 case Intrinsic::eh_endcatch: 5027 llvm_unreachable("begin/end catch intrinsics not lowered in codegen"); 5028 case Intrinsic::eh_exceptioncode: { 5029 unsigned Reg = TLI.getExceptionPointerRegister(); 5030 assert(Reg && "cannot get exception code on this platform"); 5031 MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout()); 5032 const TargetRegisterClass *PtrRC = TLI.getRegClassFor(PtrVT); 5033 assert(FuncInfo.MBB->isLandingPad() && "eh.exceptioncode in non-lpad"); 5034 unsigned VReg = FuncInfo.MBB->addLiveIn(Reg, PtrRC); 5035 SDValue N = 5036 DAG.getCopyFromReg(DAG.getEntryNode(), getCurSDLoc(), VReg, PtrVT); 5037 N = DAG.getZExtOrTrunc(N, getCurSDLoc(), MVT::i32); 5038 setValue(&I, N); 5039 return nullptr; 5040 } 5041 } 5042 } 5043 5044 std::pair<SDValue, SDValue> 5045 SelectionDAGBuilder::lowerInvokable(TargetLowering::CallLoweringInfo &CLI, 5046 MachineBasicBlock *LandingPad) { 5047 MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI(); 5048 MCSymbol *BeginLabel = nullptr; 5049 5050 if (LandingPad) { 5051 // Insert a label before the invoke call to mark the try range. This can be 5052 // used to detect deletion of the invoke via the MachineModuleInfo. 5053 BeginLabel = MMI.getContext().createTempSymbol(); 5054 5055 // For SjLj, keep track of which landing pads go with which invokes 5056 // so as to maintain the ordering of pads in the LSDA. 5057 unsigned CallSiteIndex = MMI.getCurrentCallSite(); 5058 if (CallSiteIndex) { 5059 MMI.setCallSiteBeginLabel(BeginLabel, CallSiteIndex); 5060 LPadToCallSiteMap[LandingPad].push_back(CallSiteIndex); 5061 5062 // Now that the call site is handled, stop tracking it. 5063 MMI.setCurrentCallSite(0); 5064 } 5065 5066 // Both PendingLoads and PendingExports must be flushed here; 5067 // this call might not return. 5068 (void)getRoot(); 5069 DAG.setRoot(DAG.getEHLabel(getCurSDLoc(), getControlRoot(), BeginLabel)); 5070 5071 CLI.setChain(getRoot()); 5072 } 5073 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 5074 std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI); 5075 5076 assert((CLI.IsTailCall || Result.second.getNode()) && 5077 "Non-null chain expected with non-tail call!"); 5078 assert((Result.second.getNode() || !Result.first.getNode()) && 5079 "Null value expected with tail call!"); 5080 5081 if (!Result.second.getNode()) { 5082 // As a special case, a null chain means that a tail call has been emitted 5083 // and the DAG root is already updated. 5084 HasTailCall = true; 5085 5086 // Since there's no actual continuation from this block, nothing can be 5087 // relying on us setting vregs for them. 
5088 PendingExports.clear(); 5089 } else { 5090 DAG.setRoot(Result.second); 5091 } 5092 5093 if (LandingPad) { 5094 // Insert a label at the end of the invoke call to mark the try range. This 5095 // can be used to detect deletion of the invoke via the MachineModuleInfo. 5096 MCSymbol *EndLabel = MMI.getContext().createTempSymbol(); 5097 DAG.setRoot(DAG.getEHLabel(getCurSDLoc(), getRoot(), EndLabel)); 5098 5099 // Inform MachineModuleInfo of range. 5100 MMI.addInvoke(LandingPad, BeginLabel, EndLabel); 5101 } 5102 5103 return Result; 5104 } 5105 5106 void SelectionDAGBuilder::LowerCallTo(ImmutableCallSite CS, SDValue Callee, 5107 bool isTailCall, 5108 MachineBasicBlock *LandingPad) { 5109 PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType()); 5110 FunctionType *FTy = cast<FunctionType>(PT->getElementType()); 5111 Type *RetTy = FTy->getReturnType(); 5112 5113 TargetLowering::ArgListTy Args; 5114 TargetLowering::ArgListEntry Entry; 5115 Args.reserve(CS.arg_size()); 5116 5117 for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end(); 5118 i != e; ++i) { 5119 const Value *V = *i; 5120 5121 // Skip empty types 5122 if (V->getType()->isEmptyTy()) 5123 continue; 5124 5125 SDValue ArgNode = getValue(V); 5126 Entry.Node = ArgNode; Entry.Ty = V->getType(); 5127 5128 // Skip the first return-type Attribute to get to params. 5129 Entry.setAttributes(&CS, i - CS.arg_begin() + 1); 5130 Args.push_back(Entry); 5131 5132 // If we have an explicit sret argument that is an Instruction, (i.e., it 5133 // might point to function-local memory), we can't meaningfully tail-call. 5134 if (Entry.isSRet && isa<Instruction>(V)) 5135 isTailCall = false; 5136 } 5137 5138 // Check if target-independent constraints permit a tail call here. 5139 // Target-dependent constraints are checked within TLI->LowerCallTo. 5140 if (isTailCall && !isInTailCallPosition(CS, DAG.getTarget())) 5141 isTailCall = false; 5142 5143 TargetLowering::CallLoweringInfo CLI(DAG); 5144 CLI.setDebugLoc(getCurSDLoc()).setChain(getRoot()) 5145 .setCallee(RetTy, FTy, Callee, std::move(Args), CS) 5146 .setTailCall(isTailCall); 5147 std::pair<SDValue,SDValue> Result = lowerInvokable(CLI, LandingPad); 5148 5149 if (Result.first.getNode()) 5150 setValue(CS.getInstruction(), Result.first); 5151 } 5152 5153 /// IsOnlyUsedInZeroEqualityComparison - Return true if it only matters that the 5154 /// value is equal or not-equal to zero. 5155 static bool IsOnlyUsedInZeroEqualityComparison(const Value *V) { 5156 for (const User *U : V->users()) { 5157 if (const ICmpInst *IC = dyn_cast<ICmpInst>(U)) 5158 if (IC->isEquality()) 5159 if (const Constant *C = dyn_cast<Constant>(IC->getOperand(1))) 5160 if (C->isNullValue()) 5161 continue; 5162 // Unknown instruction. 5163 return false; 5164 } 5165 return true; 5166 } 5167 5168 static SDValue getMemCmpLoad(const Value *PtrVal, MVT LoadVT, 5169 Type *LoadTy, 5170 SelectionDAGBuilder &Builder) { 5171 5172 // Check to see if this load can be trivially constant folded, e.g. if the 5173 // input is from a string literal. 5174 if (const Constant *LoadInput = dyn_cast<Constant>(PtrVal)) { 5175 // Cast pointer to the type we really want to load. 5176 LoadInput = ConstantExpr::getBitCast(const_cast<Constant *>(LoadInput), 5177 PointerType::getUnqual(LoadTy)); 5178 5179 if (const Constant *LoadCst = ConstantFoldLoadFromConstPtr( 5180 const_cast<Constant *>(LoadInput), *Builder.DL)) 5181 return Builder.getValue(LoadCst); 5182 } 5183 5184 // Otherwise, we have to emit the load. 
If the pointer is to unfoldable but 5185 // still constant memory, the input chain can be the entry node. 5186 SDValue Root; 5187 bool ConstantMemory = false; 5188 5189 // Do not serialize (non-volatile) loads of constant memory with anything. 5190 if (Builder.AA->pointsToConstantMemory(PtrVal)) { 5191 Root = Builder.DAG.getEntryNode(); 5192 ConstantMemory = true; 5193 } else { 5194 // Do not serialize non-volatile loads against each other. 5195 Root = Builder.DAG.getRoot(); 5196 } 5197 5198 SDValue Ptr = Builder.getValue(PtrVal); 5199 SDValue LoadVal = Builder.DAG.getLoad(LoadVT, Builder.getCurSDLoc(), Root, 5200 Ptr, MachinePointerInfo(PtrVal), 5201 false /*volatile*/, 5202 false /*nontemporal*/, 5203 false /*isinvariant*/, 1 /* align=1 */); 5204 5205 if (!ConstantMemory) 5206 Builder.PendingLoads.push_back(LoadVal.getValue(1)); 5207 return LoadVal; 5208 } 5209 5210 /// processIntegerCallValue - Record the value for an instruction that 5211 /// produces an integer result, converting the type where necessary. 5212 void SelectionDAGBuilder::processIntegerCallValue(const Instruction &I, 5213 SDValue Value, 5214 bool IsSigned) { 5215 EVT VT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(), 5216 I.getType(), true); 5217 if (IsSigned) 5218 Value = DAG.getSExtOrTrunc(Value, getCurSDLoc(), VT); 5219 else 5220 Value = DAG.getZExtOrTrunc(Value, getCurSDLoc(), VT); 5221 setValue(&I, Value); 5222 } 5223 5224 /// visitMemCmpCall - See if we can lower a call to memcmp in an optimized form. 5225 /// If so, return true and lower it, otherwise return false and it will be 5226 /// lowered like a normal call. 5227 bool SelectionDAGBuilder::visitMemCmpCall(const CallInst &I) { 5228 // Verify that the prototype makes sense. int memcmp(void*,void*,size_t) 5229 if (I.getNumArgOperands() != 3) 5230 return false; 5231 5232 const Value *LHS = I.getArgOperand(0), *RHS = I.getArgOperand(1); 5233 if (!LHS->getType()->isPointerTy() || !RHS->getType()->isPointerTy() || 5234 !I.getArgOperand(2)->getType()->isIntegerTy() || 5235 !I.getType()->isIntegerTy()) 5236 return false; 5237 5238 const Value *Size = I.getArgOperand(2); 5239 const ConstantInt *CSize = dyn_cast<ConstantInt>(Size); 5240 if (CSize && CSize->getZExtValue() == 0) { 5241 EVT CallVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(), 5242 I.getType(), true); 5243 setValue(&I, DAG.getConstant(0, getCurSDLoc(), CallVT)); 5244 return true; 5245 } 5246 5247 const TargetSelectionDAGInfo &TSI = DAG.getSelectionDAGInfo(); 5248 std::pair<SDValue, SDValue> Res = 5249 TSI.EmitTargetCodeForMemcmp(DAG, getCurSDLoc(), DAG.getRoot(), 5250 getValue(LHS), getValue(RHS), getValue(Size), 5251 MachinePointerInfo(LHS), 5252 MachinePointerInfo(RHS)); 5253 if (Res.first.getNode()) { 5254 processIntegerCallValue(I, Res.first, true); 5255 PendingLoads.push_back(Res.second); 5256 return true; 5257 } 5258 5259 // memcmp(S1,S2,2) != 0 -> (*(short*)LHS != *(short*)RHS) != 0 5260 // memcmp(S1,S2,4) != 0 -> (*(int*)LHS != *(int*)RHS) != 0 5261 if (CSize && IsOnlyUsedInZeroEqualityComparison(&I)) { 5262 bool ActuallyDoIt = true; 5263 MVT LoadVT; 5264 Type *LoadTy; 5265 switch (CSize->getZExtValue()) { 5266 default: 5267 LoadVT = MVT::Other; 5268 LoadTy = nullptr; 5269 ActuallyDoIt = false; 5270 break; 5271 case 2: 5272 LoadVT = MVT::i16; 5273 LoadTy = Type::getInt16Ty(CSize->getContext()); 5274 break; 5275 case 4: 5276 LoadVT = MVT::i32; 5277 LoadTy = Type::getInt32Ty(CSize->getContext()); 5278 break; 5279 case 8: 5280 LoadVT = MVT::i64; 5281 LoadTy = 
Type::getInt64Ty(CSize->getContext()); 5282 break; 5283 /* 5284 case 16: 5285 LoadVT = MVT::v4i32; 5286 LoadTy = Type::getInt32Ty(CSize->getContext()); 5287 LoadTy = VectorType::get(LoadTy, 4); 5288 break; 5289 */ 5290 } 5291 5292 // This turns into unaligned loads. We only do this if the target natively 5293 // supports the MVT we'll be loading or if it is small enough (<= 4) that 5294 // we'll only produce a small number of byte loads. 5295 5296 // Require that we can find a legal MVT, and only do this if the target 5297 // supports unaligned loads of that type. Expanding into byte loads would 5298 // bloat the code. 5299 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 5300 if (ActuallyDoIt && CSize->getZExtValue() > 4) { 5301 unsigned DstAS = LHS->getType()->getPointerAddressSpace(); 5302 unsigned SrcAS = RHS->getType()->getPointerAddressSpace(); 5303 // TODO: Handle 5 byte compare as 4-byte + 1 byte. 5304 // TODO: Handle 8 byte compare on x86-32 as two 32-bit loads. 5305 // TODO: Check alignment of src and dest ptrs. 5306 if (!TLI.isTypeLegal(LoadVT) || 5307 !TLI.allowsMisalignedMemoryAccesses(LoadVT, SrcAS) || 5308 !TLI.allowsMisalignedMemoryAccesses(LoadVT, DstAS)) 5309 ActuallyDoIt = false; 5310 } 5311 5312 if (ActuallyDoIt) { 5313 SDValue LHSVal = getMemCmpLoad(LHS, LoadVT, LoadTy, *this); 5314 SDValue RHSVal = getMemCmpLoad(RHS, LoadVT, LoadTy, *this); 5315 5316 SDValue Res = DAG.getSetCC(getCurSDLoc(), MVT::i1, LHSVal, RHSVal, 5317 ISD::SETNE); 5318 processIntegerCallValue(I, Res, false); 5319 return true; 5320 } 5321 } 5322 5323 5324 return false; 5325 } 5326 5327 /// visitMemChrCall -- See if we can lower a memchr call into an optimized 5328 /// form. If so, return true and lower it, otherwise return false and it 5329 /// will be lowered like a normal call. 5330 bool SelectionDAGBuilder::visitMemChrCall(const CallInst &I) { 5331 // Verify that the prototype makes sense. void *memchr(void *, int, size_t) 5332 if (I.getNumArgOperands() != 3) 5333 return false; 5334 5335 const Value *Src = I.getArgOperand(0); 5336 const Value *Char = I.getArgOperand(1); 5337 const Value *Length = I.getArgOperand(2); 5338 if (!Src->getType()->isPointerTy() || 5339 !Char->getType()->isIntegerTy() || 5340 !Length->getType()->isIntegerTy() || 5341 !I.getType()->isPointerTy()) 5342 return false; 5343 5344 const TargetSelectionDAGInfo &TSI = DAG.getSelectionDAGInfo(); 5345 std::pair<SDValue, SDValue> Res = 5346 TSI.EmitTargetCodeForMemchr(DAG, getCurSDLoc(), DAG.getRoot(), 5347 getValue(Src), getValue(Char), getValue(Length), 5348 MachinePointerInfo(Src)); 5349 if (Res.first.getNode()) { 5350 setValue(&I, Res.first); 5351 PendingLoads.push_back(Res.second); 5352 return true; 5353 } 5354 5355 return false; 5356 } 5357 5358 /// visitStrCpyCall -- See if we can lower a strcpy or stpcpy call into an 5359 /// optimized form. If so, return true and lower it, otherwise return false 5360 /// and it will be lowered like a normal call. 5361 bool SelectionDAGBuilder::visitStrCpyCall(const CallInst &I, bool isStpcpy) { 5362 // Verify that the prototype makes sense. 
char *strcpy(char *, char *) 5363 if (I.getNumArgOperands() != 2) 5364 return false; 5365 5366 const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1); 5367 if (!Arg0->getType()->isPointerTy() || 5368 !Arg1->getType()->isPointerTy() || 5369 !I.getType()->isPointerTy()) 5370 return false; 5371 5372 const TargetSelectionDAGInfo &TSI = DAG.getSelectionDAGInfo(); 5373 std::pair<SDValue, SDValue> Res = 5374 TSI.EmitTargetCodeForStrcpy(DAG, getCurSDLoc(), getRoot(), 5375 getValue(Arg0), getValue(Arg1), 5376 MachinePointerInfo(Arg0), 5377 MachinePointerInfo(Arg1), isStpcpy); 5378 if (Res.first.getNode()) { 5379 setValue(&I, Res.first); 5380 DAG.setRoot(Res.second); 5381 return true; 5382 } 5383 5384 return false; 5385 } 5386 5387 /// visitStrCmpCall - See if we can lower a call to strcmp in an optimized form. 5388 /// If so, return true and lower it, otherwise return false and it will be 5389 /// lowered like a normal call. 5390 bool SelectionDAGBuilder::visitStrCmpCall(const CallInst &I) { 5391 // Verify that the prototype makes sense. int strcmp(void*,void*) 5392 if (I.getNumArgOperands() != 2) 5393 return false; 5394 5395 const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1); 5396 if (!Arg0->getType()->isPointerTy() || 5397 !Arg1->getType()->isPointerTy() || 5398 !I.getType()->isIntegerTy()) 5399 return false; 5400 5401 const TargetSelectionDAGInfo &TSI = DAG.getSelectionDAGInfo(); 5402 std::pair<SDValue, SDValue> Res = 5403 TSI.EmitTargetCodeForStrcmp(DAG, getCurSDLoc(), DAG.getRoot(), 5404 getValue(Arg0), getValue(Arg1), 5405 MachinePointerInfo(Arg0), 5406 MachinePointerInfo(Arg1)); 5407 if (Res.first.getNode()) { 5408 processIntegerCallValue(I, Res.first, true); 5409 PendingLoads.push_back(Res.second); 5410 return true; 5411 } 5412 5413 return false; 5414 } 5415 5416 /// visitStrLenCall -- See if we can lower a strlen call into an optimized 5417 /// form. If so, return true and lower it, otherwise return false and it 5418 /// will be lowered like a normal call. 5419 bool SelectionDAGBuilder::visitStrLenCall(const CallInst &I) { 5420 // Verify that the prototype makes sense. size_t strlen(char *) 5421 if (I.getNumArgOperands() != 1) 5422 return false; 5423 5424 const Value *Arg0 = I.getArgOperand(0); 5425 if (!Arg0->getType()->isPointerTy() || !I.getType()->isIntegerTy()) 5426 return false; 5427 5428 const TargetSelectionDAGInfo &TSI = DAG.getSelectionDAGInfo(); 5429 std::pair<SDValue, SDValue> Res = 5430 TSI.EmitTargetCodeForStrlen(DAG, getCurSDLoc(), DAG.getRoot(), 5431 getValue(Arg0), MachinePointerInfo(Arg0)); 5432 if (Res.first.getNode()) { 5433 processIntegerCallValue(I, Res.first, false); 5434 PendingLoads.push_back(Res.second); 5435 return true; 5436 } 5437 5438 return false; 5439 } 5440 5441 /// visitStrNLenCall -- See if we can lower a strnlen call into an optimized 5442 /// form. If so, return true and lower it, otherwise return false and it 5443 /// will be lowered like a normal call. 5444 bool SelectionDAGBuilder::visitStrNLenCall(const CallInst &I) { 5445 // Verify that the prototype makes sense. 
size_t strnlen(char *, size_t) 5446 if (I.getNumArgOperands() != 2) 5447 return false; 5448 5449 const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1); 5450 if (!Arg0->getType()->isPointerTy() || 5451 !Arg1->getType()->isIntegerTy() || 5452 !I.getType()->isIntegerTy()) 5453 return false; 5454 5455 const TargetSelectionDAGInfo &TSI = DAG.getSelectionDAGInfo(); 5456 std::pair<SDValue, SDValue> Res = 5457 TSI.EmitTargetCodeForStrnlen(DAG, getCurSDLoc(), DAG.getRoot(), 5458 getValue(Arg0), getValue(Arg1), 5459 MachinePointerInfo(Arg0)); 5460 if (Res.first.getNode()) { 5461 processIntegerCallValue(I, Res.first, false); 5462 PendingLoads.push_back(Res.second); 5463 return true; 5464 } 5465 5466 return false; 5467 } 5468 5469 /// visitUnaryFloatCall - If a call instruction is a unary floating-point 5470 /// operation (as expected), translate it to an SDNode with the specified opcode 5471 /// and return true. 5472 bool SelectionDAGBuilder::visitUnaryFloatCall(const CallInst &I, 5473 unsigned Opcode) { 5474 // Sanity check that it really is a unary floating-point call. 5475 if (I.getNumArgOperands() != 1 || 5476 !I.getArgOperand(0)->getType()->isFloatingPointTy() || 5477 I.getType() != I.getArgOperand(0)->getType() || 5478 !I.onlyReadsMemory()) 5479 return false; 5480 5481 SDValue Tmp = getValue(I.getArgOperand(0)); 5482 setValue(&I, DAG.getNode(Opcode, getCurSDLoc(), Tmp.getValueType(), Tmp)); 5483 return true; 5484 } 5485 5486 /// visitBinaryFloatCall - If a call instruction is a binary floating-point 5487 /// operation (as expected), translate it to an SDNode with the specified opcode 5488 /// and return true. 5489 bool SelectionDAGBuilder::visitBinaryFloatCall(const CallInst &I, 5490 unsigned Opcode) { 5491 // Sanity check that it really is a binary floating-point call. 5492 if (I.getNumArgOperands() != 2 || 5493 !I.getArgOperand(0)->getType()->isFloatingPointTy() || 5494 I.getType() != I.getArgOperand(0)->getType() || 5495 I.getType() != I.getArgOperand(1)->getType() || 5496 !I.onlyReadsMemory()) 5497 return false; 5498 5499 SDValue Tmp0 = getValue(I.getArgOperand(0)); 5500 SDValue Tmp1 = getValue(I.getArgOperand(1)); 5501 EVT VT = Tmp0.getValueType(); 5502 setValue(&I, DAG.getNode(Opcode, getCurSDLoc(), VT, Tmp0, Tmp1)); 5503 return true; 5504 } 5505 5506 void SelectionDAGBuilder::visitCall(const CallInst &I) { 5507 // Handle inline assembly differently. 5508 if (isa<InlineAsm>(I.getCalledValue())) { 5509 visitInlineAsm(&I); 5510 return; 5511 } 5512 5513 MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI(); 5514 ComputeUsesVAFloatArgument(I, &MMI); 5515 5516 const char *RenameFn = nullptr; 5517 if (Function *F = I.getCalledFunction()) { 5518 if (F->isDeclaration()) { 5519 if (const TargetIntrinsicInfo *II = TM.getIntrinsicInfo()) { 5520 if (unsigned IID = II->getIntrinsicID(F)) { 5521 RenameFn = visitIntrinsicCall(I, IID); 5522 if (!RenameFn) 5523 return; 5524 } 5525 } 5526 if (Intrinsic::ID IID = F->getIntrinsicID()) { 5527 RenameFn = visitIntrinsicCall(I, IID); 5528 if (!RenameFn) 5529 return; 5530 } 5531 } 5532 5533 // Check for well-known libc/libm calls. If the function is internal, it 5534 // can't be a library call. 
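// Illustrative example (assumed IR, not from this file): a read-only call
// to a recognized libm function, e.g.
//   %r = call double @sqrt(double %x) readnone
// can be selected directly to an FSQRT node by the switch below instead of
// being emitted as a real libcall.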
5535 LibFunc::Func Func; 5536 if (!F->hasLocalLinkage() && F->hasName() && 5537 LibInfo->getLibFunc(F->getName(), Func) && 5538 LibInfo->hasOptimizedCodeGen(Func)) { 5539 switch (Func) { 5540 default: break; 5541 case LibFunc::copysign: 5542 case LibFunc::copysignf: 5543 case LibFunc::copysignl: 5544 if (I.getNumArgOperands() == 2 && // Basic sanity checks. 5545 I.getArgOperand(0)->getType()->isFloatingPointTy() && 5546 I.getType() == I.getArgOperand(0)->getType() && 5547 I.getType() == I.getArgOperand(1)->getType() && 5548 I.onlyReadsMemory()) { 5549 SDValue LHS = getValue(I.getArgOperand(0)); 5550 SDValue RHS = getValue(I.getArgOperand(1)); 5551 setValue(&I, DAG.getNode(ISD::FCOPYSIGN, getCurSDLoc(), 5552 LHS.getValueType(), LHS, RHS)); 5553 return; 5554 } 5555 break; 5556 case LibFunc::fabs: 5557 case LibFunc::fabsf: 5558 case LibFunc::fabsl: 5559 if (visitUnaryFloatCall(I, ISD::FABS)) 5560 return; 5561 break; 5562 case LibFunc::fmin: 5563 case LibFunc::fminf: 5564 case LibFunc::fminl: 5565 if (visitBinaryFloatCall(I, ISD::FMINNUM)) 5566 return; 5567 break; 5568 case LibFunc::fmax: 5569 case LibFunc::fmaxf: 5570 case LibFunc::fmaxl: 5571 if (visitBinaryFloatCall(I, ISD::FMAXNUM)) 5572 return; 5573 break; 5574 case LibFunc::sin: 5575 case LibFunc::sinf: 5576 case LibFunc::sinl: 5577 if (visitUnaryFloatCall(I, ISD::FSIN)) 5578 return; 5579 break; 5580 case LibFunc::cos: 5581 case LibFunc::cosf: 5582 case LibFunc::cosl: 5583 if (visitUnaryFloatCall(I, ISD::FCOS)) 5584 return; 5585 break; 5586 case LibFunc::sqrt: 5587 case LibFunc::sqrtf: 5588 case LibFunc::sqrtl: 5589 case LibFunc::sqrt_finite: 5590 case LibFunc::sqrtf_finite: 5591 case LibFunc::sqrtl_finite: 5592 if (visitUnaryFloatCall(I, ISD::FSQRT)) 5593 return; 5594 break; 5595 case LibFunc::floor: 5596 case LibFunc::floorf: 5597 case LibFunc::floorl: 5598 if (visitUnaryFloatCall(I, ISD::FFLOOR)) 5599 return; 5600 break; 5601 case LibFunc::nearbyint: 5602 case LibFunc::nearbyintf: 5603 case LibFunc::nearbyintl: 5604 if (visitUnaryFloatCall(I, ISD::FNEARBYINT)) 5605 return; 5606 break; 5607 case LibFunc::ceil: 5608 case LibFunc::ceilf: 5609 case LibFunc::ceill: 5610 if (visitUnaryFloatCall(I, ISD::FCEIL)) 5611 return; 5612 break; 5613 case LibFunc::rint: 5614 case LibFunc::rintf: 5615 case LibFunc::rintl: 5616 if (visitUnaryFloatCall(I, ISD::FRINT)) 5617 return; 5618 break; 5619 case LibFunc::round: 5620 case LibFunc::roundf: 5621 case LibFunc::roundl: 5622 if (visitUnaryFloatCall(I, ISD::FROUND)) 5623 return; 5624 break; 5625 case LibFunc::trunc: 5626 case LibFunc::truncf: 5627 case LibFunc::truncl: 5628 if (visitUnaryFloatCall(I, ISD::FTRUNC)) 5629 return; 5630 break; 5631 case LibFunc::log2: 5632 case LibFunc::log2f: 5633 case LibFunc::log2l: 5634 if (visitUnaryFloatCall(I, ISD::FLOG2)) 5635 return; 5636 break; 5637 case LibFunc::exp2: 5638 case LibFunc::exp2f: 5639 case LibFunc::exp2l: 5640 if (visitUnaryFloatCall(I, ISD::FEXP2)) 5641 return; 5642 break; 5643 case LibFunc::memcmp: 5644 if (visitMemCmpCall(I)) 5645 return; 5646 break; 5647 case LibFunc::memchr: 5648 if (visitMemChrCall(I)) 5649 return; 5650 break; 5651 case LibFunc::strcpy: 5652 if (visitStrCpyCall(I, false)) 5653 return; 5654 break; 5655 case LibFunc::stpcpy: 5656 if (visitStrCpyCall(I, true)) 5657 return; 5658 break; 5659 case LibFunc::strcmp: 5660 if (visitStrCmpCall(I)) 5661 return; 5662 break; 5663 case LibFunc::strlen: 5664 if (visitStrLenCall(I)) 5665 return; 5666 break; 5667 case LibFunc::strnlen: 5668 if (visitStrNLenCall(I)) 5669 return; 5670 break; 5671 } 
5672 } 5673 } 5674 5675 SDValue Callee; 5676 if (!RenameFn) 5677 Callee = getValue(I.getCalledValue()); 5678 else 5679 Callee = DAG.getExternalSymbol( 5680 RenameFn, 5681 DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout())); 5682 5683 // Check if we can potentially perform a tail call. More detailed checking is 5684 // done within LowerCallTo, after more information about the call is known. 5685 LowerCallTo(&I, Callee, I.isTailCall()); 5686 } 5687 5688 namespace { 5689 5690 /// AsmOperandInfo - This contains information for each constraint that we are 5691 /// lowering. 5692 class SDISelAsmOperandInfo : public TargetLowering::AsmOperandInfo { 5693 public: 5694 /// CallOperand - If this is the result output operand or a clobber 5695 /// this is null, otherwise it is the incoming operand to the CallInst. 5696 /// This gets modified as the asm is processed. 5697 SDValue CallOperand; 5698 5699 /// AssignedRegs - If this is a register or register class operand, this 5700 /// contains the set of registers corresponding to the operand. 5701 RegsForValue AssignedRegs; 5702 5703 explicit SDISelAsmOperandInfo(const TargetLowering::AsmOperandInfo &info) 5704 : TargetLowering::AsmOperandInfo(info), CallOperand(nullptr,0) { 5705 } 5706 5707 /// getCallOperandValEVT - Return the EVT of the Value* that this operand 5708 /// corresponds to. If there is no Value* for this operand, it returns 5709 /// MVT::Other. 5710 EVT getCallOperandValEVT(LLVMContext &Context, const TargetLowering &TLI, 5711 const DataLayout &DL) const { 5712 if (!CallOperandVal) return MVT::Other; 5713 5714 if (isa<BasicBlock>(CallOperandVal)) 5715 return TLI.getPointerTy(DL); 5716 5717 llvm::Type *OpTy = CallOperandVal->getType(); 5718 5719 // FIXME: code duplicated from TargetLowering::ParseConstraints(). 5720 // If this is an indirect operand, the operand is a pointer to the 5721 // accessed type. 5722 if (isIndirect) { 5723 llvm::PointerType *PtrTy = dyn_cast<PointerType>(OpTy); 5724 if (!PtrTy) 5725 report_fatal_error("Indirect operand for inline asm not a pointer!"); 5726 OpTy = PtrTy->getElementType(); 5727 } 5728 5729 // Look for a vector wrapped in a struct, e.g. { <16 x i8> }. 5730 if (StructType *STy = dyn_cast<StructType>(OpTy)) 5731 if (STy->getNumElements() == 1) 5732 OpTy = STy->getElementType(0); 5733 5734 // If OpTy is not a single value, it may be a struct/union that we 5735 // can tile with integers. 5736 if (!OpTy->isSingleValueType() && OpTy->isSized()) { 5737 unsigned BitSize = DL.getTypeSizeInBits(OpTy); 5738 switch (BitSize) { 5739 default: break; 5740 case 1: 5741 case 8: 5742 case 16: 5743 case 32: 5744 case 64: 5745 case 128: 5746 OpTy = IntegerType::get(Context, BitSize); 5747 break; 5748 } 5749 } 5750 5751 return TLI.getValueType(DL, OpTy, true); 5752 } 5753 }; 5754 5755 typedef SmallVector<SDISelAsmOperandInfo,16> SDISelAsmOperandInfoVector; 5756 5757 } // end anonymous namespace 5758 5759 /// GetRegistersForValue - Assign registers (virtual or physical) for the 5760 /// specified operand. We prefer to assign virtual registers, to allow the 5761 /// register allocator to handle the assignment process. However, if the asm 5762 /// uses features that we can't model on machineinstrs, we have SDISel do the 5763 /// allocation. This produces generally horrible, but correct, code. 5764 /// 5765 /// OpInfo describes the operand.
5766 /// 5767 static void GetRegistersForValue(SelectionDAG &DAG, 5768 const TargetLowering &TLI, 5769 SDLoc DL, 5770 SDISelAsmOperandInfo &OpInfo) { 5771 LLVMContext &Context = *DAG.getContext(); 5772 5773 MachineFunction &MF = DAG.getMachineFunction(); 5774 SmallVector<unsigned, 4> Regs; 5775 5776 // If this is a constraint for a single physreg, or a constraint for a 5777 // register class, find it. 5778 std::pair<unsigned, const TargetRegisterClass *> PhysReg = 5779 TLI.getRegForInlineAsmConstraint(MF.getSubtarget().getRegisterInfo(), 5780 OpInfo.ConstraintCode, 5781 OpInfo.ConstraintVT); 5782 5783 unsigned NumRegs = 1; 5784 if (OpInfo.ConstraintVT != MVT::Other) { 5785 // If this is an FP input in an integer register (or vice versa) insert a bit 5786 // cast of the input value. More generally, handle any case where the input 5787 // value disagrees with the register class we plan to stick this in. 5788 if (OpInfo.Type == InlineAsm::isInput && 5789 PhysReg.second && !PhysReg.second->hasType(OpInfo.ConstraintVT)) { 5790 // Try to convert to the first EVT that the reg class contains. If the 5791 // types have identical sizes, use a bitcast to convert (e.g. two differing 5792 // vector types). 5793 MVT RegVT = *PhysReg.second->vt_begin(); 5794 if (RegVT.getSizeInBits() == OpInfo.CallOperand.getValueSizeInBits()) { 5795 OpInfo.CallOperand = DAG.getNode(ISD::BITCAST, DL, 5796 RegVT, OpInfo.CallOperand); 5797 OpInfo.ConstraintVT = RegVT; 5798 } else if (RegVT.isInteger() && OpInfo.ConstraintVT.isFloatingPoint()) { 5799 // If the input is an FP value and we want it in integer registers, do a 5800 // bitcast to the corresponding integer type. This turns an f64 value 5801 // into i64, which can be passed with two i32 values on a 32-bit 5802 // machine. 5803 RegVT = MVT::getIntegerVT(OpInfo.ConstraintVT.getSizeInBits()); 5804 OpInfo.CallOperand = DAG.getNode(ISD::BITCAST, DL, 5805 RegVT, OpInfo.CallOperand); 5806 OpInfo.ConstraintVT = RegVT; 5807 } 5808 } 5809 5810 NumRegs = TLI.getNumRegisters(Context, OpInfo.ConstraintVT); 5811 } 5812 5813 MVT RegVT; 5814 EVT ValueVT = OpInfo.ConstraintVT; 5815 5816 // If this is a constraint for a specific physical register, like {r17}, 5817 // assign it now. 5818 if (unsigned AssignedReg = PhysReg.first) { 5819 const TargetRegisterClass *RC = PhysReg.second; 5820 if (OpInfo.ConstraintVT == MVT::Other) 5821 ValueVT = *RC->vt_begin(); 5822 5823 // Get the actual register value type. This is important, because the user 5824 // may have asked for (e.g.) the AX register in i32 type. We need to 5825 // remember that AX is actually i16 to get the right extension. 5826 RegVT = *RC->vt_begin(); 5827 5828 // This is an explicit reference to a physical register. 5829 Regs.push_back(AssignedReg); 5830 5831 // If this is an expanded reference, add the rest of the regs to Regs. 5832 if (NumRegs != 1) { 5833 TargetRegisterClass::iterator I = RC->begin(); 5834 for (; *I != AssignedReg; ++I) 5835 assert(I != RC->end() && "Didn't find reg!"); 5836 5837 // Already added the first reg. 5838 --NumRegs; ++I; 5839 for (; NumRegs; --NumRegs, ++I) { 5840 assert(I != RC->end() && "Ran out of registers to allocate!"); 5841 Regs.push_back(*I); 5842 } 5843 } 5844 5845 OpInfo.AssignedRegs = RegsForValue(Regs, RegVT, ValueVT); 5846 return; 5847 } 5848 5849 // Otherwise, if this was a reference to an LLVM register class, create vregs 5850 // for this reference.
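// Illustrative example (assumption: x86): a constraint such as "r" with an
// i32 operand names a register class (e.g. GR32) rather than one fixed
// register, so fresh virtual registers are created here and left to the
// register allocator, unlike the explicit "{ax}"-style case handled above.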
5851 if (const TargetRegisterClass *RC = PhysReg.second) { 5852 RegVT = *RC->vt_begin(); 5853 if (OpInfo.ConstraintVT == MVT::Other) 5854 ValueVT = RegVT; 5855 5856 // Create the appropriate number of virtual registers. 5857 MachineRegisterInfo &RegInfo = MF.getRegInfo(); 5858 for (; NumRegs; --NumRegs) 5859 Regs.push_back(RegInfo.createVirtualRegister(RC)); 5860 5861 OpInfo.AssignedRegs = RegsForValue(Regs, RegVT, ValueVT); 5862 return; 5863 } 5864 5865 // Otherwise, we couldn't allocate enough registers for this. 5866 } 5867 5868 /// visitInlineAsm - Handle a call to an InlineAsm object. 5869 /// 5870 void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) { 5871 const InlineAsm *IA = cast<InlineAsm>(CS.getCalledValue()); 5872 5873 /// ConstraintOperands - Information about all of the constraints. 5874 SDISelAsmOperandInfoVector ConstraintOperands; 5875 5876 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 5877 TargetLowering::AsmOperandInfoVector TargetConstraints = TLI.ParseConstraints( 5878 DAG.getDataLayout(), DAG.getSubtarget().getRegisterInfo(), CS); 5879 5880 bool hasMemory = false; 5881 5882 unsigned ArgNo = 0; // ArgNo - The argument of the CallInst. 5883 unsigned ResNo = 0; // ResNo - The result number of the next output. 5884 for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) { 5885 ConstraintOperands.push_back(SDISelAsmOperandInfo(TargetConstraints[i])); 5886 SDISelAsmOperandInfo &OpInfo = ConstraintOperands.back(); 5887 5888 MVT OpVT = MVT::Other; 5889 5890 // Compute the value type for each operand. 5891 switch (OpInfo.Type) { 5892 case InlineAsm::isOutput: 5893 // Indirect outputs just consume an argument. 5894 if (OpInfo.isIndirect) { 5895 OpInfo.CallOperandVal = const_cast<Value *>(CS.getArgument(ArgNo++)); 5896 break; 5897 } 5898 5899 // The return value of the call is this value. As such, there is no 5900 // corresponding argument. 5901 assert(!CS.getType()->isVoidTy() && "Bad inline asm!"); 5902 if (StructType *STy = dyn_cast<StructType>(CS.getType())) { 5903 OpVT = TLI.getSimpleValueType(DAG.getDataLayout(), 5904 STy->getElementType(ResNo)); 5905 } else { 5906 assert(ResNo == 0 && "Asm only has one result!"); 5907 OpVT = TLI.getSimpleValueType(DAG.getDataLayout(), CS.getType()); 5908 } 5909 ++ResNo; 5910 break; 5911 case InlineAsm::isInput: 5912 OpInfo.CallOperandVal = const_cast<Value *>(CS.getArgument(ArgNo++)); 5913 break; 5914 case InlineAsm::isClobber: 5915 // Nothing to do. 5916 break; 5917 } 5918 5919 // If this is an input or an indirect output, process the call argument. 5920 // BasicBlocks are labels, currently appearing only in asm's. 5921 if (OpInfo.CallOperandVal) { 5922 if (const BasicBlock *BB = dyn_cast<BasicBlock>(OpInfo.CallOperandVal)) { 5923 OpInfo.CallOperand = DAG.getBasicBlock(FuncInfo.MBBMap[BB]); 5924 } else { 5925 OpInfo.CallOperand = getValue(OpInfo.CallOperandVal); 5926 } 5927 5928 OpVT = OpInfo.getCallOperandValEVT(*DAG.getContext(), TLI, 5929 DAG.getDataLayout()).getSimpleVT(); 5930 } 5931 5932 OpInfo.ConstraintVT = OpVT; 5933 5934 // Indirect operand accesses access memory. 
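// Illustrative example (assumed IR): an indirect output such as
//   call void asm "movl $$1, $0", "=*m"(i32* %p)
// writes through %p rather than producing an SSA result, so it touches
// memory.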
5935 if (OpInfo.isIndirect) 5936 hasMemory = true; 5937 else { 5938 for (unsigned j = 0, ee = OpInfo.Codes.size(); j != ee; ++j) { 5939 TargetLowering::ConstraintType 5940 CType = TLI.getConstraintType(OpInfo.Codes[j]); 5941 if (CType == TargetLowering::C_Memory) { 5942 hasMemory = true; 5943 break; 5944 } 5945 } 5946 } 5947 } 5948 5949 SDValue Chain, Flag; 5950 5951 // We won't need to flush pending loads if this asm doesn't touch 5952 // memory and is nonvolatile. 5953 if (hasMemory || IA->hasSideEffects()) 5954 Chain = getRoot(); 5955 else 5956 Chain = DAG.getRoot(); 5957 5958 // Second pass over the constraints: compute which constraint option to use 5959 // and assign registers to constraints that want a specific physreg. 5960 for (unsigned i = 0, e = ConstraintOperands.size(); i != e; ++i) { 5961 SDISelAsmOperandInfo &OpInfo = ConstraintOperands[i]; 5962 5963 // If this is an output operand with a matching input operand, look up the 5964 // matching input. If their types mismatch, e.g. one is an integer, the 5965 // other is floating point, or their sizes are different, flag it as an 5966 // error. 5967 if (OpInfo.hasMatchingInput()) { 5968 SDISelAsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput]; 5969 5970 if (OpInfo.ConstraintVT != Input.ConstraintVT) { 5971 const TargetRegisterInfo *TRI = DAG.getSubtarget().getRegisterInfo(); 5972 std::pair<unsigned, const TargetRegisterClass *> MatchRC = 5973 TLI.getRegForInlineAsmConstraint(TRI, OpInfo.ConstraintCode, 5974 OpInfo.ConstraintVT); 5975 std::pair<unsigned, const TargetRegisterClass *> InputRC = 5976 TLI.getRegForInlineAsmConstraint(TRI, Input.ConstraintCode, 5977 Input.ConstraintVT); 5978 if ((OpInfo.ConstraintVT.isInteger() != 5979 Input.ConstraintVT.isInteger()) || 5980 (MatchRC.second != InputRC.second)) { 5981 report_fatal_error("Unsupported asm: input constraint" 5982 " with a matching output constraint of" 5983 " incompatible type!"); 5984 } 5985 Input.ConstraintVT = OpInfo.ConstraintVT; 5986 } 5987 } 5988 5989 // Compute the constraint code and ConstraintType to use. 5990 TLI.ComputeConstraintToUse(OpInfo, OpInfo.CallOperand, &DAG); 5991 5992 if (OpInfo.ConstraintType == TargetLowering::C_Memory && 5993 OpInfo.Type == InlineAsm::isClobber) 5994 continue; 5995 5996 // If this is a memory input, and if the operand is not indirect, emit 5997 // whatever is needed to provide an address for the memory input. 5998 if (OpInfo.ConstraintType == TargetLowering::C_Memory && 5999 !OpInfo.isIndirect) { 6000 assert((OpInfo.isMultipleAlternative || 6001 (OpInfo.Type == InlineAsm::isInput)) && 6002 "Can only indirectify direct input operands!"); 6003 6004 // Memory operands really want the address of the value. If we don't have 6005 // an indirect input, put it in the constpool if we can, otherwise spill 6006 // it to a stack slot. 6007 // TODO: This isn't quite right. We need to handle these according to 6008 // the addressing mode that the constraint wants. Also, this may take 6009 // an additional register for the computation and we don't want that 6010 // either. 6011 6012 // If the operand is a float, integer, or vector constant, spill to a 6013 // constant pool entry to get its address.
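// Illustrative example (sketch): a memory-constrained input handed a plain
// constant, say double 3.0, has no address of its own; placing it in the
// constant pool (or, for non-constants below, in a fresh stack slot) yields
// a pointer the asm can actually use.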
6014 const Value *OpVal = OpInfo.CallOperandVal; 6015 if (isa<ConstantFP>(OpVal) || isa<ConstantInt>(OpVal) || 6016 isa<ConstantVector>(OpVal) || isa<ConstantDataVector>(OpVal)) { 6017 OpInfo.CallOperand = DAG.getConstantPool( 6018 cast<Constant>(OpVal), TLI.getPointerTy(DAG.getDataLayout())); 6019 } else { 6020 // Otherwise, create a stack slot and emit a store to it before the 6021 // asm. 6022 Type *Ty = OpVal->getType(); 6023 auto &DL = DAG.getDataLayout(); 6024 uint64_t TySize = DL.getTypeAllocSize(Ty); 6025 unsigned Align = DL.getPrefTypeAlignment(Ty); 6026 MachineFunction &MF = DAG.getMachineFunction(); 6027 int SSFI = MF.getFrameInfo()->CreateStackObject(TySize, Align, false); 6028 SDValue StackSlot = 6029 DAG.getFrameIndex(SSFI, TLI.getPointerTy(DAG.getDataLayout())); 6030 Chain = DAG.getStore(Chain, getCurSDLoc(), 6031 OpInfo.CallOperand, StackSlot, 6032 MachinePointerInfo::getFixedStack(SSFI), 6033 false, false, 0); 6034 OpInfo.CallOperand = StackSlot; 6035 } 6036 6037 // There is no longer a Value* corresponding to this operand. 6038 OpInfo.CallOperandVal = nullptr; 6039 6040 // It is now an indirect operand. 6041 OpInfo.isIndirect = true; 6042 } 6043 6044 // If this constraint is for a specific register, allocate it before 6045 // anything else. 6046 if (OpInfo.ConstraintType == TargetLowering::C_Register) 6047 GetRegistersForValue(DAG, TLI, getCurSDLoc(), OpInfo); 6048 } 6049 6050 // Second pass - Loop over all of the operands, assigning virtual or physregs 6051 // to register class operands. 6052 for (unsigned i = 0, e = ConstraintOperands.size(); i != e; ++i) { 6053 SDISelAsmOperandInfo &OpInfo = ConstraintOperands[i]; 6054 6055 // C_Register operands have already been allocated, Other/Memory don't need 6056 // to be. 6057 if (OpInfo.ConstraintType == TargetLowering::C_RegisterClass) 6058 GetRegistersForValue(DAG, TLI, getCurSDLoc(), OpInfo); 6059 } 6060 6061 // AsmNodeOperands - The operands for the ISD::INLINEASM node. 6062 std::vector<SDValue> AsmNodeOperands; 6063 AsmNodeOperands.push_back(SDValue()); // reserve space for input chain 6064 AsmNodeOperands.push_back(DAG.getTargetExternalSymbol( 6065 IA->getAsmString().c_str(), TLI.getPointerTy(DAG.getDataLayout()))); 6066 6067 // If we have a !srcloc metadata node associated with it, we want to attach 6068 // this to the ultimately generated inline asm machineinstr. To do this, we 6069 // pass in the third operand as this (potentially null) inline asm MDNode. 6070 const MDNode *SrcLoc = CS.getInstruction()->getMetadata("srcloc"); 6071 AsmNodeOperands.push_back(DAG.getMDNode(SrcLoc)); 6072 6073 // Remember the HasSideEffect, AlignStack, AsmDialect, MayLoad and MayStore 6074 // bits as operand 3. 6075 unsigned ExtraInfo = 0; 6076 if (IA->hasSideEffects()) 6077 ExtraInfo |= InlineAsm::Extra_HasSideEffects; 6078 if (IA->isAlignStack()) 6079 ExtraInfo |= InlineAsm::Extra_IsAlignStack; 6080 // Set the asm dialect. 6081 ExtraInfo |= IA->getDialect() * InlineAsm::Extra_AsmDialect; 6082 6083 // Determine if this InlineAsm MayLoad or MayStore based on the constraints. 6084 for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) { 6085 TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i]; 6086 6087 // Compute the constraint code and ConstraintType to use. 6088 TLI.ComputeConstraintToUse(OpInfo, SDValue()); 6089 6090 // Ideally, we would only check against memory constraints. However, the 6091 // meaning of an other constraint can be target-specific and we can't easily 6092 // reason about it. 
Therefore, be conservative and set MayLoad/MayStore 6093 // for other constraints as well. 6094 if (OpInfo.ConstraintType == TargetLowering::C_Memory || 6095 OpInfo.ConstraintType == TargetLowering::C_Other) { 6096 if (OpInfo.Type == InlineAsm::isInput) 6097 ExtraInfo |= InlineAsm::Extra_MayLoad; 6098 else if (OpInfo.Type == InlineAsm::isOutput) 6099 ExtraInfo |= InlineAsm::Extra_MayStore; 6100 else if (OpInfo.Type == InlineAsm::isClobber) 6101 ExtraInfo |= (InlineAsm::Extra_MayLoad | InlineAsm::Extra_MayStore); 6102 } 6103 } 6104 6105 AsmNodeOperands.push_back(DAG.getTargetConstant( 6106 ExtraInfo, getCurSDLoc(), TLI.getPointerTy(DAG.getDataLayout()))); 6107 6108 // Loop over all of the inputs, copying the operand values into the 6109 // appropriate registers and processing the output regs. 6110 RegsForValue RetValRegs; 6111 6112 // IndirectStoresToEmit - The set of stores to emit after the inline asm node. 6113 std::vector<std::pair<RegsForValue, Value*> > IndirectStoresToEmit; 6114 6115 for (unsigned i = 0, e = ConstraintOperands.size(); i != e; ++i) { 6116 SDISelAsmOperandInfo &OpInfo = ConstraintOperands[i]; 6117 6118 switch (OpInfo.Type) { 6119 case InlineAsm::isOutput: { 6120 if (OpInfo.ConstraintType != TargetLowering::C_RegisterClass && 6121 OpInfo.ConstraintType != TargetLowering::C_Register) { 6122 // Memory output, or 'other' output (e.g. 'X' constraint). 6123 assert(OpInfo.isIndirect && "Memory output must be indirect operand"); 6124 6125 unsigned ConstraintID = 6126 TLI.getInlineAsmMemConstraint(OpInfo.ConstraintCode); 6127 assert(ConstraintID != InlineAsm::Constraint_Unknown && 6128 "Failed to convert memory constraint code to constraint id."); 6129 6130 // Add information to the INLINEASM node to know about this output. 6131 unsigned OpFlags = InlineAsm::getFlagWord(InlineAsm::Kind_Mem, 1); 6132 OpFlags = InlineAsm::getFlagWordForMem(OpFlags, ConstraintID); 6133 AsmNodeOperands.push_back(DAG.getTargetConstant(OpFlags, getCurSDLoc(), 6134 MVT::i32)); 6135 AsmNodeOperands.push_back(OpInfo.CallOperand); 6136 break; 6137 } 6138 6139 // Otherwise, this is a register or register class output. 6140 6141 // Copy the output from the appropriate register. Find a register that 6142 // we can use. 6143 if (OpInfo.AssignedRegs.Regs.empty()) { 6144 LLVMContext &Ctx = *DAG.getContext(); 6145 Ctx.emitError(CS.getInstruction(), 6146 "couldn't allocate output register for constraint '" + 6147 Twine(OpInfo.ConstraintCode) + "'"); 6148 return; 6149 } 6150 6151 // If this is an indirect operand, store through the pointer after the 6152 // asm. 6153 if (OpInfo.isIndirect) { 6154 IndirectStoresToEmit.push_back(std::make_pair(OpInfo.AssignedRegs, 6155 OpInfo.CallOperandVal)); 6156 } else { 6157 // This is the result value of the call. 6158 assert(!CS.getType()->isVoidTy() && "Bad inline asm!"); 6159 // Concatenate this output onto the outputs list. 6160 RetValRegs.append(OpInfo.AssignedRegs); 6161 } 6162 6163 // Add information to the INLINEASM node to know that this register is 6164 // set. 6165 OpInfo.AssignedRegs 6166 .AddInlineAsmOperands(OpInfo.isEarlyClobber 6167 ? InlineAsm::Kind_RegDefEarlyClobber 6168 : InlineAsm::Kind_RegDef, 6169 false, 0, getCurSDLoc(), DAG, AsmNodeOperands); 6170 break; 6171 } 6172 case InlineAsm::isInput: { 6173 SDValue InOperandVal = OpInfo.CallOperand; 6174 6175 if (OpInfo.isMatchingInputConstraint()) { // Matching constraint? 6176 // If this is required to match an output register we have already set, 6177 // just use its register.
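// Illustrative example (assumed IR): in
//   %r = call i32 asm "incl $0", "=r,0"(i32 %x)
// the input's "0" constraint ties it to output operand 0, so the input is
// forced into whatever register was assigned to that output.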
6178         unsigned OperandNo = OpInfo.getMatchedOperand();
6179
6180         // Scan until we find the definition of this operand that we already
6181         // emitted. When we find it, create a RegsForValue operand.
6182         unsigned CurOp = InlineAsm::Op_FirstOperand;
6183         for (; OperandNo; --OperandNo) {
6184           // Advance to the next operand.
6185           unsigned OpFlag =
6186               cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue();
6187           assert((InlineAsm::isRegDefKind(OpFlag) ||
6188                   InlineAsm::isRegDefEarlyClobberKind(OpFlag) ||
6189                   InlineAsm::isMemKind(OpFlag)) && "Skipped past definitions?");
6190           CurOp += InlineAsm::getNumOperandRegisters(OpFlag)+1;
6191         }
6192
6193         unsigned OpFlag =
6194             cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue();
6195         if (InlineAsm::isRegDefKind(OpFlag) ||
6196             InlineAsm::isRegDefEarlyClobberKind(OpFlag)) {
6197           // Add (OpFlag&0xffff)>>3 registers to MatchedRegs.
6198           if (OpInfo.isIndirect) {
6199             // This happens on gcc/testsuite/gcc.dg/pr8788-1.c
6200             LLVMContext &Ctx = *DAG.getContext();
6201             Ctx.emitError(CS.getInstruction(), "inline asm not supported yet:"
6202                                                " don't know how to handle tied "
6203                                                "indirect register inputs");
6204             return;
6205           }
6206
6207           RegsForValue MatchedRegs;
6208           MatchedRegs.ValueVTs.push_back(InOperandVal.getValueType());
6209           MVT RegVT = AsmNodeOperands[CurOp+1].getSimpleValueType();
6210           MatchedRegs.RegVTs.push_back(RegVT);
6211           MachineRegisterInfo &RegInfo = DAG.getMachineFunction().getRegInfo();
6212           for (unsigned i = 0, e = InlineAsm::getNumOperandRegisters(OpFlag);
6213                i != e; ++i) {
6214             if (const TargetRegisterClass *RC = TLI.getRegClassFor(RegVT))
6215               MatchedRegs.Regs.push_back(RegInfo.createVirtualRegister(RC));
6216             else {
6217               LLVMContext &Ctx = *DAG.getContext();
6218               Ctx.emitError(CS.getInstruction(),
6219                             "inline asm error: This value"
6220                             " type register class is not natively supported!");
6221               return;
6222             }
6223           }
6224           SDLoc dl = getCurSDLoc();
6225           // Use the produced MatchedRegs object to copy the input value into
6226           MatchedRegs.getCopyToRegs(InOperandVal, DAG, dl,
6227                                     Chain, &Flag, CS.getInstruction());
6228           MatchedRegs.AddInlineAsmOperands(InlineAsm::Kind_RegUse,
6229                                            true, OpInfo.getMatchedOperand(), dl,
6230                                            DAG, AsmNodeOperands);
6231           break;
6232         }
6233
6234         assert(InlineAsm::isMemKind(OpFlag) && "Unknown matching constraint!");
6235         assert(InlineAsm::getNumOperandRegisters(OpFlag) == 1 &&
6236                "Unexpected number of operands");
6237         // Add information to the INLINEASM node to know about this input.
6238         // See InlineAsm.h isUseOperandTiedToDef.
6239         OpFlag = InlineAsm::convertMemFlagWordToMatchingFlagWord(OpFlag);
6240         OpFlag = InlineAsm::getFlagWordForMatchingOp(OpFlag,
6241                                                      OpInfo.getMatchedOperand());
6242         AsmNodeOperands.push_back(DAG.getTargetConstant(
6243             OpFlag, getCurSDLoc(), TLI.getPointerTy(DAG.getDataLayout())));
6244         AsmNodeOperands.push_back(AsmNodeOperands[CurOp+1]);
6245         break;
6246       }
6247
6248       // Treat indirect 'X' constraint as memory.
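      // (For instance, with an indirect "=X" operand the value lives behind a
      // pointer, so there is no register to name and a memory operand is the
      // only sensible encoding.)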
6249 if (OpInfo.ConstraintType == TargetLowering::C_Other && 6250 OpInfo.isIndirect) 6251 OpInfo.ConstraintType = TargetLowering::C_Memory; 6252 6253 if (OpInfo.ConstraintType == TargetLowering::C_Other) { 6254 std::vector<SDValue> Ops; 6255 TLI.LowerAsmOperandForConstraint(InOperandVal, OpInfo.ConstraintCode, 6256 Ops, DAG); 6257 if (Ops.empty()) { 6258 LLVMContext &Ctx = *DAG.getContext(); 6259 Ctx.emitError(CS.getInstruction(), 6260 "invalid operand for inline asm constraint '" + 6261 Twine(OpInfo.ConstraintCode) + "'"); 6262 return; 6263 } 6264 6265 // Add information to the INLINEASM node to know about this input. 6266 unsigned ResOpType = 6267 InlineAsm::getFlagWord(InlineAsm::Kind_Imm, Ops.size()); 6268 AsmNodeOperands.push_back(DAG.getTargetConstant( 6269 ResOpType, getCurSDLoc(), TLI.getPointerTy(DAG.getDataLayout()))); 6270 AsmNodeOperands.insert(AsmNodeOperands.end(), Ops.begin(), Ops.end()); 6271 break; 6272 } 6273 6274 if (OpInfo.ConstraintType == TargetLowering::C_Memory) { 6275 assert(OpInfo.isIndirect && "Operand must be indirect to be a mem!"); 6276 assert(InOperandVal.getValueType() == 6277 TLI.getPointerTy(DAG.getDataLayout()) && 6278 "Memory operands expect pointer values"); 6279 6280 unsigned ConstraintID = 6281 TLI.getInlineAsmMemConstraint(OpInfo.ConstraintCode); 6282 assert(ConstraintID != InlineAsm::Constraint_Unknown && 6283 "Failed to convert memory constraint code to constraint id."); 6284 6285 // Add information to the INLINEASM node to know about this input. 6286 unsigned ResOpType = InlineAsm::getFlagWord(InlineAsm::Kind_Mem, 1); 6287 ResOpType = InlineAsm::getFlagWordForMem(ResOpType, ConstraintID); 6288 AsmNodeOperands.push_back(DAG.getTargetConstant(ResOpType, 6289 getCurSDLoc(), 6290 MVT::i32)); 6291 AsmNodeOperands.push_back(InOperandVal); 6292 break; 6293 } 6294 6295 assert((OpInfo.ConstraintType == TargetLowering::C_RegisterClass || 6296 OpInfo.ConstraintType == TargetLowering::C_Register) && 6297 "Unknown constraint type!"); 6298 6299 // TODO: Support this. 6300 if (OpInfo.isIndirect) { 6301 LLVMContext &Ctx = *DAG.getContext(); 6302 Ctx.emitError(CS.getInstruction(), 6303 "Don't know how to handle indirect register inputs yet " 6304 "for constraint '" + 6305 Twine(OpInfo.ConstraintCode) + "'"); 6306 return; 6307 } 6308 6309 // Copy the input into the appropriate registers. 6310 if (OpInfo.AssignedRegs.Regs.empty()) { 6311 LLVMContext &Ctx = *DAG.getContext(); 6312 Ctx.emitError(CS.getInstruction(), 6313 "couldn't allocate input reg for constraint '" + 6314 Twine(OpInfo.ConstraintCode) + "'"); 6315 return; 6316 } 6317 6318 SDLoc dl = getCurSDLoc(); 6319 6320 OpInfo.AssignedRegs.getCopyToRegs(InOperandVal, DAG, dl, 6321 Chain, &Flag, CS.getInstruction()); 6322 6323 OpInfo.AssignedRegs.AddInlineAsmOperands(InlineAsm::Kind_RegUse, false, 0, 6324 dl, DAG, AsmNodeOperands); 6325 break; 6326 } 6327 case InlineAsm::isClobber: { 6328 // Add the clobbered value to the operand list, so that the register 6329 // allocator is aware that the physreg got clobbered. 6330 if (!OpInfo.AssignedRegs.Regs.empty()) 6331 OpInfo.AssignedRegs.AddInlineAsmOperands(InlineAsm::Kind_Clobber, 6332 false, 0, getCurSDLoc(), DAG, 6333 AsmNodeOperands); 6334 break; 6335 } 6336 } 6337 } 6338 6339 // Finish up input operands. Set the input chain and add the flag last. 
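  // (The chain slot was reserved as operand 0 when AsmNodeOperands was first
  // built; the glue, when present, must be the very last operand so that
  // flagged register copies stay glued to the INLINEASM node.)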
6340 AsmNodeOperands[InlineAsm::Op_InputChain] = Chain; 6341 if (Flag.getNode()) AsmNodeOperands.push_back(Flag); 6342 6343 Chain = DAG.getNode(ISD::INLINEASM, getCurSDLoc(), 6344 DAG.getVTList(MVT::Other, MVT::Glue), AsmNodeOperands); 6345 Flag = Chain.getValue(1); 6346 6347 // If this asm returns a register value, copy the result from that register 6348 // and set it as the value of the call. 6349 if (!RetValRegs.Regs.empty()) { 6350 SDValue Val = RetValRegs.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), 6351 Chain, &Flag, CS.getInstruction()); 6352 6353 // FIXME: Why don't we do this for inline asms with MRVs? 6354 if (CS.getType()->isSingleValueType() && CS.getType()->isSized()) { 6355 EVT ResultType = TLI.getValueType(DAG.getDataLayout(), CS.getType()); 6356 6357 // If any of the results of the inline asm is a vector, it may have the 6358 // wrong width/num elts. This can happen for register classes that can 6359 // contain multiple different value types. The preg or vreg allocated may 6360 // not have the same VT as was expected. Convert it to the right type 6361 // with bit_convert. 6362 if (ResultType != Val.getValueType() && Val.getValueType().isVector()) { 6363 Val = DAG.getNode(ISD::BITCAST, getCurSDLoc(), 6364 ResultType, Val); 6365 6366 } else if (ResultType != Val.getValueType() && 6367 ResultType.isInteger() && Val.getValueType().isInteger()) { 6368 // If a result value was tied to an input value, the computed result may 6369 // have a wider width than the expected result. Extract the relevant 6370 // portion. 6371 Val = DAG.getNode(ISD::TRUNCATE, getCurSDLoc(), ResultType, Val); 6372 } 6373 6374 assert(ResultType == Val.getValueType() && "Asm result value mismatch!"); 6375 } 6376 6377 setValue(CS.getInstruction(), Val); 6378 // Don't need to use this as a chain in this case. 6379 if (!IA->hasSideEffects() && !hasMemory && IndirectStoresToEmit.empty()) 6380 return; 6381 } 6382 6383 std::vector<std::pair<SDValue, const Value *> > StoresToEmit; 6384 6385 // Process indirect outputs, first output all of the flagged copies out of 6386 // physregs. 6387 for (unsigned i = 0, e = IndirectStoresToEmit.size(); i != e; ++i) { 6388 RegsForValue &OutRegs = IndirectStoresToEmit[i].first; 6389 const Value *Ptr = IndirectStoresToEmit[i].second; 6390 SDValue OutVal = OutRegs.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), 6391 Chain, &Flag, IA); 6392 StoresToEmit.push_back(std::make_pair(OutVal, Ptr)); 6393 } 6394 6395 // Emit the non-flagged stores from the physregs. 
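  // (Each store depends only on the post-asm chain, not on the other stores,
  // so they are merged below with a TokenFactor rather than being chained
  // serially.)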
6396 SmallVector<SDValue, 8> OutChains; 6397 for (unsigned i = 0, e = StoresToEmit.size(); i != e; ++i) { 6398 SDValue Val = DAG.getStore(Chain, getCurSDLoc(), 6399 StoresToEmit[i].first, 6400 getValue(StoresToEmit[i].second), 6401 MachinePointerInfo(StoresToEmit[i].second), 6402 false, false, 0); 6403 OutChains.push_back(Val); 6404 } 6405 6406 if (!OutChains.empty()) 6407 Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), MVT::Other, OutChains); 6408 6409 DAG.setRoot(Chain); 6410 } 6411 6412 void SelectionDAGBuilder::visitVAStart(const CallInst &I) { 6413 DAG.setRoot(DAG.getNode(ISD::VASTART, getCurSDLoc(), 6414 MVT::Other, getRoot(), 6415 getValue(I.getArgOperand(0)), 6416 DAG.getSrcValue(I.getArgOperand(0)))); 6417 } 6418 6419 void SelectionDAGBuilder::visitVAArg(const VAArgInst &I) { 6420 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 6421 const DataLayout &DL = DAG.getDataLayout(); 6422 SDValue V = DAG.getVAArg(TLI.getValueType(DAG.getDataLayout(), I.getType()), 6423 getCurSDLoc(), getRoot(), getValue(I.getOperand(0)), 6424 DAG.getSrcValue(I.getOperand(0)), 6425 DL.getABITypeAlignment(I.getType())); 6426 setValue(&I, V); 6427 DAG.setRoot(V.getValue(1)); 6428 } 6429 6430 void SelectionDAGBuilder::visitVAEnd(const CallInst &I) { 6431 DAG.setRoot(DAG.getNode(ISD::VAEND, getCurSDLoc(), 6432 MVT::Other, getRoot(), 6433 getValue(I.getArgOperand(0)), 6434 DAG.getSrcValue(I.getArgOperand(0)))); 6435 } 6436 6437 void SelectionDAGBuilder::visitVACopy(const CallInst &I) { 6438 DAG.setRoot(DAG.getNode(ISD::VACOPY, getCurSDLoc(), 6439 MVT::Other, getRoot(), 6440 getValue(I.getArgOperand(0)), 6441 getValue(I.getArgOperand(1)), 6442 DAG.getSrcValue(I.getArgOperand(0)), 6443 DAG.getSrcValue(I.getArgOperand(1)))); 6444 } 6445 6446 /// \brief Lower an argument list according to the target calling convention. 6447 /// 6448 /// \return A tuple of <return-value, token-chain> 6449 /// 6450 /// This is a helper for lowering intrinsics that follow a target calling 6451 /// convention or require stack pointer adjustment. Only a subset of the 6452 /// intrinsic's operands need to participate in the calling convention. 6453 std::pair<SDValue, SDValue> 6454 SelectionDAGBuilder::lowerCallOperands(ImmutableCallSite CS, unsigned ArgIdx, 6455 unsigned NumArgs, SDValue Callee, 6456 Type *ReturnTy, 6457 MachineBasicBlock *LandingPad, 6458 bool IsPatchPoint) { 6459 TargetLowering::ArgListTy Args; 6460 Args.reserve(NumArgs); 6461 6462 // Populate the argument list. 6463 // Attributes for args start at offset 1, after the return attribute. 6464 for (unsigned ArgI = ArgIdx, ArgE = ArgIdx + NumArgs, AttrI = ArgIdx + 1; 6465 ArgI != ArgE; ++ArgI) { 6466 const Value *V = CS->getOperand(ArgI); 6467 6468 assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic."); 6469 6470 TargetLowering::ArgListEntry Entry; 6471 Entry.Node = getValue(V); 6472 Entry.Ty = V->getType(); 6473 Entry.setAttributes(&CS, AttrI); 6474 Args.push_back(Entry); 6475 } 6476 6477 TargetLowering::CallLoweringInfo CLI(DAG); 6478 CLI.setDebugLoc(getCurSDLoc()).setChain(getRoot()) 6479 .setCallee(CS.getCallingConv(), ReturnTy, Callee, std::move(Args), NumArgs) 6480 .setDiscardResult(CS->use_empty()).setIsPatchPoint(IsPatchPoint); 6481 6482 return lowerInvokable(CLI, LandingPad); 6483 } 6484 6485 /// \brief Add a stack map intrinsic call's live variable operands to a stackmap 6486 /// or patchpoint target node's operand list. 
6487 ///
6488 /// Constants are converted to TargetConstants purely as an optimization to
6489 /// avoid constant materialization and register allocation.
6490 ///
6491 /// FrameIndex operands are converted to TargetFrameIndex so that ISEL does not
6492 /// generate address computation nodes, and so ExpandISelPseudo can convert the
6493 /// TargetFrameIndex into a DirectMemRefOp StackMap location. This avoids
6494 /// address materialization and register allocation, but may also be required
6495 /// for correctness. If a StackMap (or PatchPoint) intrinsic directly uses an
6496 /// alloca in the entry block, then the runtime may assume that the alloca's
6497 /// StackMap location can be read immediately after compilation and that the
6498 /// location is valid at any point during execution (this is similar to the
6499 /// assumption made by the llvm.gcroot intrinsic). If the alloca's location were
6500 /// only available in a register, then the runtime would need to trap when
6501 /// execution reaches the StackMap in order to read the alloca's location.
6502 static void addStackMapLiveVars(ImmutableCallSite CS, unsigned StartIdx,
6503                                 SDLoc DL, SmallVectorImpl<SDValue> &Ops,
6504                                 SelectionDAGBuilder &Builder) {
6505   for (unsigned i = StartIdx, e = CS.arg_size(); i != e; ++i) {
6506     SDValue OpVal = Builder.getValue(CS.getArgument(i));
6507     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(OpVal)) {
6508       Ops.push_back(
6509           Builder.DAG.getTargetConstant(StackMaps::ConstantOp, DL, MVT::i64));
6510       Ops.push_back(
6511           Builder.DAG.getTargetConstant(C->getSExtValue(), DL, MVT::i64));
6512     } else if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(OpVal)) {
6513       const TargetLowering &TLI = Builder.DAG.getTargetLoweringInfo();
6514       Ops.push_back(Builder.DAG.getTargetFrameIndex(
6515           FI->getIndex(), TLI.getPointerTy(Builder.DAG.getDataLayout())));
6516     } else
6517       Ops.push_back(OpVal);
6518   }
6519 }
6520
6521 /// \brief Lower llvm.experimental.stackmap directly to its target opcode.
6522 void SelectionDAGBuilder::visitStackmap(const CallInst &CI) {
6523   // void @llvm.experimental.stackmap(i64 <id>, i32 <numShadowBytes>,
6524   //                                  [live variables...])
6525
6526   assert(CI.getType()->isVoidTy() && "Stackmap cannot return a value.");
6527
6528   SDValue Chain, InFlag, Callee, NullPtr;
6529   SmallVector<SDValue, 32> Ops;
6530
6531   SDLoc DL = getCurSDLoc();
6532   Callee = getValue(CI.getCalledValue());
6533   NullPtr = DAG.getIntPtrConstant(0, DL, true);
6534
6535   // The stackmap intrinsic only records the live variables (the arguments
6536   // passed to it) and emits NOPs (if requested). Unlike the patchpoint
6537   // intrinsic, this won't be lowered to a function call. This means we don't
6538   // have to worry about calling conventions and target specific lowering code.
6539   // Instead we perform the call lowering right here.
6540   //
6541   // chain, flag = CALLSEQ_START(chain, 0)
6542   // chain, flag = STACKMAP(id, nbytes, ..., chain, flag)
6543   // chain, flag = CALLSEQ_END(chain, 0, 0, flag)
6544   //
6545   Chain = DAG.getCALLSEQ_START(getRoot(), NullPtr, DL);
6546   InFlag = Chain.getValue(1);
6547
6548   // Add the <id> and <numBytes> constants.
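  // (Both must be compile-time constants in the IR; e.g. for
  //    call void @llvm.experimental.stackmap(i64 7, i32 12)
  //  the casts below yield the target constants 7 (i64) and 12 (i32).)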
6549 SDValue IDVal = getValue(CI.getOperand(PatchPointOpers::IDPos)); 6550 Ops.push_back(DAG.getTargetConstant( 6551 cast<ConstantSDNode>(IDVal)->getZExtValue(), DL, MVT::i64)); 6552 SDValue NBytesVal = getValue(CI.getOperand(PatchPointOpers::NBytesPos)); 6553 Ops.push_back(DAG.getTargetConstant( 6554 cast<ConstantSDNode>(NBytesVal)->getZExtValue(), DL, 6555 MVT::i32)); 6556 6557 // Push live variables for the stack map. 6558 addStackMapLiveVars(&CI, 2, DL, Ops, *this); 6559 6560 // We are not pushing any register mask info here on the operands list, 6561 // because the stackmap doesn't clobber anything. 6562 6563 // Push the chain and the glue flag. 6564 Ops.push_back(Chain); 6565 Ops.push_back(InFlag); 6566 6567 // Create the STACKMAP node. 6568 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); 6569 SDNode *SM = DAG.getMachineNode(TargetOpcode::STACKMAP, DL, NodeTys, Ops); 6570 Chain = SDValue(SM, 0); 6571 InFlag = Chain.getValue(1); 6572 6573 Chain = DAG.getCALLSEQ_END(Chain, NullPtr, NullPtr, InFlag, DL); 6574 6575 // Stackmaps don't generate values, so nothing goes into the NodeMap. 6576 6577 // Set the root to the target-lowered call chain. 6578 DAG.setRoot(Chain); 6579 6580 // Inform the Frame Information that we have a stackmap in this function. 6581 FuncInfo.MF->getFrameInfo()->setHasStackMap(); 6582 } 6583 6584 /// \brief Lower llvm.experimental.patchpoint directly to its target opcode. 6585 void SelectionDAGBuilder::visitPatchpoint(ImmutableCallSite CS, 6586 MachineBasicBlock *LandingPad) { 6587 // void|i64 @llvm.experimental.patchpoint.void|i64(i64 <id>, 6588 // i32 <numBytes>, 6589 // i8* <target>, 6590 // i32 <numArgs>, 6591 // [Args...], 6592 // [live variables...]) 6593 6594 CallingConv::ID CC = CS.getCallingConv(); 6595 bool IsAnyRegCC = CC == CallingConv::AnyReg; 6596 bool HasDef = !CS->getType()->isVoidTy(); 6597 SDLoc dl = getCurSDLoc(); 6598 SDValue Callee = getValue(CS->getOperand(PatchPointOpers::TargetPos)); 6599 6600 // Handle immediate and symbolic callees. 6601 if (auto* ConstCallee = dyn_cast<ConstantSDNode>(Callee)) 6602 Callee = DAG.getIntPtrConstant(ConstCallee->getZExtValue(), dl, 6603 /*isTarget=*/true); 6604 else if (auto* SymbolicCallee = dyn_cast<GlobalAddressSDNode>(Callee)) 6605 Callee = DAG.getTargetGlobalAddress(SymbolicCallee->getGlobal(), 6606 SDLoc(SymbolicCallee), 6607 SymbolicCallee->getValueType(0)); 6608 6609 // Get the real number of arguments participating in the call <numArgs> 6610 SDValue NArgVal = getValue(CS.getArgument(PatchPointOpers::NArgPos)); 6611 unsigned NumArgs = cast<ConstantSDNode>(NArgVal)->getZExtValue(); 6612 6613 // Skip the four meta args: <id>, <numNopBytes>, <target>, <numArgs> 6614 // Intrinsics include all meta-operands up to but not including CC. 6615 unsigned NumMetaOpers = PatchPointOpers::CCPos; 6616 assert(CS.arg_size() >= NumMetaOpers + NumArgs && 6617 "Not enough arguments provided to the patchpoint intrinsic"); 6618 6619 // For AnyRegCC the arguments are lowered later on manually. 6620 unsigned NumCallArgs = IsAnyRegCC ? 0 : NumArgs; 6621 Type *ReturnTy = 6622 IsAnyRegCC ? Type::getVoidTy(*DAG.getContext()) : CS->getType(); 6623 std::pair<SDValue, SDValue> Result = 6624 lowerCallOperands(CS, NumMetaOpers, NumCallArgs, Callee, ReturnTy, 6625 LandingPad, true); 6626 6627 SDNode *CallEnd = Result.second.getNode(); 6628 if (HasDef && (CallEnd->getOpcode() == ISD::CopyFromReg)) 6629 CallEnd = CallEnd->getOperand(0).getNode(); 6630 6631 /// Get a call instruction from the call sequence chain. 
6632   /// Tail calls are not allowed.
6633   assert(CallEnd->getOpcode() == ISD::CALLSEQ_END &&
6634          "Expected a callseq node.");
6635   SDNode *Call = CallEnd->getOperand(0).getNode();
6636   bool HasGlue = Call->getGluedNode();
6637
6638   // Replace the target specific call node with the patchable intrinsic.
6639   SmallVector<SDValue, 8> Ops;
6640
6641   // Add the <id> and <numBytes> constants.
6642   SDValue IDVal = getValue(CS->getOperand(PatchPointOpers::IDPos));
6643   Ops.push_back(DAG.getTargetConstant(
6644                   cast<ConstantSDNode>(IDVal)->getZExtValue(), dl, MVT::i64));
6645   SDValue NBytesVal = getValue(CS->getOperand(PatchPointOpers::NBytesPos));
6646   Ops.push_back(DAG.getTargetConstant(
6647                   cast<ConstantSDNode>(NBytesVal)->getZExtValue(), dl,
6648                   MVT::i32));
6649
6650   // Add the callee.
6651   Ops.push_back(Callee);
6652
6653   // Adjust <numArgs> to account for any arguments that have been passed on the
6654   // stack instead.
6655   // Call Node: Chain, Target, {Args}, RegMask, [Glue]
6656   unsigned NumCallRegArgs = Call->getNumOperands() - (HasGlue ? 4 : 3);
6657   NumCallRegArgs = IsAnyRegCC ? NumArgs : NumCallRegArgs;
6658   Ops.push_back(DAG.getTargetConstant(NumCallRegArgs, dl, MVT::i32));
6659
6660   // Add the calling convention.
6661   Ops.push_back(DAG.getTargetConstant((unsigned)CC, dl, MVT::i32));
6662
6663   // Add the arguments we omitted previously. The register allocator should
6664   // place these in any free register.
6665   if (IsAnyRegCC)
6666     for (unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i != e; ++i)
6667       Ops.push_back(getValue(CS.getArgument(i)));
6668
6669   // Push the arguments from the call instruction up to the register mask.
6670   SDNode::op_iterator e = HasGlue ? Call->op_end()-2 : Call->op_end()-1;
6671   Ops.append(Call->op_begin() + 2, e);
6672
6673   // Push live variables for the stack map.
6674   addStackMapLiveVars(CS, NumMetaOpers + NumArgs, dl, Ops, *this);
6675
6676   // Push the register mask info.
6677   if (HasGlue)
6678     Ops.push_back(*(Call->op_end()-2));
6679   else
6680     Ops.push_back(*(Call->op_end()-1));
6681
6682   // Push the chain (this is originally the first operand of the call, but
6683   // now becomes the last or second-to-last operand).
6684   Ops.push_back(*(Call->op_begin()));
6685
6686   // Push the glue flag (last operand).
6687   if (HasGlue)
6688     Ops.push_back(*(Call->op_end()-1));
6689
6690   SDVTList NodeTys;
6691   if (IsAnyRegCC && HasDef) {
6692     // Create the return types based on the intrinsic definition.
6693     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6694     SmallVector<EVT, 3> ValueVTs;
6695     ComputeValueVTs(TLI, DAG.getDataLayout(), CS->getType(), ValueVTs);
6696     assert(ValueVTs.size() == 1 && "Expected only one return value type.");
6697
6698     // There is always a chain and a glue type at the end.
6699     ValueVTs.push_back(MVT::Other);
6700     ValueVTs.push_back(MVT::Glue);
6701     NodeTys = DAG.getVTList(ValueVTs);
6702   } else
6703     NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
6704
6705   // Replace the target specific call node with a PATCHPOINT node.
6706   MachineSDNode *MN = DAG.getMachineNode(TargetOpcode::PATCHPOINT,
6707                                          dl, NodeTys, Ops);
6708
6709   // Update the NodeMap.
6710   if (HasDef) {
6711     if (IsAnyRegCC)
6712       setValue(CS.getInstruction(), SDValue(MN, 0));
6713     else
6714       setValue(CS.getInstruction(), Result.first);
6715   }
6716
6717   // Fixup the consumers of the intrinsic. The chain and glue may be used in the
6718   // call sequence.
Furthermore, the location of the chain and glue can change
6719   // when the AnyReg calling convention is used and the intrinsic returns a
6720   // value.
6721   if (IsAnyRegCC && HasDef) {
6722     SDValue From[] = {SDValue(Call, 0), SDValue(Call, 1)};
6723     SDValue To[] = {SDValue(MN, 1), SDValue(MN, 2)};
6724     DAG.ReplaceAllUsesOfValuesWith(From, To, 2);
6725   } else
6726     DAG.ReplaceAllUsesWith(Call, MN);
6727   DAG.DeleteNode(Call);
6728
6729   // Inform the Frame Information that we have a patchpoint in this function.
6730   FuncInfo.MF->getFrameInfo()->setHasPatchPoint();
6731 }
6732
6733 /// Returns an AttributeSet representing the attributes applied to the return
6734 /// value of the given call.
6735 static AttributeSet getReturnAttrs(TargetLowering::CallLoweringInfo &CLI) {
6736   SmallVector<Attribute::AttrKind, 2> Attrs;
6737   if (CLI.RetSExt)
6738     Attrs.push_back(Attribute::SExt);
6739   if (CLI.RetZExt)
6740     Attrs.push_back(Attribute::ZExt);
6741   if (CLI.IsInReg)
6742     Attrs.push_back(Attribute::InReg);
6743
6744   return AttributeSet::get(CLI.RetTy->getContext(), AttributeSet::ReturnIndex,
6745                            Attrs);
6746 }
6747
6748 /// TargetLowering::LowerCallTo - This is the default LowerCallTo
6749 /// implementation, which just calls LowerCall.
6750 /// FIXME: When all targets are
6751 /// migrated to using LowerCall, this hook should be integrated into SDISel.
6752 std::pair<SDValue, SDValue>
6753 TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
6754   // Handle the incoming return values from the call.
6755   CLI.Ins.clear();
6756   Type *OrigRetTy = CLI.RetTy;
6757   SmallVector<EVT, 4> RetTys;
6758   SmallVector<uint64_t, 4> Offsets;
6759   auto &DL = CLI.DAG.getDataLayout();
6760   ComputeValueVTs(*this, DL, CLI.RetTy, RetTys, &Offsets);
6761
6762   SmallVector<ISD::OutputArg, 4> Outs;
6763   GetReturnInfo(CLI.RetTy, getReturnAttrs(CLI), Outs, *this, DL);
6764
6765   bool CanLowerReturn =
6766       this->CanLowerReturn(CLI.CallConv, CLI.DAG.getMachineFunction(),
6767                            CLI.IsVarArg, Outs, CLI.RetTy->getContext());
6768
6769   SDValue DemoteStackSlot;
6770   int DemoteStackIdx = -100;
6771   if (!CanLowerReturn) {
6772     // FIXME: equivalent assert?
6773     // assert(!CS.hasInAllocaArgument() &&
6774     //        "sret demotion is incompatible with inalloca");
6775     uint64_t TySize = DL.getTypeAllocSize(CLI.RetTy);
6776     unsigned Align = DL.getPrefTypeAlignment(CLI.RetTy);
6777     MachineFunction &MF = CLI.DAG.getMachineFunction();
6778     DemoteStackIdx = MF.getFrameInfo()->CreateStackObject(TySize, Align, false);
6779     Type *StackSlotPtrType = PointerType::getUnqual(CLI.RetTy);
6780
6781     DemoteStackSlot = CLI.DAG.getFrameIndex(DemoteStackIdx, getPointerTy(DL));
6782     ArgListEntry Entry;
6783     Entry.Node = DemoteStackSlot;
6784     Entry.Ty = StackSlotPtrType;
6785     Entry.isSExt = false;
6786     Entry.isZExt = false;
6787     Entry.isInReg = false;
6788     Entry.isSRet = true;
6789     Entry.isNest = false;
6790     Entry.isByVal = false;
6791     Entry.isReturned = false;
6792     Entry.Alignment = Align;
6793     CLI.getArgs().insert(CLI.getArgs().begin(), Entry);
6794     CLI.RetTy = Type::getVoidTy(CLI.RetTy->getContext());
6795
6796     // sret demotion isn't compatible with tail-calls, since the sret argument
6797     // points into the caller's stack frame.
6798 CLI.IsTailCall = false; 6799 } else { 6800 for (unsigned I = 0, E = RetTys.size(); I != E; ++I) { 6801 EVT VT = RetTys[I]; 6802 MVT RegisterVT = getRegisterType(CLI.RetTy->getContext(), VT); 6803 unsigned NumRegs = getNumRegisters(CLI.RetTy->getContext(), VT); 6804 for (unsigned i = 0; i != NumRegs; ++i) { 6805 ISD::InputArg MyFlags; 6806 MyFlags.VT = RegisterVT; 6807 MyFlags.ArgVT = VT; 6808 MyFlags.Used = CLI.IsReturnValueUsed; 6809 if (CLI.RetSExt) 6810 MyFlags.Flags.setSExt(); 6811 if (CLI.RetZExt) 6812 MyFlags.Flags.setZExt(); 6813 if (CLI.IsInReg) 6814 MyFlags.Flags.setInReg(); 6815 CLI.Ins.push_back(MyFlags); 6816 } 6817 } 6818 } 6819 6820 // Handle all of the outgoing arguments. 6821 CLI.Outs.clear(); 6822 CLI.OutVals.clear(); 6823 ArgListTy &Args = CLI.getArgs(); 6824 for (unsigned i = 0, e = Args.size(); i != e; ++i) { 6825 SmallVector<EVT, 4> ValueVTs; 6826 ComputeValueVTs(*this, DL, Args[i].Ty, ValueVTs); 6827 Type *FinalType = Args[i].Ty; 6828 if (Args[i].isByVal) 6829 FinalType = cast<PointerType>(Args[i].Ty)->getElementType(); 6830 bool NeedsRegBlock = functionArgumentNeedsConsecutiveRegisters( 6831 FinalType, CLI.CallConv, CLI.IsVarArg); 6832 for (unsigned Value = 0, NumValues = ValueVTs.size(); Value != NumValues; 6833 ++Value) { 6834 EVT VT = ValueVTs[Value]; 6835 Type *ArgTy = VT.getTypeForEVT(CLI.RetTy->getContext()); 6836 SDValue Op = SDValue(Args[i].Node.getNode(), 6837 Args[i].Node.getResNo() + Value); 6838 ISD::ArgFlagsTy Flags; 6839 unsigned OriginalAlignment = DL.getABITypeAlignment(ArgTy); 6840 6841 if (Args[i].isZExt) 6842 Flags.setZExt(); 6843 if (Args[i].isSExt) 6844 Flags.setSExt(); 6845 if (Args[i].isInReg) 6846 Flags.setInReg(); 6847 if (Args[i].isSRet) 6848 Flags.setSRet(); 6849 if (Args[i].isByVal) 6850 Flags.setByVal(); 6851 if (Args[i].isInAlloca) { 6852 Flags.setInAlloca(); 6853 // Set the byval flag for CCAssignFn callbacks that don't know about 6854 // inalloca. This way we can know how many bytes we should've allocated 6855 // and how many bytes a callee cleanup function will pop. If we port 6856 // inalloca to more targets, we'll have to add custom inalloca handling 6857 // in the various CC lowering callbacks. 6858 Flags.setByVal(); 6859 } 6860 if (Args[i].isByVal || Args[i].isInAlloca) { 6861 PointerType *Ty = cast<PointerType>(Args[i].Ty); 6862 Type *ElementTy = Ty->getElementType(); 6863 Flags.setByValSize(DL.getTypeAllocSize(ElementTy)); 6864 // For ByVal, alignment should come from FE. BE will guess if this 6865 // info is not there but there are cases it cannot get right. 
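      // (e.g. an explicit 'align 16' on the byval argument is used as-is
      // here; only when no alignment was specified do we fall back to the
      // target's getByValTypeAlignment() estimate.)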
6866 unsigned FrameAlign; 6867 if (Args[i].Alignment) 6868 FrameAlign = Args[i].Alignment; 6869 else 6870 FrameAlign = getByValTypeAlignment(ElementTy, DL); 6871 Flags.setByValAlign(FrameAlign); 6872 } 6873 if (Args[i].isNest) 6874 Flags.setNest(); 6875 if (NeedsRegBlock) 6876 Flags.setInConsecutiveRegs(); 6877 Flags.setOrigAlign(OriginalAlignment); 6878 6879 MVT PartVT = getRegisterType(CLI.RetTy->getContext(), VT); 6880 unsigned NumParts = getNumRegisters(CLI.RetTy->getContext(), VT); 6881 SmallVector<SDValue, 4> Parts(NumParts); 6882 ISD::NodeType ExtendKind = ISD::ANY_EXTEND; 6883 6884 if (Args[i].isSExt) 6885 ExtendKind = ISD::SIGN_EXTEND; 6886 else if (Args[i].isZExt) 6887 ExtendKind = ISD::ZERO_EXTEND; 6888 6889 // Conservatively only handle 'returned' on non-vectors for now 6890 if (Args[i].isReturned && !Op.getValueType().isVector()) { 6891 assert(CLI.RetTy == Args[i].Ty && RetTys.size() == NumValues && 6892 "unexpected use of 'returned'"); 6893 // Before passing 'returned' to the target lowering code, ensure that 6894 // either the register MVT and the actual EVT are the same size or that 6895 // the return value and argument are extended in the same way; in these 6896 // cases it's safe to pass the argument register value unchanged as the 6897 // return register value (although it's at the target's option whether 6898 // to do so) 6899 // TODO: allow code generation to take advantage of partially preserved 6900 // registers rather than clobbering the entire register when the 6901 // parameter extension method is not compatible with the return 6902 // extension method 6903 if ((NumParts * PartVT.getSizeInBits() == VT.getSizeInBits()) || 6904 (ExtendKind != ISD::ANY_EXTEND && 6905 CLI.RetSExt == Args[i].isSExt && CLI.RetZExt == Args[i].isZExt)) 6906 Flags.setReturned(); 6907 } 6908 6909 getCopyToParts(CLI.DAG, CLI.DL, Op, &Parts[0], NumParts, PartVT, 6910 CLI.CS ? CLI.CS->getInstruction() : nullptr, ExtendKind); 6911 6912 for (unsigned j = 0; j != NumParts; ++j) { 6913 // if it isn't first piece, alignment must be 1 6914 ISD::OutputArg MyFlags(Flags, Parts[j].getValueType(), VT, 6915 i < CLI.NumFixedArgs, 6916 i, j*Parts[j].getValueType().getStoreSize()); 6917 if (NumParts > 1 && j == 0) 6918 MyFlags.Flags.setSplit(); 6919 else if (j != 0) 6920 MyFlags.Flags.setOrigAlign(1); 6921 6922 CLI.Outs.push_back(MyFlags); 6923 CLI.OutVals.push_back(Parts[j]); 6924 } 6925 6926 if (NeedsRegBlock && Value == NumValues - 1) 6927 CLI.Outs[CLI.Outs.size() - 1].Flags.setInConsecutiveRegsLast(); 6928 } 6929 } 6930 6931 SmallVector<SDValue, 4> InVals; 6932 CLI.Chain = LowerCall(CLI, InVals); 6933 6934 // Verify that the target's LowerCall behaved as expected. 6935 assert(CLI.Chain.getNode() && CLI.Chain.getValueType() == MVT::Other && 6936 "LowerCall didn't return a valid chain!"); 6937 assert((!CLI.IsTailCall || InVals.empty()) && 6938 "LowerCall emitted a return value for a tail call!"); 6939 assert((CLI.IsTailCall || InVals.size() == CLI.Ins.size()) && 6940 "LowerCall didn't emit the correct number of values!"); 6941 6942 // For a tail call, the return value is merely live-out and there aren't 6943 // any nodes in the DAG representing it. Return a special value to 6944 // indicate that a tail call has been emitted and no more Instructions 6945 // should be processed in the current block. 
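  // (Concretely, the "special value" is the pair of null SDValues returned
  // below; callers such as updateDAGForMaybeTailCall later in this file key
  // off the null node.)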
6946   if (CLI.IsTailCall) {
6947     CLI.DAG.setRoot(CLI.Chain);
6948     return std::make_pair(SDValue(), SDValue());
6949   }
6950
6951   DEBUG(for (unsigned i = 0, e = CLI.Ins.size(); i != e; ++i) {
6952     assert(InVals[i].getNode() &&
6953            "LowerCall emitted a null value!");
6954     assert(EVT(CLI.Ins[i].VT) == InVals[i].getValueType() &&
6955            "LowerCall emitted a value with the wrong type!");
6956   });
6957
6958   SmallVector<SDValue, 4> ReturnValues;
6959   if (!CanLowerReturn) {
6960     // The instruction result is the result of loading from the
6961     // hidden sret parameter.
6962     SmallVector<EVT, 1> PVTs;
6963     Type *PtrRetTy = PointerType::getUnqual(OrigRetTy);
6964
6965     ComputeValueVTs(*this, DL, PtrRetTy, PVTs);
6966     assert(PVTs.size() == 1 && "Pointers should fit in one register");
6967     EVT PtrVT = PVTs[0];
6968
6969     unsigned NumValues = RetTys.size();
6970     ReturnValues.resize(NumValues);
6971     SmallVector<SDValue, 4> Chains(NumValues);
6972
6973     for (unsigned i = 0; i < NumValues; ++i) {
6974       SDValue Add = CLI.DAG.getNode(ISD::ADD, CLI.DL, PtrVT, DemoteStackSlot,
6975                                     CLI.DAG.getConstant(Offsets[i], CLI.DL,
6976                                                         PtrVT));
6977       SDValue L = CLI.DAG.getLoad(
6978           RetTys[i], CLI.DL, CLI.Chain, Add,
6979           MachinePointerInfo::getFixedStack(DemoteStackIdx, Offsets[i]), false,
6980           false, false, 1);
6981       ReturnValues[i] = L;
6982       Chains[i] = L.getValue(1);
6983     }
6984
6985     CLI.Chain = CLI.DAG.getNode(ISD::TokenFactor, CLI.DL, MVT::Other, Chains);
6986   } else {
6987     // Collect the legal value parts into potentially illegal values
6988     // that correspond to the original function's return values.
6989     ISD::NodeType AssertOp = ISD::DELETED_NODE;
6990     if (CLI.RetSExt)
6991       AssertOp = ISD::AssertSext;
6992     else if (CLI.RetZExt)
6993       AssertOp = ISD::AssertZext;
6994     unsigned CurReg = 0;
6995     for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
6996       EVT VT = RetTys[I];
6997       MVT RegisterVT = getRegisterType(CLI.RetTy->getContext(), VT);
6998       unsigned NumRegs = getNumRegisters(CLI.RetTy->getContext(), VT);
6999
7000       ReturnValues.push_back(getCopyFromParts(CLI.DAG, CLI.DL, &InVals[CurReg],
7001                                               NumRegs, RegisterVT, VT, nullptr,
7002                                               AssertOp));
7003       CurReg += NumRegs;
7004     }
7005
7006     // For a function returning void, there is no return value. We can't create
7007     // such a node, so we just return a null return value in that case; nothing
7008     // will actually look at the value.
7009     if (ReturnValues.empty())
7010       return std::make_pair(SDValue(), CLI.Chain);
7011   }
7012
7013   SDValue Res = CLI.DAG.getNode(ISD::MERGE_VALUES, CLI.DL,
7014                                 CLI.DAG.getVTList(RetTys), ReturnValues);
7015   return std::make_pair(Res, CLI.Chain);
7016 }
7017
7018 void TargetLowering::LowerOperationWrapper(SDNode *N,
7019                                            SmallVectorImpl<SDValue> &Results,
7020                                            SelectionDAG &DAG) const {
7021   SDValue Res = LowerOperation(SDValue(N, 0), DAG);
7022   if (Res.getNode())
7023     Results.push_back(Res);
7024 }
7025
7026 SDValue TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
7027   llvm_unreachable("LowerOperation not implemented for this target!");
7028 }
7029
7030 void
7031 SelectionDAGBuilder::CopyValueToVirtualRegister(const Value *V, unsigned Reg) {
7032   SDValue Op = getNonRegisterValue(V);
7033   assert((Op.getOpcode() != ISD::CopyFromReg ||
7034           cast<RegisterSDNode>(Op.getOperand(1))->getReg() != Reg) &&
7035          "Copy from a reg to the same reg!");
7036   assert(!TargetRegisterInfo::isPhysicalRegister(Reg) && "Is a physreg");
7037
7038   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7039   RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), Reg,
7040                    V->getType());
7041   SDValue Chain = DAG.getEntryNode();
7042
7043   ISD::NodeType ExtendType = (FuncInfo.PreferredExtendType.find(V) ==
7044                               FuncInfo.PreferredExtendType.end())
7045                                  ? ISD::ANY_EXTEND
7046                                  : FuncInfo.PreferredExtendType[V];
7047   RFV.getCopyToRegs(Op, DAG, getCurSDLoc(), Chain, nullptr, V, ExtendType);
7048   PendingExports.push_back(Chain);
7049 }
7050
7051 #include "llvm/CodeGen/SelectionDAGISel.h"
7052
7053 /// isOnlyUsedInEntryBlock - If the specified argument is only used in the
7054 /// entry block, return true. Arguments used by a switch count as escaping
7055 /// the entry block, since the switch may expand into multiple basic blocks.
7056 static bool isOnlyUsedInEntryBlock(const Argument *A, bool FastISel) {
7057   // With FastISel active, we may be splitting blocks, so force creation
7058   // of virtual registers for all non-dead arguments.
7059   if (FastISel)
7060     return A->use_empty();
7061
7062   const BasicBlock *Entry = A->getParent()->begin();
7063   for (const User *U : A->users())
7064     if (cast<Instruction>(U)->getParent() != Entry || isa<SwitchInst>(U))
7065       return false;  // Use not in entry block.
7066
7067   return true;
7068 }
7069
7070 void SelectionDAGISel::LowerArguments(const Function &F) {
7071   SelectionDAG &DAG = SDB->DAG;
7072   SDLoc dl = SDB->getCurSDLoc();
7073   const DataLayout &DL = DAG.getDataLayout();
7074   SmallVector<ISD::InputArg, 16> Ins;
7075
7076   if (!FuncInfo->CanLowerReturn) {
7077     // Put in an sret pointer parameter before all the other parameters.
7078     SmallVector<EVT, 1> ValueVTs;
7079     ComputeValueVTs(*TLI, DAG.getDataLayout(),
7080                     PointerType::getUnqual(F.getReturnType()), ValueVTs);
7081
7082     // NOTE: Assuming that a pointer will never break down to more than one VT
7083     // or one register.
7084     ISD::ArgFlagsTy Flags;
7085     Flags.setSRet();
7086     MVT RegisterVT = TLI->getRegisterType(*DAG.getContext(), ValueVTs[0]);
7087     ISD::InputArg RetArg(Flags, RegisterVT, ValueVTs[0], true,
7088                          ISD::InputArg::NoArgIndex, 0);
7089     Ins.push_back(RetArg);
7090   }
7091
7092   // Set up the incoming argument description vector.
7093 unsigned Idx = 1; 7094 for (Function::const_arg_iterator I = F.arg_begin(), E = F.arg_end(); 7095 I != E; ++I, ++Idx) { 7096 SmallVector<EVT, 4> ValueVTs; 7097 ComputeValueVTs(*TLI, DAG.getDataLayout(), I->getType(), ValueVTs); 7098 bool isArgValueUsed = !I->use_empty(); 7099 unsigned PartBase = 0; 7100 Type *FinalType = I->getType(); 7101 if (F.getAttributes().hasAttribute(Idx, Attribute::ByVal)) 7102 FinalType = cast<PointerType>(FinalType)->getElementType(); 7103 bool NeedsRegBlock = TLI->functionArgumentNeedsConsecutiveRegisters( 7104 FinalType, F.getCallingConv(), F.isVarArg()); 7105 for (unsigned Value = 0, NumValues = ValueVTs.size(); 7106 Value != NumValues; ++Value) { 7107 EVT VT = ValueVTs[Value]; 7108 Type *ArgTy = VT.getTypeForEVT(*DAG.getContext()); 7109 ISD::ArgFlagsTy Flags; 7110 unsigned OriginalAlignment = DL.getABITypeAlignment(ArgTy); 7111 7112 if (F.getAttributes().hasAttribute(Idx, Attribute::ZExt)) 7113 Flags.setZExt(); 7114 if (F.getAttributes().hasAttribute(Idx, Attribute::SExt)) 7115 Flags.setSExt(); 7116 if (F.getAttributes().hasAttribute(Idx, Attribute::InReg)) 7117 Flags.setInReg(); 7118 if (F.getAttributes().hasAttribute(Idx, Attribute::StructRet)) 7119 Flags.setSRet(); 7120 if (F.getAttributes().hasAttribute(Idx, Attribute::ByVal)) 7121 Flags.setByVal(); 7122 if (F.getAttributes().hasAttribute(Idx, Attribute::InAlloca)) { 7123 Flags.setInAlloca(); 7124 // Set the byval flag for CCAssignFn callbacks that don't know about 7125 // inalloca. This way we can know how many bytes we should've allocated 7126 // and how many bytes a callee cleanup function will pop. If we port 7127 // inalloca to more targets, we'll have to add custom inalloca handling 7128 // in the various CC lowering callbacks. 7129 Flags.setByVal(); 7130 } 7131 if (Flags.isByVal() || Flags.isInAlloca()) { 7132 PointerType *Ty = cast<PointerType>(I->getType()); 7133 Type *ElementTy = Ty->getElementType(); 7134 Flags.setByValSize(DL.getTypeAllocSize(ElementTy)); 7135 // For ByVal, alignment should be passed from FE. BE will guess if 7136 // this info is not there but there are cases it cannot get right. 7137 unsigned FrameAlign; 7138 if (F.getParamAlignment(Idx)) 7139 FrameAlign = F.getParamAlignment(Idx); 7140 else 7141 FrameAlign = TLI->getByValTypeAlignment(ElementTy, DL); 7142 Flags.setByValAlign(FrameAlign); 7143 } 7144 if (F.getAttributes().hasAttribute(Idx, Attribute::Nest)) 7145 Flags.setNest(); 7146 if (NeedsRegBlock) 7147 Flags.setInConsecutiveRegs(); 7148 Flags.setOrigAlign(OriginalAlignment); 7149 7150 MVT RegisterVT = TLI->getRegisterType(*CurDAG->getContext(), VT); 7151 unsigned NumRegs = TLI->getNumRegisters(*CurDAG->getContext(), VT); 7152 for (unsigned i = 0; i != NumRegs; ++i) { 7153 ISD::InputArg MyFlags(Flags, RegisterVT, VT, isArgValueUsed, 7154 Idx-1, PartBase+i*RegisterVT.getStoreSize()); 7155 if (NumRegs > 1 && i == 0) 7156 MyFlags.Flags.setSplit(); 7157 // if it isn't first piece, alignment must be 1 7158 else if (i > 0) 7159 MyFlags.Flags.setOrigAlign(1); 7160 Ins.push_back(MyFlags); 7161 } 7162 if (NeedsRegBlock && Value == NumValues - 1) 7163 Ins[Ins.size() - 1].Flags.setInConsecutiveRegsLast(); 7164 PartBase += VT.getStoreSize(); 7165 } 7166 } 7167 7168 // Call the target to set up the argument values. 7169 SmallVector<SDValue, 8> InVals; 7170 SDValue NewRoot = TLI->LowerFormalArguments( 7171 DAG.getRoot(), F.getCallingConv(), F.isVarArg(), Ins, dl, DAG, InVals); 7172 7173 // Verify that the target's LowerFormalArguments behaved as expected. 
7174 assert(NewRoot.getNode() && NewRoot.getValueType() == MVT::Other && 7175 "LowerFormalArguments didn't return a valid chain!"); 7176 assert(InVals.size() == Ins.size() && 7177 "LowerFormalArguments didn't emit the correct number of values!"); 7178 DEBUG({ 7179 for (unsigned i = 0, e = Ins.size(); i != e; ++i) { 7180 assert(InVals[i].getNode() && 7181 "LowerFormalArguments emitted a null value!"); 7182 assert(EVT(Ins[i].VT) == InVals[i].getValueType() && 7183 "LowerFormalArguments emitted a value with the wrong type!"); 7184 } 7185 }); 7186 7187 // Update the DAG with the new chain value resulting from argument lowering. 7188 DAG.setRoot(NewRoot); 7189 7190 // Set up the argument values. 7191 unsigned i = 0; 7192 Idx = 1; 7193 if (!FuncInfo->CanLowerReturn) { 7194 // Create a virtual register for the sret pointer, and put in a copy 7195 // from the sret argument into it. 7196 SmallVector<EVT, 1> ValueVTs; 7197 ComputeValueVTs(*TLI, DAG.getDataLayout(), 7198 PointerType::getUnqual(F.getReturnType()), ValueVTs); 7199 MVT VT = ValueVTs[0].getSimpleVT(); 7200 MVT RegVT = TLI->getRegisterType(*CurDAG->getContext(), VT); 7201 ISD::NodeType AssertOp = ISD::DELETED_NODE; 7202 SDValue ArgValue = getCopyFromParts(DAG, dl, &InVals[0], 1, 7203 RegVT, VT, nullptr, AssertOp); 7204 7205 MachineFunction& MF = SDB->DAG.getMachineFunction(); 7206 MachineRegisterInfo& RegInfo = MF.getRegInfo(); 7207 unsigned SRetReg = RegInfo.createVirtualRegister(TLI->getRegClassFor(RegVT)); 7208 FuncInfo->DemoteRegister = SRetReg; 7209 NewRoot = 7210 SDB->DAG.getCopyToReg(NewRoot, SDB->getCurSDLoc(), SRetReg, ArgValue); 7211 DAG.setRoot(NewRoot); 7212 7213 // i indexes lowered arguments. Bump it past the hidden sret argument. 7214 // Idx indexes LLVM arguments. Don't touch it. 7215 ++i; 7216 } 7217 7218 for (Function::const_arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; 7219 ++I, ++Idx) { 7220 SmallVector<SDValue, 4> ArgValues; 7221 SmallVector<EVT, 4> ValueVTs; 7222 ComputeValueVTs(*TLI, DAG.getDataLayout(), I->getType(), ValueVTs); 7223 unsigned NumValues = ValueVTs.size(); 7224 7225 // If this argument is unused then remember its value. It is used to generate 7226 // debugging information. 7227 if (I->use_empty() && NumValues) { 7228 SDB->setUnusedArgValue(I, InVals[i]); 7229 7230 // Also remember any frame index for use in FastISel. 7231 if (FrameIndexSDNode *FI = 7232 dyn_cast<FrameIndexSDNode>(InVals[i].getNode())) 7233 FuncInfo->setArgumentFrameIndex(I, FI->getIndex()); 7234 } 7235 7236 for (unsigned Val = 0; Val != NumValues; ++Val) { 7237 EVT VT = ValueVTs[Val]; 7238 MVT PartVT = TLI->getRegisterType(*CurDAG->getContext(), VT); 7239 unsigned NumParts = TLI->getNumRegisters(*CurDAG->getContext(), VT); 7240 7241 if (!I->use_empty()) { 7242 ISD::NodeType AssertOp = ISD::DELETED_NODE; 7243 if (F.getAttributes().hasAttribute(Idx, Attribute::SExt)) 7244 AssertOp = ISD::AssertSext; 7245 else if (F.getAttributes().hasAttribute(Idx, Attribute::ZExt)) 7246 AssertOp = ISD::AssertZext; 7247 7248 ArgValues.push_back(getCopyFromParts(DAG, dl, &InVals[i], 7249 NumParts, PartVT, VT, 7250 nullptr, AssertOp)); 7251 } 7252 7253 i += NumParts; 7254 } 7255 7256 // We don't need to do anything else for unused arguments. 7257 if (ArgValues.empty()) 7258 continue; 7259 7260 // Note down frame index. 
7261 if (FrameIndexSDNode *FI = 7262 dyn_cast<FrameIndexSDNode>(ArgValues[0].getNode())) 7263 FuncInfo->setArgumentFrameIndex(I, FI->getIndex()); 7264 7265 SDValue Res = DAG.getMergeValues(makeArrayRef(ArgValues.data(), NumValues), 7266 SDB->getCurSDLoc()); 7267 7268 SDB->setValue(I, Res); 7269 if (!TM.Options.EnableFastISel && Res.getOpcode() == ISD::BUILD_PAIR) { 7270 if (LoadSDNode *LNode = 7271 dyn_cast<LoadSDNode>(Res.getOperand(0).getNode())) 7272 if (FrameIndexSDNode *FI = 7273 dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode())) 7274 FuncInfo->setArgumentFrameIndex(I, FI->getIndex()); 7275 } 7276 7277 // If this argument is live outside of the entry block, insert a copy from 7278 // wherever we got it to the vreg that other BB's will reference it as. 7279 if (!TM.Options.EnableFastISel && Res.getOpcode() == ISD::CopyFromReg) { 7280 // If we can, though, try to skip creating an unnecessary vreg. 7281 // FIXME: This isn't very clean... it would be nice to make this more 7282 // general. It's also subtly incompatible with the hacks FastISel 7283 // uses with vregs. 7284 unsigned Reg = cast<RegisterSDNode>(Res.getOperand(1))->getReg(); 7285 if (TargetRegisterInfo::isVirtualRegister(Reg)) { 7286 FuncInfo->ValueMap[I] = Reg; 7287 continue; 7288 } 7289 } 7290 if (!isOnlyUsedInEntryBlock(I, TM.Options.EnableFastISel)) { 7291 FuncInfo->InitializeRegForValue(I); 7292 SDB->CopyToExportRegsIfNeeded(I); 7293 } 7294 } 7295 7296 assert(i == InVals.size() && "Argument register count mismatch!"); 7297 7298 // Finally, if the target has anything special to do, allow it to do so. 7299 EmitFunctionEntryCode(); 7300 } 7301 7302 /// Handle PHI nodes in successor blocks. Emit code into the SelectionDAG to 7303 /// ensure constants are generated when needed. Remember the virtual registers 7304 /// that need to be added to the Machine PHI nodes as input. We cannot just 7305 /// directly add them, because expansion might result in multiple MBB's for one 7306 /// BB. As such, the start of the BB might correspond to a different MBB than 7307 /// the end. 7308 /// 7309 void 7310 SelectionDAGBuilder::HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) { 7311 const TerminatorInst *TI = LLVMBB->getTerminator(); 7312 7313 SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled; 7314 7315 // Check PHI nodes in successors that expect a value to be available from this 7316 // block. 7317 for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) { 7318 const BasicBlock *SuccBB = TI->getSuccessor(succ); 7319 if (!isa<PHINode>(SuccBB->begin())) continue; 7320 MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB]; 7321 7322 // If this terminator has multiple identical successors (common for 7323 // switches), only handle each succ once. 7324 if (!SuccsHandled.insert(SuccMBB).second) 7325 continue; 7326 7327 MachineBasicBlock::iterator MBBI = SuccMBB->begin(); 7328 7329 // At this point we know that there is a 1-1 correspondence between LLVM PHI 7330 // nodes and Machine PHI nodes, but the incoming operands have not been 7331 // emitted yet. 7332 for (BasicBlock::const_iterator I = SuccBB->begin(); 7333 const PHINode *PN = dyn_cast<PHINode>(I); ++I) { 7334 // Ignore dead phi's. 
7335       if (PN->use_empty()) continue;
7336
7337       // Skip empty types.
7338       if (PN->getType()->isEmptyTy())
7339         continue;
7340
7341       unsigned Reg;
7342       const Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);
7343
7344       if (const Constant *C = dyn_cast<Constant>(PHIOp)) {
7345         unsigned &RegOut = ConstantsOut[C];
7346         if (RegOut == 0) {
7347           RegOut = FuncInfo.CreateRegs(C->getType());
7348           CopyValueToVirtualRegister(C, RegOut);
7349         }
7350         Reg = RegOut;
7351       } else {
7352         DenseMap<const Value *, unsigned>::iterator I =
7353             FuncInfo.ValueMap.find(PHIOp);
7354         if (I != FuncInfo.ValueMap.end())
7355           Reg = I->second;
7356         else {
7357           assert(isa<AllocaInst>(PHIOp) &&
7358                  FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(PHIOp)) &&
7359                  "Didn't codegen value into a register!??");
7360           Reg = FuncInfo.CreateRegs(PHIOp->getType());
7361           CopyValueToVirtualRegister(PHIOp, Reg);
7362         }
7363       }
7364
7365       // Remember that this register needs to be added to the machine PHI node
7366       // as the input for this MBB.
7367       SmallVector<EVT, 4> ValueVTs;
7368       const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7369       ComputeValueVTs(TLI, DAG.getDataLayout(), PN->getType(), ValueVTs);
7370       for (unsigned vti = 0, vte = ValueVTs.size(); vti != vte; ++vti) {
7371         EVT VT = ValueVTs[vti];
7372         unsigned NumRegisters = TLI.getNumRegisters(*DAG.getContext(), VT);
7373         for (unsigned i = 0, e = NumRegisters; i != e; ++i)
7374           FuncInfo.PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg+i));
7375         Reg += NumRegisters;
7376       }
7377     }
7378   }
7379
7380   ConstantsOut.clear();
7381 }
7382
7383 /// Add a successor MBB to ParentMBB, creating a new MachineBB for BB if SuccMBB
7384 /// is 0.
7385 MachineBasicBlock *
7386 SelectionDAGBuilder::StackProtectorDescriptor::
7387 AddSuccessorMBB(const BasicBlock *BB,
7388                 MachineBasicBlock *ParentMBB,
7389                 bool IsLikely,
7390                 MachineBasicBlock *SuccMBB) {
7391   // If SuccBB has not been created yet, create it.
7392   if (!SuccMBB) {
7393     MachineFunction *MF = ParentMBB->getParent();
7394     MachineFunction::iterator BBI = ParentMBB;
7395     SuccMBB = MF->CreateMachineBasicBlock(BB);
7396     MF->insert(++BBI, SuccMBB);
7397   }
7398   // Add it as a successor of ParentMBB.
7399   ParentMBB->addSuccessor(
7400       SuccMBB, BranchProbabilityInfo::getBranchWeightStackProtector(IsLikely));
7401   return SuccMBB;
7402 }
7403
7404 MachineBasicBlock *SelectionDAGBuilder::NextBlock(MachineBasicBlock *MBB) {
7405   MachineFunction::iterator I = MBB;
7406   if (++I == FuncInfo.MF->end())
7407     return nullptr;
7408   return I;
7409 }
7410
7411 /// During lowering new call nodes can be created (such as memset, etc.).
7412 /// Those will become new roots of the current DAG, but complications arise
7413 /// when they are tail calls. In such cases, the call lowering will update
7414 /// the root, but the builder still needs to know that a tail call has been
7415 /// lowered in order to avoid generating an additional return.
7416 void SelectionDAGBuilder::updateDAGForMaybeTailCall(SDValue MaybeTC) {
7417   // If the node is null, we do have a tail call.
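  // (A null node here pairs with the tail-call path of LowerCallTo above,
  // which returns null SDValues exactly when it emitted a tail call.)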
7418 if (MaybeTC.getNode() != nullptr) 7419 DAG.setRoot(MaybeTC); 7420 else 7421 HasTailCall = true; 7422 } 7423 7424 bool SelectionDAGBuilder::isDense(const CaseClusterVector &Clusters, 7425 unsigned *TotalCases, unsigned First, 7426 unsigned Last) { 7427 assert(Last >= First); 7428 assert(TotalCases[Last] >= TotalCases[First]); 7429 7430 APInt LowCase = Clusters[First].Low->getValue(); 7431 APInt HighCase = Clusters[Last].High->getValue(); 7432 assert(LowCase.getBitWidth() == HighCase.getBitWidth()); 7433 7434 // FIXME: A range of consecutive cases has 100% density, but only requires one 7435 // comparison to lower. We should discriminate against such consecutive ranges 7436 // in jump tables. 7437 7438 uint64_t Diff = (HighCase - LowCase).getLimitedValue((UINT64_MAX - 1) / 100); 7439 uint64_t Range = Diff + 1; 7440 7441 uint64_t NumCases = 7442 TotalCases[Last] - (First == 0 ? 0 : TotalCases[First - 1]); 7443 7444 assert(NumCases < UINT64_MAX / 100); 7445 assert(Range >= NumCases); 7446 7447 return NumCases * 100 >= Range * MinJumpTableDensity; 7448 } 7449 7450 static inline bool areJTsAllowed(const TargetLowering &TLI) { 7451 return TLI.isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) || 7452 TLI.isOperationLegalOrCustom(ISD::BRIND, MVT::Other); 7453 } 7454 7455 bool SelectionDAGBuilder::buildJumpTable(CaseClusterVector &Clusters, 7456 unsigned First, unsigned Last, 7457 const SwitchInst *SI, 7458 MachineBasicBlock *DefaultMBB, 7459 CaseCluster &JTCluster) { 7460 assert(First <= Last); 7461 7462 uint32_t Weight = 0; 7463 unsigned NumCmps = 0; 7464 std::vector<MachineBasicBlock*> Table; 7465 DenseMap<MachineBasicBlock*, uint32_t> JTWeights; 7466 for (unsigned I = First; I <= Last; ++I) { 7467 assert(Clusters[I].Kind == CC_Range); 7468 Weight += Clusters[I].Weight; 7469 assert(Weight >= Clusters[I].Weight && "Weight overflow!"); 7470 APInt Low = Clusters[I].Low->getValue(); 7471 APInt High = Clusters[I].High->getValue(); 7472 NumCmps += (Low == High) ? 1 : 2; 7473 if (I != First) { 7474 // Fill the gap between this and the previous cluster. 7475 APInt PreviousHigh = Clusters[I - 1].High->getValue(); 7476 assert(PreviousHigh.slt(Low)); 7477 uint64_t Gap = (Low - PreviousHigh).getLimitedValue() - 1; 7478 for (uint64_t J = 0; J < Gap; J++) 7479 Table.push_back(DefaultMBB); 7480 } 7481 uint64_t ClusterSize = (High - Low).getLimitedValue() + 1; 7482 for (uint64_t J = 0; J < ClusterSize; ++J) 7483 Table.push_back(Clusters[I].MBB); 7484 JTWeights[Clusters[I].MBB] += Clusters[I].Weight; 7485 } 7486 7487 unsigned NumDests = JTWeights.size(); 7488 if (isSuitableForBitTests(NumDests, NumCmps, 7489 Clusters[First].Low->getValue(), 7490 Clusters[Last].High->getValue())) { 7491 // Clusters[First..Last] should be lowered as bit tests instead. 7492 return false; 7493 } 7494 7495 // Create the MBB that will load from and jump through the table. 7496 // Note: We create it here, but it's not inserted into the function yet. 7497 MachineFunction *CurMF = FuncInfo.MF; 7498 MachineBasicBlock *JumpTableMBB = 7499 CurMF->CreateMachineBasicBlock(SI->getParent()); 7500 7501 // Add successors. Note: use table order for determinism. 
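  // (Iterating JTWeights instead would visit successors in DenseMap hash
  // order, making the emitted block list nondeterministic between runs.)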
7502 SmallPtrSet<MachineBasicBlock *, 8> Done; 7503 for (MachineBasicBlock *Succ : Table) { 7504 if (Done.count(Succ)) 7505 continue; 7506 addSuccessorWithWeight(JumpTableMBB, Succ, JTWeights[Succ]); 7507 Done.insert(Succ); 7508 } 7509 7510 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 7511 unsigned JTI = CurMF->getOrCreateJumpTableInfo(TLI.getJumpTableEncoding()) 7512 ->createJumpTableIndex(Table); 7513 7514 // Set up the jump table info. 7515 JumpTable JT(-1U, JTI, JumpTableMBB, nullptr); 7516 JumpTableHeader JTH(Clusters[First].Low->getValue(), 7517 Clusters[Last].High->getValue(), SI->getCondition(), 7518 nullptr, false); 7519 JTCases.emplace_back(std::move(JTH), std::move(JT)); 7520 7521 JTCluster = CaseCluster::jumpTable(Clusters[First].Low, Clusters[Last].High, 7522 JTCases.size() - 1, Weight); 7523 return true; 7524 } 7525 7526 void SelectionDAGBuilder::findJumpTables(CaseClusterVector &Clusters, 7527 const SwitchInst *SI, 7528 MachineBasicBlock *DefaultMBB) { 7529 #ifndef NDEBUG 7530 // Clusters must be non-empty, sorted, and only contain Range clusters. 7531 assert(!Clusters.empty()); 7532 for (CaseCluster &C : Clusters) 7533 assert(C.Kind == CC_Range); 7534 for (unsigned i = 1, e = Clusters.size(); i < e; ++i) 7535 assert(Clusters[i - 1].High->getValue().slt(Clusters[i].Low->getValue())); 7536 #endif 7537 7538 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 7539 if (!areJTsAllowed(TLI)) 7540 return; 7541 7542 const int64_t N = Clusters.size(); 7543 const unsigned MinJumpTableSize = TLI.getMinimumJumpTableEntries(); 7544 7545 // TotalCases[i]: Total nbr of cases in Clusters[0..i]. 7546 SmallVector<unsigned, 8> TotalCases(N); 7547 7548 for (unsigned i = 0; i < N; ++i) { 7549 APInt Hi = Clusters[i].High->getValue(); 7550 APInt Lo = Clusters[i].Low->getValue(); 7551 TotalCases[i] = (Hi - Lo).getLimitedValue() + 1; 7552 if (i != 0) 7553 TotalCases[i] += TotalCases[i - 1]; 7554 } 7555 7556 if (N >= MinJumpTableSize && isDense(Clusters, &TotalCases[0], 0, N - 1)) { 7557 // Cheap case: the whole range might be suitable for jump table. 7558 CaseCluster JTCluster; 7559 if (buildJumpTable(Clusters, 0, N - 1, SI, DefaultMBB, JTCluster)) { 7560 Clusters[0] = JTCluster; 7561 Clusters.resize(1); 7562 return; 7563 } 7564 } 7565 7566 // The algorithm below is not suitable for -O0. 7567 if (TM.getOptLevel() == CodeGenOpt::None) 7568 return; 7569 7570 // Split Clusters into minimum number of dense partitions. The algorithm uses 7571 // the same idea as Kannan & Proebsting "Correction to 'Producing Good Code 7572 // for the Case Statement'" (1994), but builds the MinPartitions array in 7573 // reverse order to make it easier to reconstruct the partitions in ascending 7574 // order. In the choice between two optimal partitionings, it picks the one 7575 // which yields more jump tables. 7576 7577 // MinPartitions[i] is the minimum nbr of partitions of Clusters[i..N-1]. 7578 SmallVector<unsigned, 8> MinPartitions(N); 7579 // LastElement[i] is the last element of the partition starting at i. 7580 SmallVector<unsigned, 8> LastElement(N); 7581 // NumTables[i]: nbr of >= MinJumpTableSize partitions from Clusters[i..N-1]. 7582 SmallVector<unsigned, 8> NumTables(N); 7583 7584 // Base case: There is only one way to partition Clusters[N-1]. 7585 MinPartitions[N - 1] = 1; 7586 LastElement[N - 1] = N - 1; 7587 assert(MinJumpTableSize > 1); 7588 NumTables[N - 1] = 0; 7589 7590 // Note: loop indexes are signed to avoid underflow. 
7591 for (int64_t i = N - 2; i >= 0; i--) {
7592 // Find optimal partitioning of Clusters[i..N-1].
7593 // Baseline: Put Clusters[i] into a partition on its own.
7594 MinPartitions[i] = MinPartitions[i + 1] + 1;
7595 LastElement[i] = i;
7596 NumTables[i] = NumTables[i + 1];
7597
7598 // Search for a solution that results in fewer partitions.
7599 for (int64_t j = N - 1; j > i; j--) {
7600 // Try building a partition from Clusters[i..j].
7601 if (isDense(Clusters, &TotalCases[0], i, j)) {
7602 unsigned NumPartitions = 1 + (j == N - 1 ? 0 : MinPartitions[j + 1]);
7603 bool IsTable = j - i + 1 >= MinJumpTableSize;
7604 unsigned Tables = IsTable + (j == N - 1 ? 0 : NumTables[j + 1]);
7605
7606 // If this j leads to fewer partitions, or to the same number of
7607 // partitions with more lookup tables, it is a better partitioning.
7608 if (NumPartitions < MinPartitions[i] ||
7609 (NumPartitions == MinPartitions[i] && Tables > NumTables[i])) {
7610 MinPartitions[i] = NumPartitions;
7611 LastElement[i] = j;
7612 NumTables[i] = Tables;
7613 }
7614 }
7615 }
7616 }
7617
7618 // Iterate over the partitions, replacing some with jump tables in-place.
7619 unsigned DstIndex = 0;
7620 for (unsigned First = 0, Last; First < N; First = Last + 1) {
7621 Last = LastElement[First];
7622 assert(Last >= First);
7623 assert(DstIndex <= First);
7624 unsigned NumClusters = Last - First + 1;
7625
7626 CaseCluster JTCluster;
7627 if (NumClusters >= MinJumpTableSize &&
7628 buildJumpTable(Clusters, First, Last, SI, DefaultMBB, JTCluster)) {
7629 Clusters[DstIndex++] = JTCluster;
7630 } else {
7631 for (unsigned I = First; I <= Last; ++I)
7632 std::memmove(&Clusters[DstIndex++], &Clusters[I], sizeof(Clusters[I]));
7633 }
7634 }
7635 Clusters.resize(DstIndex);
7636 }
7637
7638 bool SelectionDAGBuilder::rangeFitsInWord(const APInt &Low, const APInt &High) {
7639 // FIXME: Using the pointer type doesn't seem ideal.
7640 uint64_t BW = DAG.getDataLayout().getPointerSizeInBits();
7641 uint64_t Range = (High - Low).getLimitedValue(UINT64_MAX - 1) + 1;
7642 return Range <= BW;
7643 }
7644
7645 bool SelectionDAGBuilder::isSuitableForBitTests(unsigned NumDests,
7646 unsigned NumCmps,
7647 const APInt &Low,
7648 const APInt &High) {
7649 // FIXME: I don't think NumCmps is the correct metric: a single case and a
7650 // range of cases both require only one branch to lower. Just looking at the
7651 // number of clusters and destinations should be enough to decide whether to
7652 // build bit tests.
7653
7654 // To lower a range with bit tests, the range must fit in the bitwidth of a
7655 // machine word.
7656 if (!rangeFitsInWord(Low, High))
7657 return false;
7658
7659 // Decide whether it's profitable to lower this range with bit tests. Each
7660 // destination requires a bit test and branch, and there is an overall range
7661 // check branch. For a small number of clusters, separate comparisons might be
7662 // cheaper, and for many destinations, splitting the range might be better.
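// For example, a single destination reached through three distinct case
// values costs three compares and branches when lowered as a tree, but only
// the range check plus one bit test and branch when lowered here.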
7663 return (NumDests == 1 && NumCmps >= 3) ||
7664 (NumDests == 2 && NumCmps >= 5) ||
7665 (NumDests == 3 && NumCmps >= 6);
7666 }
7667
7668 bool SelectionDAGBuilder::buildBitTests(CaseClusterVector &Clusters,
7669 unsigned First, unsigned Last,
7670 const SwitchInst *SI,
7671 CaseCluster &BTCluster) {
7672 assert(First <= Last);
7673 if (First == Last)
7674 return false;
7675
7676 BitVector Dests(FuncInfo.MF->getNumBlockIDs());
7677 unsigned NumCmps = 0;
7678 for (int64_t I = First; I <= Last; ++I) {
7679 assert(Clusters[I].Kind == CC_Range);
7680 Dests.set(Clusters[I].MBB->getNumber());
7681 NumCmps += (Clusters[I].Low == Clusters[I].High) ? 1 : 2;
7682 }
7683 unsigned NumDests = Dests.count();
7684
7685 APInt Low = Clusters[First].Low->getValue();
7686 APInt High = Clusters[Last].High->getValue();
7687 assert(Low.slt(High));
7688
7689 if (!isSuitableForBitTests(NumDests, NumCmps, Low, High))
7690 return false;
7691
7692 APInt LowBound;
7693 APInt CmpRange;
7694
7695 const int BitWidth = DAG.getTargetLoweringInfo()
7696 .getPointerTy(DAG.getDataLayout())
7697 .getSizeInBits();
7698 assert(rangeFitsInWord(Low, High) && "Case range must fit in bit mask!");
7699
7700 if (Low.isNonNegative() && High.slt(BitWidth)) {
7701 // All the case values already lie in [0, BitWidth), so there is no need
7702 // to subtract the minimum value from the switch condition before the
7703 // range check and the bit shift.
7704 LowBound = APInt::getNullValue(Low.getBitWidth());
7705 CmpRange = High;
7706 } else {
7707 LowBound = Low;
7708 CmpRange = High - Low;
7709 }
7710
7711 CaseBitsVector CBV;
7712 uint32_t TotalWeight = 0;
7713 for (unsigned i = First; i <= Last; ++i) {
7714 // Find the CaseBits for this destination.
7715 unsigned j;
7716 for (j = 0; j < CBV.size(); ++j)
7717 if (CBV[j].BB == Clusters[i].MBB)
7718 break;
7719 if (j == CBV.size())
7720 CBV.push_back(CaseBits(0, Clusters[i].MBB, 0, 0));
7721 CaseBits *CB = &CBV[j];
7722
7723 // Update Mask, Bits and ExtraWeight.
7724 uint64_t Lo = (Clusters[i].Low->getValue() - LowBound).getZExtValue();
7725 uint64_t Hi = (Clusters[i].High->getValue() - LowBound).getZExtValue();
7726 assert(Hi >= Lo && Hi < 64 && "Invalid bit case!");
7727 CB->Mask |= (-1ULL >> (63 - (Hi - Lo))) << Lo;
7728 CB->Bits += Hi - Lo + 1;
7729 CB->ExtraWeight += Clusters[i].Weight;
7730 TotalWeight += Clusters[i].Weight;
7731 assert(TotalWeight >= Clusters[i].Weight && "Weight overflow!");
7732 }
7733
7734 BitTestInfo BTI;
7735 std::sort(CBV.begin(), CBV.end(), [](const CaseBits &a, const CaseBits &b) {
7736 // Sort by weight first, number of bits second.
7737 if (a.ExtraWeight != b.ExtraWeight)
7738 return a.ExtraWeight > b.ExtraWeight;
7739 return a.Bits > b.Bits;
7740 });
7741
7742 for (auto &CB : CBV) {
7743 MachineBasicBlock *BitTestBB =
7744 FuncInfo.MF->CreateMachineBasicBlock(SI->getParent());
7745 BTI.push_back(BitTestCase(CB.Mask, BitTestBB, CB.BB, CB.ExtraWeight));
7746 }
7747 BitTestCases.emplace_back(std::move(LowBound), std::move(CmpRange),
7748 SI->getCondition(), -1U, MVT::Other, false, nullptr,
7749 nullptr, std::move(BTI));
7750
7751 BTCluster = CaseCluster::bitTests(Clusters[First].Low, Clusters[Last].High,
7752 BitTestCases.size() - 1, TotalWeight);
7753 return true;
7754 }
7755
7756 void SelectionDAGBuilder::findBitTestClusters(CaseClusterVector &Clusters,
7757 const SwitchInst *SI) {
7758 // Partition Clusters into as few subsets as possible, where each subset has a
7759 // range that fits in a machine word and has <= 3 unique destinations.
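// For example, on a 64-bit target, clusters at 1, 5 and 8 jumping to A plus
// a cluster at 60 jumping to B can form one subset (the range 1..60 spans 60
// values and there are two destinations), but adding a cluster at 100 would
// overflow the word and force a new subset.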
7760
7761 #ifndef NDEBUG
7762 // Clusters must be sorted and contain Range or JumpTable clusters.
7763 assert(!Clusters.empty());
7764 assert(Clusters[0].Kind == CC_Range || Clusters[0].Kind == CC_JumpTable);
7765 for (const CaseCluster &C : Clusters)
7766 assert(C.Kind == CC_Range || C.Kind == CC_JumpTable);
7767 for (unsigned i = 1; i < Clusters.size(); ++i)
7768 assert(Clusters[i-1].High->getValue().slt(Clusters[i].Low->getValue()));
7769 #endif
7770
7771 // The algorithm below is not suitable for -O0.
7772 if (TM.getOptLevel() == CodeGenOpt::None)
7773 return;
7774
7775 // If the target does not have a legal shift-left, do not emit bit tests.
7776 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7777 EVT PTy = TLI.getPointerTy(DAG.getDataLayout());
7778 if (!TLI.isOperationLegal(ISD::SHL, PTy))
7779 return;
7780
7781 int BitWidth = PTy.getSizeInBits();
7782 const int64_t N = Clusters.size();
7783
7784 // MinPartitions[i] is the minimum nbr of partitions of Clusters[i..N-1].
7785 SmallVector<unsigned, 8> MinPartitions(N);
7786 // LastElement[i] is the last element of the partition starting at i.
7787 SmallVector<unsigned, 8> LastElement(N);
7788
7789 // FIXME: This might not be the best algorithm for finding bit test clusters.
7790
7791 // Base case: There is only one way to partition Clusters[N-1].
7792 MinPartitions[N - 1] = 1;
7793 LastElement[N - 1] = N - 1;
7794
7795 // Note: loop indexes are signed to avoid underflow.
7796 for (int64_t i = N - 2; i >= 0; --i) {
7797 // Find optimal partitioning of Clusters[i..N-1].
7798 // Baseline: Put Clusters[i] into a partition on its own.
7799 MinPartitions[i] = MinPartitions[i + 1] + 1;
7800 LastElement[i] = i;
7801
7802 // Search for a solution that results in fewer partitions.
7803 // Note: the search is limited by BitWidth, reducing time complexity.
7804 for (int64_t j = std::min(N - 1, i + BitWidth - 1); j > i; --j) {
7805 // Try building a partition from Clusters[i..j].
7806
7807 // Check the range.
7808 if (!rangeFitsInWord(Clusters[i].Low->getValue(),
7809 Clusters[j].High->getValue()))
7810 continue;
7811
7812 // Check nbr of destinations and cluster types.
7813 // FIXME: This works, but doesn't seem very efficient.
7814 bool RangesOnly = true;
7815 BitVector Dests(FuncInfo.MF->getNumBlockIDs());
7816 for (int64_t k = i; k <= j; k++) {
7817 if (Clusters[k].Kind != CC_Range) {
7818 RangesOnly = false;
7819 break;
7820 }
7821 Dests.set(Clusters[k].MBB->getNumber());
7822 }
7823 if (!RangesOnly || Dests.count() > 3)
7824 break;
7825
7826 // Check if it's a better partition.
7827 unsigned NumPartitions = 1 + (j == N - 1 ? 0 : MinPartitions[j + 1]);
7828 if (NumPartitions < MinPartitions[i]) {
7829 // Found a better partition.
7830 MinPartitions[i] = NumPartitions;
7831 LastElement[i] = j;
7832 }
7833 }
7834 }
7835
7836 // Iterate over the partitions, replacing some with bit-test clusters in-place.
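// DstIndex never overtakes First (see the assert below), so the compaction
// only moves clusters towards the front of the vector and never clobbers an
// entry before it has been read.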
7837 unsigned DstIndex = 0;
7838 for (unsigned First = 0, Last; First < N; First = Last + 1) {
7839 Last = LastElement[First];
7840 assert(First <= Last);
7841 assert(DstIndex <= First);
7842
7843 CaseCluster BitTestCluster;
7844 if (buildBitTests(Clusters, First, Last, SI, BitTestCluster)) {
7845 Clusters[DstIndex++] = BitTestCluster;
7846 } else {
7847 size_t NumClusters = Last - First + 1;
7848 std::memmove(&Clusters[DstIndex], &Clusters[First],
7849 sizeof(Clusters[0]) * NumClusters);
7850 DstIndex += NumClusters;
7851 }
7852 }
7853 Clusters.resize(DstIndex);
7854 }
7855
7856 void SelectionDAGBuilder::lowerWorkItem(SwitchWorkListItem W, Value *Cond,
7857 MachineBasicBlock *SwitchMBB,
7858 MachineBasicBlock *DefaultMBB) {
7859 MachineFunction *CurMF = FuncInfo.MF;
7860 MachineBasicBlock *NextMBB = nullptr;
7861 MachineFunction::iterator BBI = W.MBB;
7862 if (++BBI != FuncInfo.MF->end())
7863 NextMBB = BBI;
7864
7865 unsigned Size = W.LastCluster - W.FirstCluster + 1;
7866
7867 BranchProbabilityInfo *BPI = FuncInfo.BPI;
7868
7869 if (Size == 2 && W.MBB == SwitchMBB) {
7870 // If two of the cases have the same destination, and one value differs
7871 // from the other in exactly one bit, use bit manipulation to do both
7872 // compares at once. For example:
7873 // "if (X == 6 || X == 4)" -> "if ((X|2) == 6)"
7874 // TODO: This could be extended to merge any 2 cases in switches with 3
7875 // cases.
7876 // TODO: Handle cases where W.MBB != SwitchMBB.
7877 CaseCluster &Small = *W.FirstCluster;
7878 CaseCluster &Big = *W.LastCluster;
7879
7880 if (Small.Low == Small.High && Big.Low == Big.High &&
7881 Small.MBB == Big.MBB) {
7882 const APInt &SmallValue = Small.Low->getValue();
7883 const APInt &BigValue = Big.Low->getValue();
7884
7885 // Check that exactly one bit is different.
7886 APInt CommonBit = BigValue ^ SmallValue;
7887 if (CommonBit.isPowerOf2()) {
7888 SDValue CondLHS = getValue(Cond);
7889 EVT VT = CondLHS.getValueType();
7890 SDLoc DL = getCurSDLoc();
7891
7892 SDValue Or = DAG.getNode(ISD::OR, DL, VT, CondLHS,
7893 DAG.getConstant(CommonBit, DL, VT));
7894 SDValue Cond = DAG.getSetCC(
7895 DL, MVT::i1, Or, DAG.getConstant(BigValue | SmallValue, DL, VT),
7896 ISD::SETEQ);
7897
7898 // Update successor info.
7899 // Both Small and Big will jump to Small.MBB, so we sum up the weights.
7900 addSuccessorWithWeight(SwitchMBB, Small.MBB, Small.Weight + Big.Weight);
7901 addSuccessorWithWeight(
7902 SwitchMBB, DefaultMBB,
7903 // The default destination is the first successor in IR.
7904 BPI ? BPI->getEdgeWeight(SwitchMBB->getBasicBlock(), (unsigned)0)
7905 : 0);
7906
7907 // Insert the true branch.
7908 SDValue BrCond =
7909 DAG.getNode(ISD::BRCOND, DL, MVT::Other, getControlRoot(), Cond,
7910 DAG.getBasicBlock(Small.MBB));
7911 // Insert the false branch.
7912 BrCond = DAG.getNode(ISD::BR, DL, MVT::Other, BrCond,
7913 DAG.getBasicBlock(DefaultMBB));
7914
7915 DAG.setRoot(BrCond);
7916 return;
7917 }
7918 }
7919 }
7920
7921 if (TM.getOptLevel() != CodeGenOpt::None) {
7922 // Order cases by weight so the most likely case will be checked first.
7923 std::sort(W.FirstCluster, W.LastCluster + 1,
7924 [](const CaseCluster &a, const CaseCluster &b) {
7925 return a.Weight > b.Weight;
7926 });
7927
7928 // Rearrange the case blocks so that the last one falls through if possible,
7929 // without changing the order of weights.
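// Because the clusters were just sorted in order of decreasing weight, only
// a cluster whose weight equals the last cluster's can be swapped into the
// last position without breaking that order.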
7930 for (CaseClusterIt I = W.LastCluster; I > W.FirstCluster; ) {
7931 --I;
7932 if (I->Weight > W.LastCluster->Weight)
7933 break;
7934 if (I->Kind == CC_Range && I->MBB == NextMBB) {
7935 std::swap(*I, *W.LastCluster);
7936 break;
7937 }
7938 }
7939 }
7940
7941 // Compute total weight.
7942 uint32_t UnhandledWeights = 0;
7943 for (CaseClusterIt I = W.FirstCluster; I <= W.LastCluster; ++I) {
7944 UnhandledWeights += I->Weight;
7945 assert(UnhandledWeights >= I->Weight && "Weight overflow!");
7946 }
7947
7948 MachineBasicBlock *CurMBB = W.MBB;
7949 for (CaseClusterIt I = W.FirstCluster, E = W.LastCluster; I <= E; ++I) {
7950 MachineBasicBlock *Fallthrough;
7951 if (I == W.LastCluster) {
7952 // For the last cluster, fall through to the default destination.
7953 Fallthrough = DefaultMBB;
7954 } else {
7955 Fallthrough = CurMF->CreateMachineBasicBlock(CurMBB->getBasicBlock());
7956 CurMF->insert(BBI, Fallthrough);
7957 // Put Cond in a virtual register to make it available from the new blocks.
7958 ExportFromCurrentBlock(Cond);
7959 }
7960
7961 switch (I->Kind) {
7962 case CC_JumpTable: {
7963 // FIXME: Optimize away range check based on pivot comparisons.
7964 JumpTableHeader *JTH = &JTCases[I->JTCasesIndex].first;
7965 JumpTable *JT = &JTCases[I->JTCasesIndex].second;
7966
7967 // The jump block hasn't been inserted yet; insert it here.
7968 MachineBasicBlock *JumpMBB = JT->MBB;
7969 CurMF->insert(BBI, JumpMBB);
7970 addSuccessorWithWeight(CurMBB, Fallthrough);
7971 addSuccessorWithWeight(CurMBB, JumpMBB);
7972
7973 // The jump table header will be inserted into our current block; it will
7974 // do the range check and fall through to our fallthrough block.
7975 JTH->HeaderBB = CurMBB;
7976 JT->Default = Fallthrough; // FIXME: Move Default to JumpTableHeader.
7977
7978 // If we're in the right place, emit the jump table header right now.
7979 if (CurMBB == SwitchMBB) {
7980 visitJumpTableHeader(*JT, *JTH, SwitchMBB);
7981 JTH->Emitted = true;
7982 }
7983 break;
7984 }
7985 case CC_BitTests: {
7986 // FIXME: Optimize away range check based on pivot comparisons.
7987 BitTestBlock *BTB = &BitTestCases[I->BTCasesIndex];
7988
7989 // The bit test blocks haven't been inserted yet; insert them here.
7990 for (BitTestCase &BTC : BTB->Cases)
7991 CurMF->insert(BBI, BTC.ThisBB);
7992
7993 // Fill in fields of the BitTestBlock.
7994 BTB->Parent = CurMBB;
7995 BTB->Default = Fallthrough;
7996
7997 // If we're in the right place, emit the bit test header right now.
7998 if (CurMBB == SwitchMBB) {
7999 visitBitTestHeader(*BTB, SwitchMBB);
8000 BTB->Emitted = true;
8001 }
8002 break;
8003 }
8004 case CC_Range: {
8005 const Value *RHS, *LHS, *MHS;
8006 ISD::CondCode CC;
8007 if (I->Low == I->High) {
8008 // Check Cond == I->Low.
8009 CC = ISD::SETEQ;
8010 LHS = Cond;
8011 RHS = I->Low;
8012 MHS = nullptr;
8013 } else {
8014 // Check I->Low <= Cond <= I->High.
8015 CC = ISD::SETLE;
8016 LHS = I->Low;
8017 MHS = Cond;
8018 RHS = I->High;
8019 }
8020
8021 // The false weight is the sum of all unhandled cases.
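// Subtracting this cluster's weight first makes UnhandledWeights the weight
// of everything that remains if this cluster's check fails.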
8022 UnhandledWeights -= I->Weight; 8023 CaseBlock CB(CC, LHS, RHS, MHS, I->MBB, Fallthrough, CurMBB, I->Weight, 8024 UnhandledWeights); 8025 8026 if (CurMBB == SwitchMBB) 8027 visitSwitchCase(CB, SwitchMBB); 8028 else 8029 SwitchCases.push_back(CB); 8030 8031 break; 8032 } 8033 } 8034 CurMBB = Fallthrough; 8035 } 8036 } 8037 8038 unsigned SelectionDAGBuilder::caseClusterRank(const CaseCluster &CC, 8039 CaseClusterIt First, 8040 CaseClusterIt Last) { 8041 return std::count_if(First, Last + 1, [&](const CaseCluster &X) { 8042 if (X.Weight != CC.Weight) 8043 return X.Weight > CC.Weight; 8044 8045 // Ties are broken by comparing the case value. 8046 return X.Low->getValue().slt(CC.Low->getValue()); 8047 }); 8048 } 8049 8050 void SelectionDAGBuilder::splitWorkItem(SwitchWorkList &WorkList, 8051 const SwitchWorkListItem &W, 8052 Value *Cond, 8053 MachineBasicBlock *SwitchMBB) { 8054 assert(W.FirstCluster->Low->getValue().slt(W.LastCluster->Low->getValue()) && 8055 "Clusters not sorted?"); 8056 8057 assert(W.LastCluster - W.FirstCluster + 1 >= 2 && "Too small to split!"); 8058 8059 // Balance the tree based on branch weights to create a near-optimal (in terms 8060 // of search time given key frequency) binary search tree. See e.g. Kurt 8061 // Mehlhorn "Nearly Optimal Binary Search Trees" (1975). 8062 CaseClusterIt LastLeft = W.FirstCluster; 8063 CaseClusterIt FirstRight = W.LastCluster; 8064 uint32_t LeftWeight = LastLeft->Weight; 8065 uint32_t RightWeight = FirstRight->Weight; 8066 8067 // Move LastLeft and FirstRight towards each other from opposite directions to 8068 // find a partitioning of the clusters which balances the weight on both 8069 // sides. If LeftWeight and RightWeight are equal, alternate which side is 8070 // taken to ensure 0-weight nodes are distributed evenly. 8071 unsigned I = 0; 8072 while (LastLeft + 1 < FirstRight) { 8073 if (LeftWeight < RightWeight || (LeftWeight == RightWeight && (I & 1))) 8074 LeftWeight += (++LastLeft)->Weight; 8075 else 8076 RightWeight += (--FirstRight)->Weight; 8077 I++; 8078 } 8079 8080 for (;;) { 8081 // Our binary search tree differs from a typical BST in that ours can have up 8082 // to three values in each leaf. The pivot selection above doesn't take that 8083 // into account, which means the tree might require more nodes and be less 8084 // efficient. We compensate for this here. 8085 8086 unsigned NumLeft = LastLeft - W.FirstCluster + 1; 8087 unsigned NumRight = W.LastCluster - FirstRight + 1; 8088 8089 if (std::min(NumLeft, NumRight) < 3 && std::max(NumLeft, NumRight) > 3) { 8090 // If one side has less than 3 clusters, and the other has more than 3, 8091 // consider taking a cluster from the other side. 8092 8093 if (NumLeft < NumRight) { 8094 // Consider moving the first cluster on the right to the left side. 8095 CaseCluster &CC = *FirstRight; 8096 unsigned RightSideRank = caseClusterRank(CC, FirstRight, W.LastCluster); 8097 unsigned LeftSideRank = caseClusterRank(CC, W.FirstCluster, LastLeft); 8098 if (LeftSideRank <= RightSideRank) { 8099 // Moving the cluster to the left does not demote it. 8100 ++LastLeft; 8101 ++FirstRight; 8102 continue; 8103 } 8104 } else { 8105 assert(NumRight < NumLeft); 8106 // Consider moving the last element on the left to the right side. 
8107 CaseCluster &CC = *LastLeft;
8108 unsigned LeftSideRank = caseClusterRank(CC, W.FirstCluster, LastLeft);
8109 unsigned RightSideRank = caseClusterRank(CC, FirstRight, W.LastCluster);
8110 if (RightSideRank <= LeftSideRank) {
8111 // Moving the cluster to the right does not demote it.
8112 --LastLeft;
8113 --FirstRight;
8114 continue;
8115 }
8116 }
8117 }
8118 break;
8119 }
8120
8121 assert(LastLeft + 1 == FirstRight);
8122 assert(LastLeft >= W.FirstCluster);
8123 assert(FirstRight <= W.LastCluster);
8124
8125 // Use the first element on the right as pivot since we will make less-than
8126 // comparisons against it.
8127 CaseClusterIt PivotCluster = FirstRight;
8128 assert(PivotCluster > W.FirstCluster);
8129 assert(PivotCluster <= W.LastCluster);
8130
8131 CaseClusterIt FirstLeft = W.FirstCluster;
8132 CaseClusterIt LastRight = W.LastCluster;
8133
8134 const ConstantInt *Pivot = PivotCluster->Low;
8135
8136 // New blocks will be inserted immediately after the current one.
8137 MachineFunction::iterator BBI = W.MBB;
8138 ++BBI;
8139
8140 // We will branch to the LHS if Value < Pivot. If LHS is a single cluster,
8141 // we can branch to its destination directly if it's squeezed exactly in
8142 // between the known lower bound and Pivot - 1.
8143 MachineBasicBlock *LeftMBB;
8144 if (FirstLeft == LastLeft && FirstLeft->Kind == CC_Range &&
8145 FirstLeft->Low == W.GE &&
8146 (FirstLeft->High->getValue() + 1LL) == Pivot->getValue()) {
8147 LeftMBB = FirstLeft->MBB;
8148 } else {
8149 LeftMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock());
8150 FuncInfo.MF->insert(BBI, LeftMBB);
8151 WorkList.push_back({LeftMBB, FirstLeft, LastLeft, W.GE, Pivot});
8152 // Put Cond in a virtual register to make it available from the new blocks.
8153 ExportFromCurrentBlock(Cond);
8154 }
8155
8156 // Similarly, we will branch to the RHS if Value >= Pivot. If RHS is a
8157 // single cluster, RHS.Low == Pivot, and we can branch to its destination
8158 // directly if RHS.High equals the current upper bound.
8159 MachineBasicBlock *RightMBB;
8160 if (FirstRight == LastRight && FirstRight->Kind == CC_Range &&
8161 W.LT && (FirstRight->High->getValue() + 1ULL) == W.LT->getValue()) {
8162 RightMBB = FirstRight->MBB;
8163 } else {
8164 RightMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock());
8165 FuncInfo.MF->insert(BBI, RightMBB);
8166 WorkList.push_back({RightMBB, FirstRight, LastRight, Pivot, W.LT});
8167 // Put Cond in a virtual register to make it available from the new blocks.
8168 ExportFromCurrentBlock(Cond);
8169 }
8170
8171 // Create the CaseBlock record that will be used to lower the branch.
8172 CaseBlock CB(ISD::SETLT, Cond, Pivot, nullptr, LeftMBB, RightMBB, W.MBB,
8173 LeftWeight, RightWeight);
8174
8175 if (W.MBB == SwitchMBB)
8176 visitSwitchCase(CB, SwitchMBB);
8177 else
8178 SwitchCases.push_back(CB);
8179 }
8180
8181 void SelectionDAGBuilder::visitSwitch(const SwitchInst &SI) {
8182 // Extract cases from the switch.
8183 BranchProbabilityInfo *BPI = FuncInfo.BPI;
8184 CaseClusterVector Clusters;
8185 Clusters.reserve(SI.getNumCases());
8186 for (auto I : SI.cases()) {
8187 MachineBasicBlock *Succ = FuncInfo.MBBMap[I.getCaseSuccessor()];
8188 const ConstantInt *CaseVal = I.getCaseValue();
8189 uint32_t Weight =
8190 BPI ?
BPI->getEdgeWeight(SI.getParent(), I.getSuccessorIndex()) : 0; 8191 Clusters.push_back(CaseCluster::range(CaseVal, CaseVal, Succ, Weight)); 8192 } 8193 8194 MachineBasicBlock *DefaultMBB = FuncInfo.MBBMap[SI.getDefaultDest()]; 8195 8196 // Cluster adjacent cases with the same destination. We do this at all 8197 // optimization levels because it's cheap to do and will make codegen faster 8198 // if there are many clusters. 8199 sortAndRangeify(Clusters); 8200 8201 if (TM.getOptLevel() != CodeGenOpt::None) { 8202 // Replace an unreachable default with the most popular destination. 8203 // FIXME: Exploit unreachable default more aggressively. 8204 bool UnreachableDefault = 8205 isa<UnreachableInst>(SI.getDefaultDest()->getFirstNonPHIOrDbg()); 8206 if (UnreachableDefault && !Clusters.empty()) { 8207 DenseMap<const BasicBlock *, unsigned> Popularity; 8208 unsigned MaxPop = 0; 8209 const BasicBlock *MaxBB = nullptr; 8210 for (auto I : SI.cases()) { 8211 const BasicBlock *BB = I.getCaseSuccessor(); 8212 if (++Popularity[BB] > MaxPop) { 8213 MaxPop = Popularity[BB]; 8214 MaxBB = BB; 8215 } 8216 } 8217 // Set new default. 8218 assert(MaxPop > 0 && MaxBB); 8219 DefaultMBB = FuncInfo.MBBMap[MaxBB]; 8220 8221 // Remove cases that were pointing to the destination that is now the 8222 // default. 8223 CaseClusterVector New; 8224 New.reserve(Clusters.size()); 8225 for (CaseCluster &CC : Clusters) { 8226 if (CC.MBB != DefaultMBB) 8227 New.push_back(CC); 8228 } 8229 Clusters = std::move(New); 8230 } 8231 } 8232 8233 // If there is only the default destination, jump there directly. 8234 MachineBasicBlock *SwitchMBB = FuncInfo.MBB; 8235 if (Clusters.empty()) { 8236 SwitchMBB->addSuccessor(DefaultMBB); 8237 if (DefaultMBB != NextBlock(SwitchMBB)) { 8238 DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other, 8239 getControlRoot(), DAG.getBasicBlock(DefaultMBB))); 8240 } 8241 return; 8242 } 8243 8244 findJumpTables(Clusters, &SI, DefaultMBB); 8245 findBitTestClusters(Clusters, &SI); 8246 8247 DEBUG({ 8248 dbgs() << "Case clusters: "; 8249 for (const CaseCluster &C : Clusters) { 8250 if (C.Kind == CC_JumpTable) dbgs() << "JT:"; 8251 if (C.Kind == CC_BitTests) dbgs() << "BT:"; 8252 8253 C.Low->getValue().print(dbgs(), true); 8254 if (C.Low != C.High) { 8255 dbgs() << '-'; 8256 C.High->getValue().print(dbgs(), true); 8257 } 8258 dbgs() << ' '; 8259 } 8260 dbgs() << '\n'; 8261 }); 8262 8263 assert(!Clusters.empty()); 8264 SwitchWorkList WorkList; 8265 CaseClusterIt First = Clusters.begin(); 8266 CaseClusterIt Last = Clusters.end() - 1; 8267 WorkList.push_back({SwitchMBB, First, Last, nullptr, nullptr}); 8268 8269 while (!WorkList.empty()) { 8270 SwitchWorkListItem W = WorkList.back(); 8271 WorkList.pop_back(); 8272 unsigned NumClusters = W.LastCluster - W.FirstCluster + 1; 8273 8274 if (NumClusters > 3 && TM.getOptLevel() != CodeGenOpt::None) { 8275 // For optimized builds, lower large range as a balanced binary tree. 8276 splitWorkItem(WorkList, W, SI.getCondition(), SwitchMBB); 8277 continue; 8278 } 8279 8280 lowerWorkItem(W, SI.getCondition(), SwitchMBB, DefaultMBB); 8281 } 8282 } 8283