//===-- SelectionDAGBuilder.cpp - Selection-DAG building ------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements routines for translating from LLVM IR into SelectionDAG IR.
//
//===----------------------------------------------------------------------===//

#include "SelectionDAGBuilder.h"
#include "SDNodeDbgValue.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/GCMetadata.h"
#include "llvm/CodeGen/GCStrategy.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGTargetInfo.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/WinEHFuncInfo.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <algorithm>
#include <utility>
using namespace llvm;

#define DEBUG_TYPE "isel"

/// LimitFloatPrecision - Generate low-precision inline sequences for
/// some float libcalls (6, 8 or 12 bits).
static unsigned LimitFloatPrecision;

static cl::opt<unsigned, true>
LimitFPPrecision("limit-float-precision",
                 cl::desc("Generate low-precision inline sequences "
                          "for some float libcalls"),
                 cl::location(LimitFloatPrecision),
                 cl::init(0));

// Limit the width of DAG chains. This is important in general to prevent
// DAG-based analysis from blowing up. For example, alias analysis and
// load clustering may not complete in reasonable time. It is difficult to
// recognize and avoid this situation within each individual analysis, and
// future analyses are likely to have the same behavior. Limiting DAG width is
// the safe approach and will be especially important with global DAGs.
//
// MaxParallelChains default is arbitrarily high to avoid affecting
// optimization, but could be lowered to improve compile time. Any ld-ld-st-st
// sequence over this should have been converted to llvm.memcpy by the
// frontend. It is easy to induce this behavior with .ll code such as:
//   %buffer = alloca [4096 x i8]
//   %data = load [4096 x i8]* %argPtr
//   store [4096 x i8] %data, [4096 x i8]* %buffer
static const unsigned MaxParallelChains = 64;

// True if the Value passed requires ABI mangling as it is a parameter to a
// function or a return value from a function which is not an intrinsic.
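// For example, the result of a direct, non-intrinsic call such as
// "%r = call i32 @foo()" requires ABI mangling (as does an indirect call),
// while a call to an intrinsic like @llvm.ctlz.i32 or to inline asm does not.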
static bool isABIRegCopy(const Value *V) {
  const bool IsRetInst = V && isa<ReturnInst>(V);
  const bool IsCallInst = V && isa<CallInst>(V);
  const bool IsInLineAsm =
      IsCallInst && static_cast<const CallInst *>(V)->isInlineAsm();
  const bool IsIndirectFunctionCall =
      IsCallInst && !IsInLineAsm &&
      !static_cast<const CallInst *>(V)->getCalledFunction();
  // It is possible that the call instruction is an inline asm statement or an
  // indirect function call in which case the return value of
  // getCalledFunction() would be nullptr.
  const bool IsIntrinsicCall =
      IsCallInst && !IsInLineAsm && !IsIndirectFunctionCall &&
      static_cast<const CallInst *>(V)->getCalledFunction()->getIntrinsicID() !=
          Intrinsic::not_intrinsic;

  return IsRetInst || (IsCallInst && (!IsInLineAsm && !IsIntrinsicCall));
}

static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL,
                                      const SDValue *Parts, unsigned NumParts,
                                      MVT PartVT, EVT ValueVT, const Value *V,
                                      bool IsABIRegCopy);

/// getCopyFromParts - Create a value that contains the specified legal parts
/// combined into the value they represent.  If the parts combine to a type
/// larger than ValueVT then AssertOp can be used to specify whether the extra
/// bits are known to be zero (ISD::AssertZext) or sign extended from ValueVT
/// (ISD::AssertSext).
static SDValue getCopyFromParts(SelectionDAG &DAG, const SDLoc &DL,
                                const SDValue *Parts, unsigned NumParts,
                                MVT PartVT, EVT ValueVT, const Value *V,
                                Optional<ISD::NodeType> AssertOp = None,
                                bool IsABIRegCopy = false) {
  if (ValueVT.isVector())
    return getCopyFromPartsVector(DAG, DL, Parts, NumParts,
                                  PartVT, ValueVT, V, IsABIRegCopy);

  assert(NumParts > 0 && "No parts to assemble!");
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue Val = Parts[0];

  if (NumParts > 1) {
    // Assemble the value from multiple parts.
    if (ValueVT.isInteger()) {
      unsigned PartBits = PartVT.getSizeInBits();
      unsigned ValueBits = ValueVT.getSizeInBits();
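
      // Illustrative example (little-endian): assembling an i96 value from
      // three i32 parts. NumParts == 3 is not a power of 2, so RoundParts is
      // 2: Parts[0..1] are combined into an i64 "round" part, the remaining
      // odd part is assembled separately, and the two pieces are extended,
      // shifted and OR'd together below.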
      // Assemble the power of 2 part.
      unsigned RoundParts = NumParts & (NumParts - 1) ?
        1 << Log2_32(NumParts) : NumParts;
      unsigned RoundBits = PartBits * RoundParts;
      EVT RoundVT = RoundBits == ValueBits ?
        ValueVT : EVT::getIntegerVT(*DAG.getContext(), RoundBits);
      SDValue Lo, Hi;

      EVT HalfVT = EVT::getIntegerVT(*DAG.getContext(), RoundBits/2);

      if (RoundParts > 2) {
        Lo = getCopyFromParts(DAG, DL, Parts, RoundParts / 2,
                              PartVT, HalfVT, V);
        Hi = getCopyFromParts(DAG, DL, Parts + RoundParts / 2,
                              RoundParts / 2, PartVT, HalfVT, V);
      } else {
        Lo = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[0]);
        Hi = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[1]);
      }

      if (DAG.getDataLayout().isBigEndian())
        std::swap(Lo, Hi);

      Val = DAG.getNode(ISD::BUILD_PAIR, DL, RoundVT, Lo, Hi);

      if (RoundParts < NumParts) {
        // Assemble the trailing non-power-of-2 part.
        unsigned OddParts = NumParts - RoundParts;
        EVT OddVT = EVT::getIntegerVT(*DAG.getContext(), OddParts * PartBits);
        Hi = getCopyFromParts(DAG, DL,
                              Parts + RoundParts, OddParts, PartVT, OddVT, V);

        // Combine the round and odd parts.
        Lo = Val;
        if (DAG.getDataLayout().isBigEndian())
          std::swap(Lo, Hi);
        EVT TotalVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
        Hi = DAG.getNode(ISD::ANY_EXTEND, DL, TotalVT, Hi);
        Hi =
            DAG.getNode(ISD::SHL, DL, TotalVT, Hi,
                        DAG.getConstant(Lo.getValueSizeInBits(), DL,
                                        TLI.getPointerTy(DAG.getDataLayout())));
        Lo = DAG.getNode(ISD::ZERO_EXTEND, DL, TotalVT, Lo);
        Val = DAG.getNode(ISD::OR, DL, TotalVT, Lo, Hi);
      }
    } else if (PartVT.isFloatingPoint()) {
      // FP split into multiple FP parts (for ppcf128)
      assert(ValueVT == EVT(MVT::ppcf128) && PartVT == MVT::f64 &&
             "Unexpected split");
      SDValue Lo, Hi;
      Lo = DAG.getNode(ISD::BITCAST, DL, EVT(MVT::f64), Parts[0]);
      Hi = DAG.getNode(ISD::BITCAST, DL, EVT(MVT::f64), Parts[1]);
      if (TLI.hasBigEndianPartOrdering(ValueVT, DAG.getDataLayout()))
        std::swap(Lo, Hi);
      Val = DAG.getNode(ISD::BUILD_PAIR, DL, ValueVT, Lo, Hi);
    } else {
      // FP split into integer parts (soft fp)
      assert(ValueVT.isFloatingPoint() && PartVT.isInteger() &&
             !PartVT.isVector() && "Unexpected split");
      EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
      Val = getCopyFromParts(DAG, DL, Parts, NumParts, PartVT, IntVT, V);
    }
  }

  // There is now one part, held in Val.  Correct it to match ValueVT.
  // PartEVT is the type of the register class that holds the value.
  // ValueVT is the type of the inline asm operation.
  EVT PartEVT = Val.getValueType();

  if (PartEVT == ValueVT)
    return Val;

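  // For example, an f32 value that arrives in a wider integer part (say an
  // i64 register) is truncated to i32 below and then converted by the
  // same-size bitcast handling that follows.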
  if (PartEVT.isInteger() && ValueVT.isFloatingPoint() &&
      ValueVT.bitsLT(PartEVT)) {
    // For an FP value in an integer part, we need to truncate to the right
    // width first.
    PartEVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
    Val = DAG.getNode(ISD::TRUNCATE, DL, PartEVT, Val);
  }

  // Handle types that have the same size.
  if (PartEVT.getSizeInBits() == ValueVT.getSizeInBits())
    return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);

  // Handle types with different sizes.
  if (PartEVT.isInteger() && ValueVT.isInteger()) {
    if (ValueVT.bitsLT(PartEVT)) {
      // For a truncate, see if we have any information to
      // indicate whether the truncated bits will always be
      // zero or sign-extension.
      if (AssertOp.hasValue())
        Val = DAG.getNode(*AssertOp, DL, PartEVT, Val,
                          DAG.getValueType(ValueVT));
      return DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
    }
    return DAG.getNode(ISD::ANY_EXTEND, DL, ValueVT, Val);
  }

  if (PartEVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
    // FP_ROUND's are always exact here.
    if (ValueVT.bitsLT(Val.getValueType()))
      return DAG.getNode(
          ISD::FP_ROUND, DL, ValueVT, Val,
          DAG.getTargetConstant(1, DL, TLI.getPointerTy(DAG.getDataLayout())));

    return DAG.getNode(ISD::FP_EXTEND, DL, ValueVT, Val);
  }

  llvm_unreachable("Unknown mismatch!");
}

static void diagnosePossiblyInvalidConstraint(LLVMContext &Ctx, const Value *V,
                                              const Twine &ErrMsg) {
  const Instruction *I = dyn_cast_or_null<Instruction>(V);
  if (!V)
    return Ctx.emitError(ErrMsg);

  const char *AsmError = ", possible invalid constraint for vector type";
  if (const CallInst *CI = dyn_cast<CallInst>(I))
    if (isa<InlineAsm>(CI->getCalledValue()))
      return Ctx.emitError(I, ErrMsg + AsmError);

  return Ctx.emitError(I, ErrMsg);
}

/// getCopyFromPartsVector - Create a value that contains the specified legal
/// parts combined into the value they represent.  If the parts combine to a
/// type larger than ValueVT then AssertOp can be used to specify whether the
/// extra bits are known to be zero (ISD::AssertZext) or sign extended from
/// ValueVT (ISD::AssertSext).
static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL,
                                      const SDValue *Parts, unsigned NumParts,
                                      MVT PartVT, EVT ValueVT, const Value *V,
                                      bool IsABIRegCopy) {
  assert(ValueVT.isVector() && "Not a vector value");
  assert(NumParts > 0 && "No parts to assemble!");
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue Val = Parts[0];

  // Handle a multi-element vector.
  if (NumParts > 1) {
    EVT IntermediateVT;
    MVT RegisterVT;
    unsigned NumIntermediates;
    unsigned NumRegs;

    if (IsABIRegCopy) {
      NumRegs = TLI.getVectorTypeBreakdownForCallingConv(
          *DAG.getContext(), ValueVT, IntermediateVT, NumIntermediates,
          RegisterVT);
    } else {
      NumRegs =
          TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT,
                                     NumIntermediates, RegisterVT);
    }

    assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
    NumParts = NumRegs; // Silence a compiler warning.
    assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");
    assert(RegisterVT.getSizeInBits() ==
           Parts[0].getSimpleValueType().getSizeInBits() &&
           "Part type sizes don't match!");

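    // Illustrative example: an <8 x i32> value on a target whose widest legal
    // vector type is <4 x i32> breaks down into NumIntermediates == 2
    // intermediates of IntermediateVT <4 x i32>; they are reassembled below
    // with CONCAT_VECTORS.
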
    // Assemble the parts into intermediate operands.
    SmallVector<SDValue, 8> Ops(NumIntermediates);
    if (NumIntermediates == NumParts) {
      // If the register was not expanded, truncate or copy the value,
      // as appropriate.
      for (unsigned i = 0; i != NumParts; ++i)
        Ops[i] = getCopyFromParts(DAG, DL, &Parts[i], 1,
                                  PartVT, IntermediateVT, V);
    } else if (NumParts > 0) {
      // If the intermediate type was expanded, build the intermediate
      // operands from the parts.
      assert(NumParts % NumIntermediates == 0 &&
             "Must expand into a divisible number of parts!");
      unsigned Factor = NumParts / NumIntermediates;
      for (unsigned i = 0; i != NumIntermediates; ++i)
        Ops[i] = getCopyFromParts(DAG, DL, &Parts[i * Factor], Factor,
                                  PartVT, IntermediateVT, V);
    }

    // Build a vector with BUILD_VECTOR or CONCAT_VECTORS from the
    // intermediate operands.
    EVT BuiltVectorTy =
        EVT::getVectorVT(*DAG.getContext(), IntermediateVT.getScalarType(),
                         (IntermediateVT.isVector()
                              ? IntermediateVT.getVectorNumElements() * NumParts
                              : NumIntermediates));
    Val = DAG.getNode(IntermediateVT.isVector() ? ISD::CONCAT_VECTORS
                                                : ISD::BUILD_VECTOR,
                      DL, BuiltVectorTy, Ops);
  }

  // There is now one part, held in Val.  Correct it to match ValueVT.
  EVT PartEVT = Val.getValueType();

  if (PartEVT == ValueVT)
    return Val;

  if (PartEVT.isVector()) {
    // If the element type of the source/dest vectors are the same, but the
    // parts vector has more elements than the value vector, then we have a
    // vector widening case (e.g. <2 x float> -> <4 x float>).  Extract the
    // elements we want.
    if (PartEVT.getVectorElementType() == ValueVT.getVectorElementType()) {
      assert(PartEVT.getVectorNumElements() > ValueVT.getVectorNumElements() &&
             "Cannot narrow, it would be a lossy transformation");
      return DAG.getNode(
          ISD::EXTRACT_SUBVECTOR, DL, ValueVT, Val,
          DAG.getConstant(0, DL, TLI.getVectorIdxTy(DAG.getDataLayout())));
    }

    // Vector/Vector bitcast.
    if (ValueVT.getSizeInBits() == PartEVT.getSizeInBits())
      return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);

    assert(PartEVT.getVectorNumElements() == ValueVT.getVectorNumElements() &&
           "Cannot handle this kind of promotion");
    // Promoted vector extract
    return DAG.getAnyExtOrTrunc(Val, DL, ValueVT);
  }

  // Trivial bitcast if the types are the same size and the destination
  // vector type is legal.
  if (PartEVT.getSizeInBits() == ValueVT.getSizeInBits() &&
      TLI.isTypeLegal(ValueVT))
    return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);

  if (ValueVT.getVectorNumElements() != 1) {
    // Certain ABIs require that vectors are passed as integers. If the
    // vectors are the same size, this is an obvious bitcast.
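    // For example, a <2 x i32> value that arrives in a 128-bit integer part
    // is handled below by bitcasting the part to <4 x i32> and extracting the
    // low <2 x i32> subvector.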
    if (ValueVT.getSizeInBits() == PartEVT.getSizeInBits()) {
      return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
    } else if (ValueVT.getSizeInBits() < PartEVT.getSizeInBits()) {
      // Bitcast Val back to the original type and extract the corresponding
      // vector we want.
      unsigned Elts = PartEVT.getSizeInBits() / ValueVT.getScalarSizeInBits();
      EVT WiderVecType = EVT::getVectorVT(*DAG.getContext(),
                                          ValueVT.getVectorElementType(), Elts);
      Val = DAG.getBitcast(WiderVecType, Val);
      return DAG.getNode(
          ISD::EXTRACT_SUBVECTOR, DL, ValueVT, Val,
          DAG.getConstant(0, DL, TLI.getVectorIdxTy(DAG.getDataLayout())));
    }

    diagnosePossiblyInvalidConstraint(
        *DAG.getContext(), V, "non-trivial scalar-to-vector conversion");
    return DAG.getUNDEF(ValueVT);
  }

  // Handle cases such as i8 -> <1 x i1>
  EVT ValueSVT = ValueVT.getVectorElementType();
  if (ValueVT.getVectorNumElements() == 1 && ValueSVT != PartEVT)
    Val = ValueVT.isFloatingPoint() ? DAG.getFPExtendOrRound(Val, DL, ValueSVT)
                                    : DAG.getAnyExtOrTrunc(Val, DL, ValueSVT);

  return DAG.getBuildVector(ValueVT, DL, Val);
}

static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &dl,
                                 SDValue Val, SDValue *Parts, unsigned NumParts,
                                 MVT PartVT, const Value *V, bool IsABIRegCopy);

/// getCopyToParts - Create a series of nodes that contain the specified value
/// split into legal parts.  If the parts contain more bits than Val, then, for
/// integers, ExtendKind can be used to specify how to generate the extra bits.
static void getCopyToParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val,
                           SDValue *Parts, unsigned NumParts, MVT PartVT,
                           const Value *V,
                           ISD::NodeType ExtendKind = ISD::ANY_EXTEND,
                           bool IsABIRegCopy = false) {
  EVT ValueVT = Val.getValueType();

  // Handle the vector case separately.
  if (ValueVT.isVector())
    return getCopyToPartsVector(DAG, DL, Val, Parts, NumParts, PartVT, V,
                                IsABIRegCopy);

  unsigned PartBits = PartVT.getSizeInBits();
  unsigned OrigNumParts = NumParts;
  assert(DAG.getTargetLoweringInfo().isTypeLegal(PartVT) &&
         "Copying to an illegal type!");

  if (NumParts == 0)
    return;

  assert(!ValueVT.isVector() && "Vector case handled elsewhere");
  EVT PartEVT = PartVT;
  if (PartEVT == ValueVT) {
    assert(NumParts == 1 && "No-op copy with multiple parts!");
    Parts[0] = Val;
    return;
  }

  if (NumParts * PartBits > ValueVT.getSizeInBits()) {
    // If the parts cover more bits than the value has, promote the value.
    if (PartVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
      assert(NumParts == 1 && "Do not know what to promote to!");
      Val = DAG.getNode(ISD::FP_EXTEND, DL, PartVT, Val);
    } else {
      if (ValueVT.isFloatingPoint()) {
        // FP values need to be bitcast, then extended if they are being put
        // into a larger container.
        ValueVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
        Val = DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
      }
      assert((PartVT.isInteger() || PartVT == MVT::x86mmx) &&
             ValueVT.isInteger() &&
             "Unknown mismatch!");
      ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
      Val = DAG.getNode(ExtendKind, DL, ValueVT, Val);
      if (PartVT == MVT::x86mmx)
        Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
    }
  } else if (PartBits == ValueVT.getSizeInBits()) {
    // Different types of the same size.
    assert(NumParts == 1 && PartEVT != ValueVT);
    Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
  } else if (NumParts * PartBits < ValueVT.getSizeInBits()) {
    // If the parts cover fewer bits than the value has, truncate the value.
    assert((PartVT.isInteger() || PartVT == MVT::x86mmx) &&
           ValueVT.isInteger() &&
           "Unknown mismatch!");
    ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
    Val = DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
    if (PartVT == MVT::x86mmx)
      Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
  }

  // The value may have changed - recompute ValueVT.
  ValueVT = Val.getValueType();
  assert(NumParts * PartBits == ValueVT.getSizeInBits() &&
         "Failed to tile the value with PartVT!");

  if (NumParts == 1) {
    if (PartEVT != ValueVT) {
      diagnosePossiblyInvalidConstraint(*DAG.getContext(), V,
                                        "scalar-to-vector conversion failed");
      Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
    }

    Parts[0] = Val;
    return;
  }

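  // Illustrative example: expanding an i96 value into three i32 parts.
  // NumParts == 3 is not a power of 2, so RoundParts is 2: the high 32 bits
  // are shifted down and copied out as the odd part, the value is truncated
  // to i64, and the EXTRACT_ELEMENT loop below bisects it into two i32 parts.
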
  // Expand the value into multiple parts.
  if (NumParts & (NumParts - 1)) {
    // The number of parts is not a power of 2.  Split off and copy the tail.
    assert(PartVT.isInteger() && ValueVT.isInteger() &&
           "Do not know what to expand to!");
    unsigned RoundParts = 1 << Log2_32(NumParts);
    unsigned RoundBits = RoundParts * PartBits;
    unsigned OddParts = NumParts - RoundParts;
    SDValue OddVal = DAG.getNode(ISD::SRL, DL, ValueVT, Val,
                                 DAG.getIntPtrConstant(RoundBits, DL));
    getCopyToParts(DAG, DL, OddVal, Parts + RoundParts, OddParts, PartVT, V);

    if (DAG.getDataLayout().isBigEndian())
      // The odd parts were reversed by getCopyToParts - unreverse them.
      std::reverse(Parts + RoundParts, Parts + NumParts);

    NumParts = RoundParts;
    ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
    Val = DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
  }

  // The number of parts is a power of 2.  Repeatedly bisect the value using
  // EXTRACT_ELEMENT.
  Parts[0] = DAG.getNode(ISD::BITCAST, DL,
                         EVT::getIntegerVT(*DAG.getContext(),
                                           ValueVT.getSizeInBits()),
                         Val);

  for (unsigned StepSize = NumParts; StepSize > 1; StepSize /= 2) {
    for (unsigned i = 0; i < NumParts; i += StepSize) {
      unsigned ThisBits = StepSize * PartBits / 2;
      EVT ThisVT = EVT::getIntegerVT(*DAG.getContext(), ThisBits);
      SDValue &Part0 = Parts[i];
      SDValue &Part1 = Parts[i+StepSize/2];

      Part1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL,
                          ThisVT, Part0, DAG.getIntPtrConstant(1, DL));
      Part0 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL,
                          ThisVT, Part0, DAG.getIntPtrConstant(0, DL));

      if (ThisBits == PartBits && ThisVT != PartVT) {
        Part0 = DAG.getNode(ISD::BITCAST, DL, PartVT, Part0);
        Part1 = DAG.getNode(ISD::BITCAST, DL, PartVT, Part1);
      }
    }
  }

  if (DAG.getDataLayout().isBigEndian())
    std::reverse(Parts, Parts + OrigNumParts);
}

/// getCopyToPartsVector - Create a series of nodes that contain the specified
/// value split into legal parts.
static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &DL,
                                 SDValue Val, SDValue *Parts, unsigned NumParts,
                                 MVT PartVT, const Value *V,
                                 bool IsABIRegCopy) {
  EVT ValueVT = Val.getValueType();
  assert(ValueVT.isVector() && "Not a vector");
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  if (NumParts == 1) {
    EVT PartEVT = PartVT;
    if (PartEVT == ValueVT) {
      // Nothing to do.
    } else if (PartVT.getSizeInBits() == ValueVT.getSizeInBits()) {
      // Bitconvert vector->vector case.
      Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
    } else if (PartVT.isVector() &&
               PartEVT.getVectorElementType() == ValueVT.getVectorElementType() &&
               PartEVT.getVectorNumElements() > ValueVT.getVectorNumElements()) {
      EVT ElementVT = PartVT.getVectorElementType();
      // Vector widening case, e.g. <2 x float> -> <4 x float>.  Shuffle in
      // undef elements.
      SmallVector<SDValue, 16> Ops;
      for (unsigned i = 0, e = ValueVT.getVectorNumElements(); i != e; ++i)
        Ops.push_back(DAG.getNode(
            ISD::EXTRACT_VECTOR_ELT, DL, ElementVT, Val,
            DAG.getConstant(i, DL, TLI.getVectorIdxTy(DAG.getDataLayout()))));

      for (unsigned i = ValueVT.getVectorNumElements(),
                    e = PartVT.getVectorNumElements(); i != e; ++i)
        Ops.push_back(DAG.getUNDEF(ElementVT));

      Val = DAG.getBuildVector(PartVT, DL, Ops);

      // FIXME: Use CONCAT for 2x -> 4x.

      //SDValue UndefElts = DAG.getUNDEF(VectorTy);
      //Val = DAG.getNode(ISD::CONCAT_VECTORS, DL, PartVT, Val, UndefElts);
    } else if (PartVT.isVector() &&
               PartEVT.getVectorElementType().bitsGE(
                   ValueVT.getVectorElementType()) &&
               PartEVT.getVectorNumElements() == ValueVT.getVectorNumElements()) {
      // Promoted vector extract
      Val = DAG.getAnyExtOrTrunc(Val, DL, PartVT);
    } else {
      if (ValueVT.getVectorNumElements() == 1) {
        Val = DAG.getNode(
            ISD::EXTRACT_VECTOR_ELT, DL, PartVT, Val,
            DAG.getConstant(0, DL, TLI.getVectorIdxTy(DAG.getDataLayout())));
      } else {
        assert(PartVT.getSizeInBits() > ValueVT.getSizeInBits() &&
               "lossy conversion of vector to scalar type");
        EVT IntermediateType =
            EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
        Val = DAG.getBitcast(IntermediateType, Val);
        Val = DAG.getAnyExtOrTrunc(Val, DL, PartVT);
      }
    }

    assert(Val.getValueType() == PartVT && "Unexpected vector part value type");
    Parts[0] = Val;
    return;
  }

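  // Below, the value is split according to the target's vector breakdown;
  // e.g. (illustrative) an <8 x i32> value with <4 x i32> registers yields
  // two <4 x i32> intermediates, each extracted with EXTRACT_SUBVECTOR and
  // then copied out part by part.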
  // Handle a multi-element vector.
  EVT IntermediateVT;
  MVT RegisterVT;
  unsigned NumIntermediates;
  unsigned NumRegs;
  if (IsABIRegCopy) {
    NumRegs = TLI.getVectorTypeBreakdownForCallingConv(
        *DAG.getContext(), ValueVT, IntermediateVT, NumIntermediates,
        RegisterVT);
  } else {
    NumRegs =
        TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT,
                                   NumIntermediates, RegisterVT);
  }
  unsigned NumElements = ValueVT.getVectorNumElements();

  assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
  NumParts = NumRegs; // Silence a compiler warning.
  assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");

  // Convert the vector to the appropriate type if necessary.
  unsigned DestVectorNoElts =
      NumIntermediates *
      (IntermediateVT.isVector() ? IntermediateVT.getVectorNumElements() : 1);
  EVT BuiltVectorTy = EVT::getVectorVT(
      *DAG.getContext(), IntermediateVT.getScalarType(), DestVectorNoElts);
  if (Val.getValueType() != BuiltVectorTy)
    Val = DAG.getNode(ISD::BITCAST, DL, BuiltVectorTy, Val);

  // Split the vector into intermediate operands.
  SmallVector<SDValue, 8> Ops(NumIntermediates);
  for (unsigned i = 0; i != NumIntermediates; ++i) {
    if (IntermediateVT.isVector())
      Ops[i] =
          DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, IntermediateVT, Val,
                      DAG.getConstant(i * (NumElements / NumIntermediates), DL,
                                      TLI.getVectorIdxTy(DAG.getDataLayout())));
    else
      Ops[i] = DAG.getNode(
          ISD::EXTRACT_VECTOR_ELT, DL, IntermediateVT, Val,
          DAG.getConstant(i, DL, TLI.getVectorIdxTy(DAG.getDataLayout())));
  }

  // Split the intermediate operands into legal parts.
  if (NumParts == NumIntermediates) {
    // If the register was not expanded, promote or copy the value,
    // as appropriate.
    for (unsigned i = 0; i != NumParts; ++i)
      getCopyToParts(DAG, DL, Ops[i], &Parts[i], 1, PartVT, V);
  } else if (NumParts > 0) {
    // If the intermediate type was expanded, split each value into
    // legal parts.
    assert(NumIntermediates != 0 && "division by zero");
    assert(NumParts % NumIntermediates == 0 &&
           "Must expand into a divisible number of parts!");
    unsigned Factor = NumParts / NumIntermediates;
    for (unsigned i = 0; i != NumIntermediates; ++i)
      getCopyToParts(DAG, DL, Ops[i], &Parts[i*Factor], Factor, PartVT, V);
  }
}

RegsForValue::RegsForValue() { IsABIMangled = false; }

RegsForValue::RegsForValue(const SmallVector<unsigned, 4> &regs, MVT regvt,
                           EVT valuevt, bool IsABIMangledValue)
    : ValueVTs(1, valuevt), RegVTs(1, regvt), Regs(regs),
      RegCount(1, regs.size()), IsABIMangled(IsABIMangledValue) {}

RegsForValue::RegsForValue(LLVMContext &Context, const TargetLowering &TLI,
                           const DataLayout &DL, unsigned Reg, Type *Ty,
                           bool IsABIMangledValue) {
  ComputeValueVTs(TLI, DL, Ty, ValueVTs);

  IsABIMangled = IsABIMangledValue;

  for (EVT ValueVT : ValueVTs) {
    unsigned NumRegs = IsABIMangledValue
                           ? TLI.getNumRegistersForCallingConv(Context, ValueVT)
                           : TLI.getNumRegisters(Context, ValueVT);
    MVT RegisterVT = IsABIMangledValue
                         ? TLI.getRegisterTypeForCallingConv(Context, ValueVT)
                         : TLI.getRegisterType(Context, ValueVT);
    for (unsigned i = 0; i != NumRegs; ++i)
      Regs.push_back(Reg + i);
    RegVTs.push_back(RegisterVT);
    RegCount.push_back(NumRegs);
    Reg += NumRegs;
  }
}
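
// For example (illustrative, 32-bit target): lowering a single i64 value
// yields ValueVTs = {i64}, RegVTs = {i32}, RegCount = {2}, and Regs holds
// two consecutive virtual registers starting at Reg.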

SDValue RegsForValue::getCopyFromRegs(SelectionDAG &DAG,
                                      FunctionLoweringInfo &FuncInfo,
                                      const SDLoc &dl, SDValue &Chain,
                                      SDValue *Flag, const Value *V) const {
  // A Value with type {} or [0 x %t] needs no registers.
  if (ValueVTs.empty())
    return SDValue();

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  // Assemble the legal parts into the final values.
  SmallVector<SDValue, 4> Values(ValueVTs.size());
  SmallVector<SDValue, 8> Parts;
  for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
    // Copy the legal parts from the registers.
    EVT ValueVT = ValueVTs[Value];
    unsigned NumRegs = RegCount[Value];
    MVT RegisterVT = IsABIMangled
                         ? TLI.getRegisterTypeForCallingConv(RegVTs[Value])
                         : RegVTs[Value];

    Parts.resize(NumRegs);
    for (unsigned i = 0; i != NumRegs; ++i) {
      SDValue P;
      if (!Flag) {
        P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT);
      } else {
        P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT, *Flag);
        *Flag = P.getValue(2);
      }

      Chain = P.getValue(1);
      Parts[i] = P;

      // If the source register was virtual and if we know something about it,
      // add an assert node.
      if (!TargetRegisterInfo::isVirtualRegister(Regs[Part+i]) ||
          !RegisterVT.isInteger() || RegisterVT.isVector())
        continue;

      const FunctionLoweringInfo::LiveOutInfo *LOI =
          FuncInfo.GetLiveOutRegInfo(Regs[Part+i]);
      if (!LOI)
        continue;

      unsigned RegSize = RegisterVT.getSizeInBits();
      unsigned NumSignBits = LOI->NumSignBits;
      unsigned NumZeroBits = LOI->Known.countMinLeadingZeros();

      if (NumZeroBits == RegSize) {
        // The current value is a zero.
        // Explicitly express that as it would be easier for
        // optimizations to kick in.
        Parts[i] = DAG.getConstant(0, dl, RegisterVT);
        continue;
      }

      // FIXME: We capture more information than the dag can represent.  For
      // now, just use the tightest assertzext/assertsext possible.
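      // For example, if the live-out info proves that all but the low 8 bits
      // of the register are zero, we emit AssertZext from i8 so that later
      // combines can remove redundant extensions.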
      bool isSExt = true;
      EVT FromVT(MVT::Other);
      if (NumSignBits == RegSize) {
        isSExt = true;   // ASSERT SEXT 1
        FromVT = MVT::i1;
      } else if (NumZeroBits >= RegSize - 1) {
        isSExt = false;  // ASSERT ZEXT 1
        FromVT = MVT::i1;
      } else if (NumSignBits > RegSize - 8) {
        isSExt = true;   // ASSERT SEXT 8
        FromVT = MVT::i8;
      } else if (NumZeroBits >= RegSize - 8) {
        isSExt = false;  // ASSERT ZEXT 8
        FromVT = MVT::i8;
      } else if (NumSignBits > RegSize - 16) {
        isSExt = true;   // ASSERT SEXT 16
        FromVT = MVT::i16;
      } else if (NumZeroBits >= RegSize - 16) {
        isSExt = false;  // ASSERT ZEXT 16
        FromVT = MVT::i16;
      } else if (NumSignBits > RegSize - 32) {
        isSExt = true;   // ASSERT SEXT 32
        FromVT = MVT::i32;
      } else if (NumZeroBits >= RegSize - 32) {
        isSExt = false;  // ASSERT ZEXT 32
        FromVT = MVT::i32;
      } else {
        continue;
      }
      // Add an assertion node.
      assert(FromVT != MVT::Other);
      Parts[i] = DAG.getNode(isSExt ? ISD::AssertSext : ISD::AssertZext, dl,
                             RegisterVT, P, DAG.getValueType(FromVT));
    }

    Values[Value] = getCopyFromParts(DAG, dl, Parts.begin(),
                                     NumRegs, RegisterVT, ValueVT, V);
    Part += NumRegs;
    Parts.clear();
  }

  return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(ValueVTs), Values);
}

void RegsForValue::getCopyToRegs(SDValue Val, SelectionDAG &DAG,
                                 const SDLoc &dl, SDValue &Chain, SDValue *Flag,
                                 const Value *V,
                                 ISD::NodeType PreferredExtendType) const {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  ISD::NodeType ExtendKind = PreferredExtendType;

  // Get the list of the value's legal parts.
  unsigned NumRegs = Regs.size();
  SmallVector<SDValue, 8> Parts(NumRegs);
  for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
    unsigned NumParts = RegCount[Value];

    MVT RegisterVT = IsABIMangled
                         ? TLI.getRegisterTypeForCallingConv(RegVTs[Value])
                         : RegVTs[Value];

    if (ExtendKind == ISD::ANY_EXTEND && TLI.isZExtFree(Val, RegisterVT))
      ExtendKind = ISD::ZERO_EXTEND;

    getCopyToParts(DAG, dl, Val.getValue(Val.getResNo() + Value),
                   &Parts[Part], NumParts, RegisterVT, V, ExtendKind);
    Part += NumParts;
  }

  // Copy the parts into the registers.
  SmallVector<SDValue, 8> Chains(NumRegs);
  for (unsigned i = 0; i != NumRegs; ++i) {
    SDValue Part;
    if (!Flag) {
      Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i]);
    } else {
      Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i], *Flag);
      *Flag = Part.getValue(1);
    }

    Chains[i] = Part.getValue(0);
  }

  if (NumRegs == 1 || Flag)
    // If NumRegs > 1 && Flag is used then the use of the last CopyToReg is
    // flagged to it. That is the CopyToReg nodes and the user are considered
    // a single scheduling unit. If we create a TokenFactor and return it as
    // chain, then the TokenFactor is both a predecessor (operand) of the
    // user as well as a successor (the TF operands are flagged to the user).
    // c1, f1 = CopyToReg
    // c2, f2 = CopyToReg
    // c3     = TokenFactor c1, c2
    // ...
    //        = op c3, ..., f2
    Chain = Chains[NumRegs-1];
  else
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
}

void RegsForValue::AddInlineAsmOperands(unsigned Code, bool HasMatching,
                                        unsigned MatchingIdx, const SDLoc &dl,
                                        SelectionDAG &DAG,
                                        std::vector<SDValue> &Ops) const {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  unsigned Flag = InlineAsm::getFlagWord(Code, Regs.size());
  if (HasMatching)
    Flag = InlineAsm::getFlagWordForMatchingOp(Flag, MatchingIdx);
  else if (!Regs.empty() &&
           TargetRegisterInfo::isVirtualRegister(Regs.front())) {
    // Put the register class of the virtual registers in the flag word.  That
    // way, later passes can recompute register class constraints for inline
    // assembly as well as normal instructions.
    // Don't do this for tied operands that can use the regclass information
    // from the def.
    const MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
    const TargetRegisterClass *RC = MRI.getRegClass(Regs.front());
    Flag = InlineAsm::getFlagWordForRegClass(Flag, RC->getID());
  }

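  // Emit one i32 flag-word constant describing this operand (its kind and
  // register count, plus matching-operand or register-class information when
  // present), followed by one register operand per register.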
  SDValue Res = DAG.getTargetConstant(Flag, dl, MVT::i32);
  Ops.push_back(Res);

  unsigned SP = TLI.getStackPointerRegisterToSaveRestore();
  for (unsigned Value = 0, Reg = 0, e = ValueVTs.size(); Value != e; ++Value) {
    unsigned NumRegs = TLI.getNumRegisters(*DAG.getContext(), ValueVTs[Value]);
    MVT RegisterVT = RegVTs[Value];
    for (unsigned i = 0; i != NumRegs; ++i) {
      assert(Reg < Regs.size() && "Mismatch in # registers expected");
      unsigned TheReg = Regs[Reg++];
      Ops.push_back(DAG.getRegister(TheReg, RegisterVT));

      if (TheReg == SP && Code == InlineAsm::Kind_Clobber) {
        // If we clobbered the stack pointer, MFI should know about it.
        assert(DAG.getMachineFunction().getFrameInfo().hasOpaqueSPAdjustment());
      }
    }
  }
}

void SelectionDAGBuilder::init(GCFunctionInfo *gfi, AliasAnalysis *aa,
                               const TargetLibraryInfo *li) {
  AA = aa;
  GFI = gfi;
  LibInfo = li;
  DL = &DAG.getDataLayout();
  Context = DAG.getContext();
  LPadToCallSiteMap.clear();
}

void SelectionDAGBuilder::clear() {
  NodeMap.clear();
  UnusedArgNodeMap.clear();
  PendingLoads.clear();
  PendingExports.clear();
  CurInst = nullptr;
  HasTailCall = false;
  SDNodeOrder = LowestSDNodeOrder;
  StatepointLowering.clear();
}

void SelectionDAGBuilder::clearDanglingDebugInfo() {
  DanglingDebugInfoMap.clear();
}

SDValue SelectionDAGBuilder::getRoot() {
  if (PendingLoads.empty())
    return DAG.getRoot();

  if (PendingLoads.size() == 1) {
    SDValue Root = PendingLoads[0];
    DAG.setRoot(Root);
    PendingLoads.clear();
    return Root;
  }

  // Otherwise, we have to make a token factor node.
  SDValue Root = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), MVT::Other,
                             PendingLoads);
  PendingLoads.clear();
  DAG.setRoot(Root);
  return Root;
}

SDValue SelectionDAGBuilder::getControlRoot() {
  SDValue Root = DAG.getRoot();

  if (PendingExports.empty())
    return Root;

  // Turn all of the CopyToReg chains into one factored node.
  if (Root.getOpcode() != ISD::EntryToken) {
    unsigned i = 0, e = PendingExports.size();
    for (; i != e; ++i) {
      assert(PendingExports[i].getNode()->getNumOperands() > 1);
      if (PendingExports[i].getNode()->getOperand(0) == Root)
        break;  // Don't add the root if we already indirectly depend on it.
    }

    if (i == e)
      PendingExports.push_back(Root);
  }

  Root = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), MVT::Other,
                     PendingExports);
  PendingExports.clear();
  DAG.setRoot(Root);
  return Root;
}

void SelectionDAGBuilder::visit(const Instruction &I) {
  // Set up outgoing PHI node register values before emitting the terminator.
  if (isa<TerminatorInst>(&I)) {
    HandlePHINodesInSuccessorBlocks(I.getParent());
  }

  // Increase the SDNodeOrder if dealing with a non-debug instruction.
  if (!isa<DbgInfoIntrinsic>(I))
    ++SDNodeOrder;

  CurInst = &I;

  visit(I.getOpcode(), I);

  if (!isa<TerminatorInst>(&I) && !HasTailCall &&
      !isStatepoint(&I)) // statepoints handle their exports internally
    CopyToExportRegsIfNeeded(&I);

  CurInst = nullptr;
}

void SelectionDAGBuilder::visitPHI(const PHINode &) {
  llvm_unreachable("SelectionDAGBuilder shouldn't visit PHI nodes!");
}

void SelectionDAGBuilder::visit(unsigned Opcode, const User &I) {
  // Note: this doesn't use InstVisitor, because it has to work with
  // ConstantExpr's in addition to instructions.
  switch (Opcode) {
  default: llvm_unreachable("Unknown instruction type encountered!");
    // Build the switch statement using the Instruction.def file.
#define HANDLE_INST(NUM, OPCODE, CLASS) \
    case Instruction::OPCODE: visit##OPCODE((const CLASS&)I); break;
#include "llvm/IR/Instruction.def"
  }
}

// resolveDanglingDebugInfo - if we saw an earlier dbg_value referring to V,
// generate the debug data structures now that we've seen its definition.
void SelectionDAGBuilder::resolveDanglingDebugInfo(const Value *V,
                                                   SDValue Val) {
  DanglingDebugInfo &DDI = DanglingDebugInfoMap[V];
  if (DDI.getDI()) {
    const DbgValueInst *DI = DDI.getDI();
    DebugLoc dl = DDI.getdl();
    unsigned DbgSDNodeOrder = DDI.getSDNodeOrder();
    DILocalVariable *Variable = DI->getVariable();
    DIExpression *Expr = DI->getExpression();
    assert(Variable->isValidLocationForIntrinsic(dl) &&
           "Expected inlined-at fields to agree");
    SDDbgValue *SDV;
    if (Val.getNode()) {
      if (!EmitFuncArgumentDbgValue(V, Variable, Expr, dl, false, Val)) {
        SDV = getDbgValue(Val, Variable, Expr, dl, DbgSDNodeOrder);
        DAG.AddDbgValue(SDV, Val.getNode(), false);
      }
    } else
      DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
    DanglingDebugInfoMap[V] = DanglingDebugInfo();
  }
}

/// getCopyFromRegs - If there was a virtual register allocated for the value V,
/// emit CopyFromReg of the specified type Ty. Return empty SDValue() otherwise.
SDValue SelectionDAGBuilder::getCopyFromRegs(const Value *V, Type *Ty) {
  DenseMap<const Value *, unsigned>::iterator It = FuncInfo.ValueMap.find(V);
  SDValue Result;

  if (It != FuncInfo.ValueMap.end()) {
    unsigned InReg = It->second;

    RegsForValue RFV(*DAG.getContext(), DAG.getTargetLoweringInfo(),
                     DAG.getDataLayout(), InReg, Ty, isABIRegCopy(V));
    SDValue Chain = DAG.getEntryNode();
    Result = RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr,
                                 V);
    resolveDanglingDebugInfo(V, Result);
  }

  return Result;
}

/// getValue - Return an SDValue for the given Value.
SDValue SelectionDAGBuilder::getValue(const Value *V) {
  // If we already have an SDValue for this value, use it. It's important
  // to do this first, so that we don't create a CopyFromReg if we already
  // have a regular SDValue.
  SDValue &N = NodeMap[V];
  if (N.getNode()) return N;

  // If there's a virtual register allocated and initialized for this
  // value, use it.
  if (SDValue copyFromReg = getCopyFromRegs(V, V->getType()))
    return copyFromReg;

  // Otherwise create a new SDValue and remember it.
  SDValue Val = getValueImpl(V);
  NodeMap[V] = Val;
  resolveDanglingDebugInfo(V, Val);
  return Val;
}

// Return true if an SDValue exists for the given Value.
bool SelectionDAGBuilder::findValue(const Value *V) const {
  return (NodeMap.find(V) != NodeMap.end()) ||
         (FuncInfo.ValueMap.find(V) != FuncInfo.ValueMap.end());
}

/// getNonRegisterValue - Return an SDValue for the given Value, but
/// don't look in FuncInfo.ValueMap for a virtual register.
SDValue SelectionDAGBuilder::getNonRegisterValue(const Value *V) {
  // If we already have an SDValue for this value, use it.
  SDValue &N = NodeMap[V];
  if (N.getNode()) {
    if (isa<ConstantSDNode>(N) || isa<ConstantFPSDNode>(N)) {
      // Remove the debug location from the node as the node is about to be used
      // in a location which may differ from the original debug location.  This
      // is relevant to Constant and ConstantFP nodes because they can appear
      // as constant expressions inside PHI nodes.
      N->setDebugLoc(DebugLoc());
    }
    return N;
  }

  // Otherwise create a new SDValue and remember it.
  SDValue Val = getValueImpl(V);
  NodeMap[V] = Val;
  resolveDanglingDebugInfo(V, Val);
  return Val;
}

/// getValueImpl - Helper function for getValue and getNonRegisterValue.
/// Create an SDValue for the given value.
SDValue SelectionDAGBuilder::getValueImpl(const Value *V) {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  if (const Constant *C = dyn_cast<Constant>(V)) {
    EVT VT = TLI.getValueType(DAG.getDataLayout(), V->getType(), true);

    if (const ConstantInt *CI = dyn_cast<ConstantInt>(C))
      return DAG.getConstant(*CI, getCurSDLoc(), VT);

    if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
      return DAG.getGlobalAddress(GV, getCurSDLoc(), VT);

    if (isa<ConstantPointerNull>(C)) {
      unsigned AS = V->getType()->getPointerAddressSpace();
      return DAG.getConstant(0, getCurSDLoc(),
                             TLI.getPointerTy(DAG.getDataLayout(), AS));
    }

    if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
      return DAG.getConstantFP(*CFP, getCurSDLoc(), VT);

    if (isa<UndefValue>(C) && !V->getType()->isAggregateType())
      return DAG.getUNDEF(VT);

    if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
      visit(CE->getOpcode(), *CE);
      SDValue N1 = NodeMap[V];
      assert(N1.getNode() && "visit didn't populate the NodeMap!");
      return N1;
    }

    if (isa<ConstantStruct>(C) || isa<ConstantArray>(C)) {
      SmallVector<SDValue, 4> Constants;
      for (User::const_op_iterator OI = C->op_begin(), OE = C->op_end();
           OI != OE; ++OI) {
        SDNode *Val = getValue(*OI).getNode();
        // If the operand is an empty aggregate, there are no values.
        if (!Val) continue;
        // Add each leaf value from the operand to the Constants list
        // to form a flattened list of all the values.
        for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
          Constants.push_back(SDValue(Val, i));
      }

      return DAG.getMergeValues(Constants, getCurSDLoc());
    }

    if (const ConstantDataSequential *CDS =
          dyn_cast<ConstantDataSequential>(C)) {
      SmallVector<SDValue, 4> Ops;
      for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
        SDNode *Val = getValue(CDS->getElementAsConstant(i)).getNode();
        // Add each leaf value from the operand to the Constants list
        // to form a flattened list of all the values.
        for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
          Ops.push_back(SDValue(Val, i));
      }

      if (isa<ArrayType>(CDS->getType()))
        return DAG.getMergeValues(Ops, getCurSDLoc());
      return NodeMap[V] = DAG.getBuildVector(VT, getCurSDLoc(), Ops);
    }
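
    // For example, a [2 x i32] constant array flattens to two constant
    // SDValues merged with getMergeValues, while a <4 x i8>
    // ConstantDataVector becomes a single BUILD_VECTOR node.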

    if (C->getType()->isStructTy() || C->getType()->isArrayTy()) {
      assert((isa<ConstantAggregateZero>(C) || isa<UndefValue>(C)) &&
             "Unknown struct or array constant!");

      SmallVector<EVT, 4> ValueVTs;
      ComputeValueVTs(TLI, DAG.getDataLayout(), C->getType(), ValueVTs);
      unsigned NumElts = ValueVTs.size();
      if (NumElts == 0)
        return SDValue(); // empty struct
      SmallVector<SDValue, 4> Constants(NumElts);
      for (unsigned i = 0; i != NumElts; ++i) {
        EVT EltVT = ValueVTs[i];
        if (isa<UndefValue>(C))
          Constants[i] = DAG.getUNDEF(EltVT);
        else if (EltVT.isFloatingPoint())
          Constants[i] = DAG.getConstantFP(0, getCurSDLoc(), EltVT);
        else
          Constants[i] = DAG.getConstant(0, getCurSDLoc(), EltVT);
      }

      return DAG.getMergeValues(Constants, getCurSDLoc());
    }

    if (const BlockAddress *BA = dyn_cast<BlockAddress>(C))
      return DAG.getBlockAddress(BA, VT);

    VectorType *VecTy = cast<VectorType>(V->getType());
    unsigned NumElements = VecTy->getNumElements();

    // Now that we know the number and type of the elements, get that number of
    // elements into the Ops array based on what kind of constant it is.
    SmallVector<SDValue, 16> Ops;
    if (const ConstantVector *CV = dyn_cast<ConstantVector>(C)) {
      for (unsigned i = 0; i != NumElements; ++i)
        Ops.push_back(getValue(CV->getOperand(i)));
    } else {
      assert(isa<ConstantAggregateZero>(C) && "Unknown vector constant!");
      EVT EltVT =
          TLI.getValueType(DAG.getDataLayout(), VecTy->getElementType());

      SDValue Op;
      if (EltVT.isFloatingPoint())
        Op = DAG.getConstantFP(0, getCurSDLoc(), EltVT);
      else
        Op = DAG.getConstant(0, getCurSDLoc(), EltVT);
      Ops.assign(NumElements, Op);
    }

    // Create a BUILD_VECTOR node.
    return NodeMap[V] = DAG.getBuildVector(VT, getCurSDLoc(), Ops);
  }

  // If this is a static alloca, generate it as the frameindex instead of
  // computation.
  if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
    DenseMap<const AllocaInst*, int>::iterator SI =
      FuncInfo.StaticAllocaMap.find(AI);
    if (SI != FuncInfo.StaticAllocaMap.end())
      return DAG.getFrameIndex(SI->second,
                               TLI.getFrameIndexTy(DAG.getDataLayout()));
  }

  // If this is an instruction which fast-isel has deferred, select it now.
  if (const Instruction *Inst = dyn_cast<Instruction>(V)) {
    unsigned InReg = FuncInfo.InitializeRegForValue(Inst);

    RegsForValue RFV(*DAG.getContext(), TLI, DAG.getDataLayout(), InReg,
                     Inst->getType(), isABIRegCopy(V));
    SDValue Chain = DAG.getEntryNode();
    return RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr, V);
  }

  llvm_unreachable("Can't get register for value!");
}

void SelectionDAGBuilder::visitCatchPad(const CatchPadInst &I) {
  auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
  bool IsMSVCCXX = Pers == EHPersonality::MSVC_CXX;
  bool IsCoreCLR = Pers == EHPersonality::CoreCLR;
  MachineBasicBlock *CatchPadMBB = FuncInfo.MBB;
  // In MSVC C++ and CoreCLR, catchblocks are funclets and need prologues.
  if (IsMSVCCXX || IsCoreCLR)
    CatchPadMBB->setIsEHFuncletEntry();

  DAG.setRoot(DAG.getNode(ISD::CATCHPAD, getCurSDLoc(), MVT::Other, getControlRoot()));
}

void SelectionDAGBuilder::visitCatchRet(const CatchReturnInst &I) {
  // Update machine-CFG edge.
  MachineBasicBlock *TargetMBB = FuncInfo.MBBMap[I.getSuccessor()];
  FuncInfo.MBB->addSuccessor(TargetMBB);

  auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
  bool IsSEH = isAsynchronousEHPersonality(Pers);
  if (IsSEH) {
    // If this is not a fall-through branch or optimizations are switched off,
    // emit the branch.
    if (TargetMBB != NextBlock(FuncInfo.MBB) ||
        TM.getOptLevel() == CodeGenOpt::None)
      DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other,
                              getControlRoot(), DAG.getBasicBlock(TargetMBB)));
    return;
  }

  // Figure out the funclet membership for the catchret's successor.
  // This will be used by the FuncletLayout pass to determine how to order the
  // BB's.
  // A 'catchret' returns to the outer scope's color.
  Value *ParentPad = I.getCatchSwitchParentPad();
  const BasicBlock *SuccessorColor;
  if (isa<ConstantTokenNone>(ParentPad))
    SuccessorColor = &FuncInfo.Fn->getEntryBlock();
  else
    SuccessorColor = cast<Instruction>(ParentPad)->getParent();
  assert(SuccessorColor && "No parent funclet for catchret!");
  MachineBasicBlock *SuccessorColorMBB = FuncInfo.MBBMap[SuccessorColor];
  assert(SuccessorColorMBB && "No MBB for SuccessorColor!");

  // Create the terminator node.
  SDValue Ret = DAG.getNode(ISD::CATCHRET, getCurSDLoc(), MVT::Other,
                            getControlRoot(), DAG.getBasicBlock(TargetMBB),
                            DAG.getBasicBlock(SuccessorColorMBB));
  DAG.setRoot(Ret);
}

void SelectionDAGBuilder::visitCleanupPad(const CleanupPadInst &CPI) {
  // Don't emit any special code for the cleanuppad instruction. It just marks
  // the start of a funclet.
  FuncInfo.MBB->setIsEHFuncletEntry();
  FuncInfo.MBB->setIsCleanupFuncletEntry();
}

/// When an invoke or a cleanupret unwinds to the next EH pad, there are
/// many places it could ultimately go. In the IR, we have a single unwind
/// destination, but in the machine CFG, we enumerate all the possible blocks.
/// This function skips over imaginary basic blocks that hold catchswitch
/// instructions, and finds all the "real" machine
/// basic block destinations. As those destinations may not be successors of
/// EHPadBB, here we also calculate the edge probability to those destinations.
/// The passed-in Prob is the edge probability to EHPadBB.
static void findUnwindDestinations(
    FunctionLoweringInfo &FuncInfo, const BasicBlock *EHPadBB,
    BranchProbability Prob,
    SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>>
        &UnwindDests) {
  EHPersonality Personality =
    classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
  bool IsMSVCCXX = Personality == EHPersonality::MSVC_CXX;
  bool IsCoreCLR = Personality == EHPersonality::CoreCLR;

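  // For example, an invoke that unwinds to a catchswitch with two catchpad
  // handlers records both handler MBBs below, then keeps walking to the
  // catchswitch's own unwind destination (if any), scaling Prob by the edge
  // probability at each hop.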
  while (EHPadBB) {
    const Instruction *Pad = EHPadBB->getFirstNonPHI();
    BasicBlock *NewEHPadBB = nullptr;
    if (isa<LandingPadInst>(Pad)) {
      // Stop on landingpads. They are not funclets.
      UnwindDests.emplace_back(FuncInfo.MBBMap[EHPadBB], Prob);
      break;
    } else if (isa<CleanupPadInst>(Pad)) {
      // Stop on cleanup pads. Cleanups are always funclet entries for all known
      // personalities.
      UnwindDests.emplace_back(FuncInfo.MBBMap[EHPadBB], Prob);
      UnwindDests.back().first->setIsEHFuncletEntry();
      break;
    } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Pad)) {
      // Add the catchpad handlers to the possible destinations.
      for (const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
        UnwindDests.emplace_back(FuncInfo.MBBMap[CatchPadBB], Prob);
        // For MSVC++ and the CLR, catchblocks are funclets and need prologues.
        if (IsMSVCCXX || IsCoreCLR)
          UnwindDests.back().first->setIsEHFuncletEntry();
      }
      NewEHPadBB = CatchSwitch->getUnwindDest();
    } else {
      continue;
    }

    BranchProbabilityInfo *BPI = FuncInfo.BPI;
    if (BPI && NewEHPadBB)
      Prob *= BPI->getEdgeProbability(EHPadBB, NewEHPadBB);
    EHPadBB = NewEHPadBB;
  }
}

void SelectionDAGBuilder::visitCleanupRet(const CleanupReturnInst &I) {
  // Update successor info.
  SmallVector<std::pair<MachineBasicBlock *, BranchProbability>, 1> UnwindDests;
  auto UnwindDest = I.getUnwindDest();
  BranchProbabilityInfo *BPI = FuncInfo.BPI;
  BranchProbability UnwindDestProb =
      (BPI && UnwindDest)
          ? BPI->getEdgeProbability(FuncInfo.MBB->getBasicBlock(), UnwindDest)
          : BranchProbability::getZero();
  findUnwindDestinations(FuncInfo, UnwindDest, UnwindDestProb, UnwindDests);
  for (auto &UnwindDest : UnwindDests) {
    UnwindDest.first->setIsEHPad();
    addSuccessorWithProb(FuncInfo.MBB, UnwindDest.first, UnwindDest.second);
  }
  FuncInfo.MBB->normalizeSuccProbs();

  // Create the terminator node.
  SDValue Ret =
      DAG.getNode(ISD::CLEANUPRET, getCurSDLoc(), MVT::Other, getControlRoot());
  DAG.setRoot(Ret);
}

void SelectionDAGBuilder::visitCatchSwitch(const CatchSwitchInst &CSI) {
  report_fatal_error("visitCatchSwitch not yet implemented!");
}

void SelectionDAGBuilder::visitRet(const ReturnInst &I) {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  auto &DL = DAG.getDataLayout();
  SDValue Chain = getControlRoot();
  SmallVector<ISD::OutputArg, 8> Outs;
  SmallVector<SDValue, 8> OutVals;

  // Calls to @llvm.experimental.deoptimize don't generate a return value, so
  // lower
  //
  //   %val = call <ty> @llvm.experimental.deoptimize()
  //   ret <ty> %val
  //
  // differently.
  if (I.getParent()->getTerminatingDeoptimizeCall()) {
    LowerDeoptimizingReturn();
    return;
  }

  if (!FuncInfo.CanLowerReturn) {
    unsigned DemoteReg = FuncInfo.DemoteRegister;
    const Function *F = I.getParent()->getParent();

    // Emit a store of the return value through the virtual register.
    // Leave Outs empty so that LowerReturn won't try to load return
    // registers the usual way.
    SmallVector<EVT, 1> PtrValueVTs;
    ComputeValueVTs(TLI, DL, PointerType::getUnqual(F->getReturnType()),
                    PtrValueVTs);

    SDValue RetPtr = DAG.getCopyFromReg(DAG.getEntryNode(), getCurSDLoc(),
                                        DemoteReg, PtrValueVTs[0]);
    SDValue RetOp = getValue(I.getOperand(0));

    SmallVector<EVT, 4> ValueVTs;
    SmallVector<uint64_t, 4> Offsets;
    ComputeValueVTs(TLI, DL, I.getOperand(0)->getType(), ValueVTs, &Offsets);
    unsigned NumValues = ValueVTs.size();

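    // For example, when a large struct is returned by value and the target
    // cannot return it in registers, the caller's hidden sret pointer is
    // available in DemoteReg, and each piece of the return value is stored
    // through it at its byte offset below.
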
    // An aggregate return value cannot wrap around the address space, so
    // offsets to its parts don't wrap either.
    SDNodeFlags Flags;
    Flags.setNoUnsignedWrap(true);

    SmallVector<SDValue, 4> Chains(NumValues);
    for (unsigned i = 0; i != NumValues; ++i) {
      SDValue Add = DAG.getNode(ISD::ADD, getCurSDLoc(),
                                RetPtr.getValueType(), RetPtr,
                                DAG.getIntPtrConstant(Offsets[i],
                                                      getCurSDLoc()),
                                Flags);
      Chains[i] = DAG.getStore(Chain, getCurSDLoc(),
                               SDValue(RetOp.getNode(), RetOp.getResNo() + i),
                               // FIXME: better loc info would be nice.
                               Add, MachinePointerInfo());
    }

    Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(),
                        MVT::Other, Chains);
  } else if (I.getNumOperands() != 0) {
    SmallVector<EVT, 4> ValueVTs;
    ComputeValueVTs(TLI, DL, I.getOperand(0)->getType(), ValueVTs);
    unsigned NumValues = ValueVTs.size();
    if (NumValues) {
      SDValue RetOp = getValue(I.getOperand(0));

      const Function *F = I.getParent()->getParent();

      ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
      if (F->getAttributes().hasAttribute(AttributeList::ReturnIndex,
                                          Attribute::SExt))
        ExtendKind = ISD::SIGN_EXTEND;
      else if (F->getAttributes().hasAttribute(AttributeList::ReturnIndex,
                                               Attribute::ZExt))
        ExtendKind = ISD::ZERO_EXTEND;

      LLVMContext &Context = F->getContext();
      bool RetInReg = F->getAttributes().hasAttribute(
          AttributeList::ReturnIndex, Attribute::InReg);

      for (unsigned j = 0; j != NumValues; ++j) {
        EVT VT = ValueVTs[j];

        if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger())
          VT = TLI.getTypeForExtReturn(Context, VT, ExtendKind);

        unsigned NumParts = TLI.getNumRegistersForCallingConv(Context, VT);
        MVT PartVT = TLI.getRegisterTypeForCallingConv(Context, VT);
        SmallVector<SDValue, 4> Parts(NumParts);
        getCopyToParts(DAG, getCurSDLoc(),
                       SDValue(RetOp.getNode(), RetOp.getResNo() + j),
                       &Parts[0], NumParts, PartVT, &I, ExtendKind, true);

        // 'inreg' on function refers to return value
        ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
        if (RetInReg)
          Flags.setInReg();

        // Propagate extension type if any
        if (ExtendKind == ISD::SIGN_EXTEND)
          Flags.setSExt();
        else if (ExtendKind == ISD::ZERO_EXTEND)
          Flags.setZExt();

        for (unsigned i = 0; i < NumParts; ++i) {
          Outs.push_back(ISD::OutputArg(Flags, Parts[i].getValueType(),
                                        VT, /*isfixed=*/true, 0, 0));
          OutVals.push_back(Parts[i]);
        }
      }
    }
  }

  // Push in swifterror virtual register as the last element of Outs. This makes
  // sure swifterror virtual register will be returned in the swifterror
  // physical register.
  const Function *F = I.getParent()->getParent();
  if (TLI.supportSwiftError() &&
      F->getAttributes().hasAttrSomewhere(Attribute::SwiftError)) {
    assert(FuncInfo.SwiftErrorArg && "Need a swift error argument");
    ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
    Flags.setSwiftError();
    Outs.push_back(ISD::OutputArg(Flags, EVT(TLI.getPointerTy(DL)) /*vt*/,
                                  EVT(TLI.getPointerTy(DL)) /*argvt*/,
                                  true /*isfixed*/, 1 /*origidx*/,
                                  0 /*partOffs*/));
    // Create SDNode for the swifterror virtual register.
1511 OutVals.push_back( 1512 DAG.getRegister(FuncInfo.getOrCreateSwiftErrorVRegUseAt( 1513 &I, FuncInfo.MBB, FuncInfo.SwiftErrorArg).first, 1514 EVT(TLI.getPointerTy(DL)))); 1515 } 1516 1517 bool isVarArg = DAG.getMachineFunction().getFunction()->isVarArg(); 1518 CallingConv::ID CallConv = 1519 DAG.getMachineFunction().getFunction()->getCallingConv(); 1520 Chain = DAG.getTargetLoweringInfo().LowerReturn( 1521 Chain, CallConv, isVarArg, Outs, OutVals, getCurSDLoc(), DAG); 1522 1523 // Verify that the target's LowerReturn behaved as expected. 1524 assert(Chain.getNode() && Chain.getValueType() == MVT::Other && 1525 "LowerReturn didn't return a valid chain!"); 1526 1527 // Update the DAG with the new chain value resulting from return lowering. 1528 DAG.setRoot(Chain); 1529 } 1530 1531 /// CopyToExportRegsIfNeeded - If the given value has virtual registers 1532 /// created for it, emit nodes to copy the value into the virtual 1533 /// registers. 1534 void SelectionDAGBuilder::CopyToExportRegsIfNeeded(const Value *V) { 1535 // Skip empty types 1536 if (V->getType()->isEmptyTy()) 1537 return; 1538 1539 DenseMap<const Value *, unsigned>::iterator VMI = FuncInfo.ValueMap.find(V); 1540 if (VMI != FuncInfo.ValueMap.end()) { 1541 assert(!V->use_empty() && "Unused value assigned virtual registers!"); 1542 CopyValueToVirtualRegister(V, VMI->second); 1543 } 1544 } 1545 1546 /// ExportFromCurrentBlock - If this condition isn't known to be exported from 1547 /// the current basic block, add it to ValueMap now so that we'll get a 1548 /// CopyTo/FromReg. 1549 void SelectionDAGBuilder::ExportFromCurrentBlock(const Value *V) { 1550 // No need to export constants. 1551 if (!isa<Instruction>(V) && !isa<Argument>(V)) return; 1552 1553 // Already exported? 1554 if (FuncInfo.isExportedInst(V)) return; 1555 1556 unsigned Reg = FuncInfo.InitializeRegForValue(V); 1557 CopyValueToVirtualRegister(V, Reg); 1558 } 1559 1560 bool SelectionDAGBuilder::isExportableFromCurrentBlock(const Value *V, 1561 const BasicBlock *FromBB) { 1562 // The operands of the setcc have to be in this block. We don't know 1563 // how to export them from some other block. 1564 if (const Instruction *VI = dyn_cast<Instruction>(V)) { 1565 // Can export from current BB. 1566 if (VI->getParent() == FromBB) 1567 return true; 1568 1569 // Is already exported, noop. 1570 return FuncInfo.isExportedInst(V); 1571 } 1572 1573 // If this is an argument, we can export it if the BB is the entry block or 1574 // if it is already exported. 1575 if (isa<Argument>(V)) { 1576 if (FromBB == &FromBB->getParent()->getEntryBlock()) 1577 return true; 1578 1579 // Otherwise, can only export this if it is already exported. 1580 return FuncInfo.isExportedInst(V); 1581 } 1582 1583 // Otherwise, constants can always be exported. 1584 return true; 1585 } 1586 1587 /// Return branch probability calculated by BranchProbabilityInfo for IR blocks. 1588 BranchProbability 1589 SelectionDAGBuilder::getEdgeProbability(const MachineBasicBlock *Src, 1590 const MachineBasicBlock *Dst) const { 1591 BranchProbabilityInfo *BPI = FuncInfo.BPI; 1592 const BasicBlock *SrcBB = Src->getBasicBlock(); 1593 const BasicBlock *DstBB = Dst->getBasicBlock(); 1594 if (!BPI) { 1595 // If BPI is not available, set the default probability as 1 / N, where N is 1596 // the number of successors. 
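// For example, if SrcBB ends in a switch with four successors, every queried
// edge gets BranchProbability(1, 4); the std::max below guards a
// successor-less block against a zero denominator (illustrative numbers).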
1597     auto SuccSize = std::max<uint32_t>(
1598         std::distance(succ_begin(SrcBB), succ_end(SrcBB)), 1);
1599     return BranchProbability(1, SuccSize);
1600   }
1601   return BPI->getEdgeProbability(SrcBB, DstBB);
1602 }
1603
1604 void SelectionDAGBuilder::addSuccessorWithProb(MachineBasicBlock *Src,
1605                                                MachineBasicBlock *Dst,
1606                                                BranchProbability Prob) {
1607   if (!FuncInfo.BPI)
1608     Src->addSuccessorWithoutProb(Dst);
1609   else {
1610     if (Prob.isUnknown())
1611       Prob = getEdgeProbability(Src, Dst);
1612     Src->addSuccessor(Dst, Prob);
1613   }
1614 }
1615
1616 static bool InBlock(const Value *V, const BasicBlock *BB) {
1617   if (const Instruction *I = dyn_cast<Instruction>(V))
1618     return I->getParent() == BB;
1619   return true;
1620 }
1621
1622 /// EmitBranchForMergedCondition - Helper method for FindMergedConditions.
1623 /// This function emits a branch and is used at the leaves of an OR or an
1624 /// AND operator tree.
1625 ///
1626 void
1627 SelectionDAGBuilder::EmitBranchForMergedCondition(const Value *Cond,
1628                                                   MachineBasicBlock *TBB,
1629                                                   MachineBasicBlock *FBB,
1630                                                   MachineBasicBlock *CurBB,
1631                                                   MachineBasicBlock *SwitchBB,
1632                                                   BranchProbability TProb,
1633                                                   BranchProbability FProb,
1634                                                   bool InvertCond) {
1635   const BasicBlock *BB = CurBB->getBasicBlock();
1636
1637   // If the leaf of the tree is a comparison, merge the condition into
1638   // the caseblock.
1639   if (const CmpInst *BOp = dyn_cast<CmpInst>(Cond)) {
1640     // The operands of the cmp have to be in this block. We don't know
1641     // how to export them from some other block. If this is the first block
1642     // of the sequence, no exporting is needed.
1643     if (CurBB == SwitchBB ||
1644         (isExportableFromCurrentBlock(BOp->getOperand(0), BB) &&
1645          isExportableFromCurrentBlock(BOp->getOperand(1), BB))) {
1646       ISD::CondCode Condition;
1647       if (const ICmpInst *IC = dyn_cast<ICmpInst>(Cond)) {
1648         ICmpInst::Predicate Pred =
1649             InvertCond ? IC->getInversePredicate() : IC->getPredicate();
1650         Condition = getICmpCondCode(Pred);
1651       } else {
1652         const FCmpInst *FC = cast<FCmpInst>(Cond);
1653         FCmpInst::Predicate Pred =
1654             InvertCond ? FC->getInversePredicate() : FC->getPredicate();
1655         Condition = getFCmpCondCode(Pred);
1656         if (TM.Options.NoNaNsFPMath)
1657           Condition = getFCmpCodeWithoutNaN(Condition);
1658       }
1659
1660       CaseBlock CB(Condition, BOp->getOperand(0), BOp->getOperand(1), nullptr,
1661                    TBB, FBB, CurBB, getCurSDLoc(), TProb, FProb);
1662       SwitchCases.push_back(CB);
1663       return;
1664     }
1665   }
1666
1667   // Create a CaseBlock record representing this branch.
1668   ISD::CondCode Opc = InvertCond ? ISD::SETNE : ISD::SETEQ;
1669   CaseBlock CB(Opc, Cond, ConstantInt::getTrue(*DAG.getContext()),
1670                nullptr, TBB, FBB, CurBB, getCurSDLoc(), TProb, FProb);
1671   SwitchCases.push_back(CB);
1672 }
1673
1674 /// FindMergedConditions - If Cond is an expression like (x && y) or (x || y), lower it as a cascade of conditional branches rather than a materialized i1 value, recording each leaf comparison in SwitchCases via EmitBranchForMergedCondition.
1675 void SelectionDAGBuilder::FindMergedConditions(const Value *Cond,
1676                                                MachineBasicBlock *TBB,
1677                                                MachineBasicBlock *FBB,
1678                                                MachineBasicBlock *CurBB,
1679                                                MachineBasicBlock *SwitchBB,
1680                                                Instruction::BinaryOps Opc,
1681                                                BranchProbability TProb,
1682                                                BranchProbability FProb,
1683                                                bool InvertCond) {
1684   // If Cond is a single-use 'not' that is not itself part of the and/or tree,
1685   // look through it and invert the opcode and operands at the next level.
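// For illustration, a branch on a negated condition such as
//
//   %notc = xor i1 %c, true
//   br i1 %notc, label %T, label %F
//
// is handled by recursing on %c with InvertCond flipped, since IR spells
// 'not' as an xor with true (a sketch, not tied to a particular test case).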
1686 if (BinaryOperator::isNot(Cond) && Cond->hasOneUse()) { 1687 const Value *CondOp = BinaryOperator::getNotArgument(Cond); 1688 if (InBlock(CondOp, CurBB->getBasicBlock())) { 1689 FindMergedConditions(CondOp, TBB, FBB, CurBB, SwitchBB, Opc, TProb, FProb, 1690 !InvertCond); 1691 return; 1692 } 1693 } 1694 1695 const Instruction *BOp = dyn_cast<Instruction>(Cond); 1696 // Compute the effective opcode for Cond, taking into account whether it needs 1697 // to be inverted, e.g. 1698 // and (not (or A, B)), C 1699 // gets lowered as 1700 // and (and (not A, not B), C) 1701 unsigned BOpc = 0; 1702 if (BOp) { 1703 BOpc = BOp->getOpcode(); 1704 if (InvertCond) { 1705 if (BOpc == Instruction::And) 1706 BOpc = Instruction::Or; 1707 else if (BOpc == Instruction::Or) 1708 BOpc = Instruction::And; 1709 } 1710 } 1711 1712 // If this node is not part of the or/and tree, emit it as a branch. 1713 if (!BOp || !(isa<BinaryOperator>(BOp) || isa<CmpInst>(BOp)) || 1714 BOpc != Opc || !BOp->hasOneUse() || 1715 BOp->getParent() != CurBB->getBasicBlock() || 1716 !InBlock(BOp->getOperand(0), CurBB->getBasicBlock()) || 1717 !InBlock(BOp->getOperand(1), CurBB->getBasicBlock())) { 1718 EmitBranchForMergedCondition(Cond, TBB, FBB, CurBB, SwitchBB, 1719 TProb, FProb, InvertCond); 1720 return; 1721 } 1722 1723 // Create TmpBB after CurBB. 1724 MachineFunction::iterator BBI(CurBB); 1725 MachineFunction &MF = DAG.getMachineFunction(); 1726 MachineBasicBlock *TmpBB = MF.CreateMachineBasicBlock(CurBB->getBasicBlock()); 1727 CurBB->getParent()->insert(++BBI, TmpBB); 1728 1729 if (Opc == Instruction::Or) { 1730 // Codegen X | Y as: 1731 // BB1: 1732 // jmp_if_X TBB 1733 // jmp TmpBB 1734 // TmpBB: 1735 // jmp_if_Y TBB 1736 // jmp FBB 1737 // 1738 1739 // We have flexibility in setting Prob for BB1 and Prob for TmpBB. 1740 // The requirement is that 1741 // TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB) 1742 // = TrueProb for original BB. 1743 // Assuming the original probabilities are A and B, one choice is to set 1744 // BB1's probabilities to A/2 and A/2+B, and set TmpBB's probabilities to 1745 // A/(1+B) and 2B/(1+B). This choice assumes that 1746 // TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB. 1747 // Another choice is to assume TrueProb for BB1 equals to TrueProb for 1748 // TmpBB, but the math is more complicated. 1749 1750 auto NewTrueProb = TProb / 2; 1751 auto NewFalseProb = TProb / 2 + FProb; 1752 // Emit the LHS condition. 1753 FindMergedConditions(BOp->getOperand(0), TBB, TmpBB, CurBB, SwitchBB, Opc, 1754 NewTrueProb, NewFalseProb, InvertCond); 1755 1756 // Normalize A/2 and B to get A/(1+B) and 2B/(1+B). 1757 SmallVector<BranchProbability, 2> Probs{TProb / 2, FProb}; 1758 BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end()); 1759 // Emit the RHS condition into TmpBB. 1760 FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, SwitchBB, Opc, 1761 Probs[0], Probs[1], InvertCond); 1762 } else { 1763 assert(Opc == Instruction::And && "Unknown merge op!"); 1764 // Codegen X & Y as: 1765 // BB1: 1766 // jmp_if_X TmpBB 1767 // jmp FBB 1768 // TmpBB: 1769 // jmp_if_Y TBB 1770 // jmp FBB 1771 // 1772 // This requires creation of TmpBB after CurBB. 1773 1774 // We have flexibility in setting Prob for BB1 and Prob for TmpBB. 1775 // The requirement is that 1776 // FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB) 1777 // = FalseProb for original BB. 
1778 // Assuming the original probabilities are A and B, one choice is to set 1779 // BB1's probabilities to A+B/2 and B/2, and set TmpBB's probabilities to 1780 // 2A/(1+A) and B/(1+A). This choice assumes that FalseProb for BB1 == 1781 // TrueProb for BB1 * FalseProb for TmpBB. 1782 1783 auto NewTrueProb = TProb + FProb / 2; 1784 auto NewFalseProb = FProb / 2; 1785 // Emit the LHS condition. 1786 FindMergedConditions(BOp->getOperand(0), TmpBB, FBB, CurBB, SwitchBB, Opc, 1787 NewTrueProb, NewFalseProb, InvertCond); 1788 1789 // Normalize A and B/2 to get 2A/(1+A) and B/(1+A). 1790 SmallVector<BranchProbability, 2> Probs{TProb, FProb / 2}; 1791 BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end()); 1792 // Emit the RHS condition into TmpBB. 1793 FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, SwitchBB, Opc, 1794 Probs[0], Probs[1], InvertCond); 1795 } 1796 } 1797 1798 /// If the set of cases should be emitted as a series of branches, return true. 1799 /// If we should emit this as a bunch of and/or'd together conditions, return 1800 /// false. 1801 bool 1802 SelectionDAGBuilder::ShouldEmitAsBranches(const std::vector<CaseBlock> &Cases) { 1803 if (Cases.size() != 2) return true; 1804 1805 // If this is two comparisons of the same values or'd or and'd together, they 1806 // will get folded into a single comparison, so don't emit two blocks. 1807 if ((Cases[0].CmpLHS == Cases[1].CmpLHS && 1808 Cases[0].CmpRHS == Cases[1].CmpRHS) || 1809 (Cases[0].CmpRHS == Cases[1].CmpLHS && 1810 Cases[0].CmpLHS == Cases[1].CmpRHS)) { 1811 return false; 1812 } 1813 1814 // Handle: (X != null) | (Y != null) --> (X|Y) != 0 1815 // Handle: (X == null) & (Y == null) --> (X|Y) == 0 1816 if (Cases[0].CmpRHS == Cases[1].CmpRHS && 1817 Cases[0].CC == Cases[1].CC && 1818 isa<Constant>(Cases[0].CmpRHS) && 1819 cast<Constant>(Cases[0].CmpRHS)->isNullValue()) { 1820 if (Cases[0].CC == ISD::SETEQ && Cases[0].TrueBB == Cases[1].ThisBB) 1821 return false; 1822 if (Cases[0].CC == ISD::SETNE && Cases[0].FalseBB == Cases[1].ThisBB) 1823 return false; 1824 } 1825 1826 return true; 1827 } 1828 1829 void SelectionDAGBuilder::visitBr(const BranchInst &I) { 1830 MachineBasicBlock *BrMBB = FuncInfo.MBB; 1831 1832 // Update machine-CFG edges. 1833 MachineBasicBlock *Succ0MBB = FuncInfo.MBBMap[I.getSuccessor(0)]; 1834 1835 if (I.isUnconditional()) { 1836 // Update machine-CFG edges. 1837 BrMBB->addSuccessor(Succ0MBB); 1838 1839 // If this is not a fall-through branch or optimizations are switched off, 1840 // emit the branch. 1841 if (Succ0MBB != NextBlock(BrMBB) || TM.getOptLevel() == CodeGenOpt::None) 1842 DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), 1843 MVT::Other, getControlRoot(), 1844 DAG.getBasicBlock(Succ0MBB))); 1845 1846 return; 1847 } 1848 1849 // If this condition is one of the special cases we handle, do special stuff 1850 // now. 1851 const Value *CondVal = I.getCondition(); 1852 MachineBasicBlock *Succ1MBB = FuncInfo.MBBMap[I.getSuccessor(1)]; 1853 1854 // If this is a series of conditions that are or'd or and'd together, emit 1855 // this as a sequence of branches instead of setcc's with and/or operations. 1856 // As long as jumps are not expensive, this should improve performance. 
1857 // For example, instead of something like: 1858 // cmp A, B 1859 // C = seteq 1860 // cmp D, E 1861 // F = setle 1862 // or C, F 1863 // jnz foo 1864 // Emit: 1865 // cmp A, B 1866 // je foo 1867 // cmp D, E 1868 // jle foo 1869 // 1870 if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(CondVal)) { 1871 Instruction::BinaryOps Opcode = BOp->getOpcode(); 1872 if (!DAG.getTargetLoweringInfo().isJumpExpensive() && BOp->hasOneUse() && 1873 !I.getMetadata(LLVMContext::MD_unpredictable) && 1874 (Opcode == Instruction::And || Opcode == Instruction::Or)) { 1875 FindMergedConditions(BOp, Succ0MBB, Succ1MBB, BrMBB, BrMBB, 1876 Opcode, 1877 getEdgeProbability(BrMBB, Succ0MBB), 1878 getEdgeProbability(BrMBB, Succ1MBB), 1879 /*InvertCond=*/false); 1880 // If the compares in later blocks need to use values not currently 1881 // exported from this block, export them now. This block should always 1882 // be the first entry. 1883 assert(SwitchCases[0].ThisBB == BrMBB && "Unexpected lowering!"); 1884 1885 // Allow some cases to be rejected. 1886 if (ShouldEmitAsBranches(SwitchCases)) { 1887 for (unsigned i = 1, e = SwitchCases.size(); i != e; ++i) { 1888 ExportFromCurrentBlock(SwitchCases[i].CmpLHS); 1889 ExportFromCurrentBlock(SwitchCases[i].CmpRHS); 1890 } 1891 1892 // Emit the branch for this block. 1893 visitSwitchCase(SwitchCases[0], BrMBB); 1894 SwitchCases.erase(SwitchCases.begin()); 1895 return; 1896 } 1897 1898 // Okay, we decided not to do this, remove any inserted MBB's and clear 1899 // SwitchCases. 1900 for (unsigned i = 1, e = SwitchCases.size(); i != e; ++i) 1901 FuncInfo.MF->erase(SwitchCases[i].ThisBB); 1902 1903 SwitchCases.clear(); 1904 } 1905 } 1906 1907 // Create a CaseBlock record representing this branch. 1908 CaseBlock CB(ISD::SETEQ, CondVal, ConstantInt::getTrue(*DAG.getContext()), 1909 nullptr, Succ0MBB, Succ1MBB, BrMBB, getCurSDLoc()); 1910 1911 // Use visitSwitchCase to actually insert the fast branch sequence for this 1912 // cond branch. 1913 visitSwitchCase(CB, BrMBB); 1914 } 1915 1916 /// visitSwitchCase - Emits the necessary code to represent a single node in 1917 /// the binary search tree resulting from lowering a switch instruction. 1918 void SelectionDAGBuilder::visitSwitchCase(CaseBlock &CB, 1919 MachineBasicBlock *SwitchBB) { 1920 SDValue Cond; 1921 SDValue CondLHS = getValue(CB.CmpLHS); 1922 SDLoc dl = CB.DL; 1923 1924 // Build the setcc now. 1925 if (!CB.CmpMHS) { 1926 // Fold "(X == true)" to X and "(X == false)" to !X to 1927 // handle common cases produced by branch lowering. 
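// For instance, "br i1 (icmp eq i1 %x, true)" becomes a branch on %x itself,
// and the "== false" form becomes a branch on (xor %x, 1), matching the two
// special cases handled below (illustrative).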
1928 if (CB.CmpRHS == ConstantInt::getTrue(*DAG.getContext()) && 1929 CB.CC == ISD::SETEQ) 1930 Cond = CondLHS; 1931 else if (CB.CmpRHS == ConstantInt::getFalse(*DAG.getContext()) && 1932 CB.CC == ISD::SETEQ) { 1933 SDValue True = DAG.getConstant(1, dl, CondLHS.getValueType()); 1934 Cond = DAG.getNode(ISD::XOR, dl, CondLHS.getValueType(), CondLHS, True); 1935 } else 1936 Cond = DAG.getSetCC(dl, MVT::i1, CondLHS, getValue(CB.CmpRHS), CB.CC); 1937 } else { 1938 assert(CB.CC == ISD::SETLE && "Can handle only LE ranges now"); 1939 1940 const APInt& Low = cast<ConstantInt>(CB.CmpLHS)->getValue(); 1941 const APInt& High = cast<ConstantInt>(CB.CmpRHS)->getValue(); 1942 1943 SDValue CmpOp = getValue(CB.CmpMHS); 1944 EVT VT = CmpOp.getValueType(); 1945 1946 if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) { 1947 Cond = DAG.getSetCC(dl, MVT::i1, CmpOp, DAG.getConstant(High, dl, VT), 1948 ISD::SETLE); 1949 } else { 1950 SDValue SUB = DAG.getNode(ISD::SUB, dl, 1951 VT, CmpOp, DAG.getConstant(Low, dl, VT)); 1952 Cond = DAG.getSetCC(dl, MVT::i1, SUB, 1953 DAG.getConstant(High-Low, dl, VT), ISD::SETULE); 1954 } 1955 } 1956 1957 // Update successor info 1958 addSuccessorWithProb(SwitchBB, CB.TrueBB, CB.TrueProb); 1959 // TrueBB and FalseBB are always different unless the incoming IR is 1960 // degenerate. This only happens when running llc on weird IR. 1961 if (CB.TrueBB != CB.FalseBB) 1962 addSuccessorWithProb(SwitchBB, CB.FalseBB, CB.FalseProb); 1963 SwitchBB->normalizeSuccProbs(); 1964 1965 // If the lhs block is the next block, invert the condition so that we can 1966 // fall through to the lhs instead of the rhs block. 1967 if (CB.TrueBB == NextBlock(SwitchBB)) { 1968 std::swap(CB.TrueBB, CB.FalseBB); 1969 SDValue True = DAG.getConstant(1, dl, Cond.getValueType()); 1970 Cond = DAG.getNode(ISD::XOR, dl, Cond.getValueType(), Cond, True); 1971 } 1972 1973 SDValue BrCond = DAG.getNode(ISD::BRCOND, dl, 1974 MVT::Other, getControlRoot(), Cond, 1975 DAG.getBasicBlock(CB.TrueBB)); 1976 1977 // Insert the false branch. Do this even if it's a fall through branch, 1978 // this makes it easier to do DAG optimizations which require inverting 1979 // the branch condition. 1980 BrCond = DAG.getNode(ISD::BR, dl, MVT::Other, BrCond, 1981 DAG.getBasicBlock(CB.FalseBB)); 1982 1983 DAG.setRoot(BrCond); 1984 } 1985 1986 /// visitJumpTable - Emit JumpTable node in the current MBB 1987 void SelectionDAGBuilder::visitJumpTable(JumpTable &JT) { 1988 // Emit the code for the jump table 1989 assert(JT.Reg != -1U && "Should lower JT Header first!"); 1990 EVT PTy = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()); 1991 SDValue Index = DAG.getCopyFromReg(getControlRoot(), getCurSDLoc(), 1992 JT.Reg, PTy); 1993 SDValue Table = DAG.getJumpTable(JT.JTI, PTy); 1994 SDValue BrJumpTable = DAG.getNode(ISD::BR_JT, getCurSDLoc(), 1995 MVT::Other, Index.getValue(1), 1996 Table, Index); 1997 DAG.setRoot(BrJumpTable); 1998 } 1999 2000 /// visitJumpTableHeader - This function emits necessary code to produce index 2001 /// in the JumpTable from switch case. 2002 void SelectionDAGBuilder::visitJumpTableHeader(JumpTable &JT, 2003 JumpTableHeader &JTH, 2004 MachineBasicBlock *SwitchBB) { 2005 SDLoc dl = getCurSDLoc(); 2006 2007 // Subtract the lowest switch case value from the value being switched on and 2008 // conditional branch to default mbb if the result is greater than the 2009 // difference between smallest and largest cases. 
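// For example (an illustrative sketch), a switch over i32 %v with cases
// 10..14 is lowered here as:
//
//   %idx = sub i32 %v, 10               ; JTH.First == 10
//   brcond (setugt %idx, 4), %default   ; JTH.Last - JTH.First == 4
//
// so a single unsigned compare rejects values both below 10 and above 14,
// and %idx then indexes the jump table.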
2010 SDValue SwitchOp = getValue(JTH.SValue); 2011 EVT VT = SwitchOp.getValueType(); 2012 SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, SwitchOp, 2013 DAG.getConstant(JTH.First, dl, VT)); 2014 2015 // The SDNode we just created, which holds the value being switched on minus 2016 // the smallest case value, needs to be copied to a virtual register so it 2017 // can be used as an index into the jump table in a subsequent basic block. 2018 // This value may be smaller or larger than the target's pointer type, and 2019 // therefore require extension or truncating. 2020 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 2021 SwitchOp = DAG.getZExtOrTrunc(Sub, dl, TLI.getPointerTy(DAG.getDataLayout())); 2022 2023 unsigned JumpTableReg = 2024 FuncInfo.CreateReg(TLI.getPointerTy(DAG.getDataLayout())); 2025 SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), dl, 2026 JumpTableReg, SwitchOp); 2027 JT.Reg = JumpTableReg; 2028 2029 // Emit the range check for the jump table, and branch to the default block 2030 // for the switch statement if the value being switched on exceeds the largest 2031 // case in the switch. 2032 SDValue CMP = DAG.getSetCC( 2033 dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), 2034 Sub.getValueType()), 2035 Sub, DAG.getConstant(JTH.Last - JTH.First, dl, VT), ISD::SETUGT); 2036 2037 SDValue BrCond = DAG.getNode(ISD::BRCOND, dl, 2038 MVT::Other, CopyTo, CMP, 2039 DAG.getBasicBlock(JT.Default)); 2040 2041 // Avoid emitting unnecessary branches to the next block. 2042 if (JT.MBB != NextBlock(SwitchBB)) 2043 BrCond = DAG.getNode(ISD::BR, dl, MVT::Other, BrCond, 2044 DAG.getBasicBlock(JT.MBB)); 2045 2046 DAG.setRoot(BrCond); 2047 } 2048 2049 /// Create a LOAD_STACK_GUARD node, and let it carry the target specific global 2050 /// variable if there exists one. 2051 static SDValue getLoadStackGuard(SelectionDAG &DAG, const SDLoc &DL, 2052 SDValue &Chain) { 2053 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 2054 EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout()); 2055 MachineFunction &MF = DAG.getMachineFunction(); 2056 Value *Global = TLI.getSDagStackGuard(*MF.getFunction()->getParent()); 2057 MachineSDNode *Node = 2058 DAG.getMachineNode(TargetOpcode::LOAD_STACK_GUARD, DL, PtrTy, Chain); 2059 if (Global) { 2060 MachinePointerInfo MPInfo(Global); 2061 MachineInstr::mmo_iterator MemRefs = MF.allocateMemRefsArray(1); 2062 auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant | 2063 MachineMemOperand::MODereferenceable; 2064 *MemRefs = MF.getMachineMemOperand(MPInfo, Flags, PtrTy.getSizeInBits() / 8, 2065 DAG.getEVTAlignment(PtrTy)); 2066 Node->setMemRefs(MemRefs, MemRefs + 1); 2067 } 2068 return SDValue(Node, 0); 2069 } 2070 2071 /// Codegen a new tail for a stack protector check ParentMBB which has had its 2072 /// tail spliced into a stack protector check success bb. 2073 /// 2074 /// For a high level explanation of how this fits into the stack protector 2075 /// generation see the comment on the declaration of class 2076 /// StackProtectorDescriptor. 2077 void SelectionDAGBuilder::visitSPDescriptorParent(StackProtectorDescriptor &SPD, 2078 MachineBasicBlock *ParentBB) { 2079 2080 // First create the loads to the guard/stack slot for the comparison. 
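// Assuming the common case with no target guard-check function, the emitted
// tail has roughly this shape (an illustrative sketch; the guard symbol
// varies by platform):
//
//   %slot  = load volatile <stack protector frame slot>
//   %guard = load volatile @__stack_chk_guard
//   brcond (setne (sub %guard, %slot), 0), %failureMBB
//   br %successMBB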
2081 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 2082 EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout()); 2083 2084 MachineFrameInfo &MFI = ParentBB->getParent()->getFrameInfo(); 2085 int FI = MFI.getStackProtectorIndex(); 2086 2087 SDValue Guard; 2088 SDLoc dl = getCurSDLoc(); 2089 SDValue StackSlotPtr = DAG.getFrameIndex(FI, PtrTy); 2090 const Module &M = *ParentBB->getParent()->getFunction()->getParent(); 2091 unsigned Align = DL->getPrefTypeAlignment(Type::getInt8PtrTy(M.getContext())); 2092 2093 // Generate code to load the content of the guard slot. 2094 SDValue StackSlot = DAG.getLoad( 2095 PtrTy, dl, DAG.getEntryNode(), StackSlotPtr, 2096 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), Align, 2097 MachineMemOperand::MOVolatile); 2098 2099 // Retrieve guard check function, nullptr if instrumentation is inlined. 2100 if (const Value *GuardCheck = TLI.getSSPStackGuardCheck(M)) { 2101 // The target provides a guard check function to validate the guard value. 2102 // Generate a call to that function with the content of the guard slot as 2103 // argument. 2104 auto *Fn = cast<Function>(GuardCheck); 2105 FunctionType *FnTy = Fn->getFunctionType(); 2106 assert(FnTy->getNumParams() == 1 && "Invalid function signature"); 2107 2108 TargetLowering::ArgListTy Args; 2109 TargetLowering::ArgListEntry Entry; 2110 Entry.Node = StackSlot; 2111 Entry.Ty = FnTy->getParamType(0); 2112 if (Fn->hasAttribute(1, Attribute::AttrKind::InReg)) 2113 Entry.IsInReg = true; 2114 Args.push_back(Entry); 2115 2116 TargetLowering::CallLoweringInfo CLI(DAG); 2117 CLI.setDebugLoc(getCurSDLoc()) 2118 .setChain(DAG.getEntryNode()) 2119 .setCallee(Fn->getCallingConv(), FnTy->getReturnType(), 2120 getValue(GuardCheck), std::move(Args)); 2121 2122 std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI); 2123 DAG.setRoot(Result.second); 2124 return; 2125 } 2126 2127 // If useLoadStackGuardNode returns true, generate LOAD_STACK_GUARD. 2128 // Otherwise, emit a volatile load to retrieve the stack guard value. 2129 SDValue Chain = DAG.getEntryNode(); 2130 if (TLI.useLoadStackGuardNode()) { 2131 Guard = getLoadStackGuard(DAG, dl, Chain); 2132 } else { 2133 const Value *IRGuard = TLI.getSDagStackGuard(M); 2134 SDValue GuardPtr = getValue(IRGuard); 2135 2136 Guard = 2137 DAG.getLoad(PtrTy, dl, Chain, GuardPtr, MachinePointerInfo(IRGuard, 0), 2138 Align, MachineMemOperand::MOVolatile); 2139 } 2140 2141 // Perform the comparison via a subtract/getsetcc. 2142 EVT VT = Guard.getValueType(); 2143 SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, Guard, StackSlot); 2144 2145 SDValue Cmp = DAG.getSetCC(dl, TLI.getSetCCResultType(DAG.getDataLayout(), 2146 *DAG.getContext(), 2147 Sub.getValueType()), 2148 Sub, DAG.getConstant(0, dl, VT), ISD::SETNE); 2149 2150 // If the sub is not 0, then we know the guard/stackslot do not equal, so 2151 // branch to failure MBB. 2152 SDValue BrCond = DAG.getNode(ISD::BRCOND, dl, 2153 MVT::Other, StackSlot.getOperand(0), 2154 Cmp, DAG.getBasicBlock(SPD.getFailureMBB())); 2155 // Otherwise branch to success MBB. 2156 SDValue Br = DAG.getNode(ISD::BR, dl, 2157 MVT::Other, BrCond, 2158 DAG.getBasicBlock(SPD.getSuccessMBB())); 2159 2160 DAG.setRoot(Br); 2161 } 2162 2163 /// Codegen the failure basic block for a stack protector check. 2164 /// 2165 /// A failure stack protector machine basic block consists simply of a call to 2166 /// __stack_chk_fail(). 
2167 /// 2168 /// For a high level explanation of how this fits into the stack protector 2169 /// generation see the comment on the declaration of class 2170 /// StackProtectorDescriptor. 2171 void 2172 SelectionDAGBuilder::visitSPDescriptorFailure(StackProtectorDescriptor &SPD) { 2173 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 2174 SDValue Chain = 2175 TLI.makeLibCall(DAG, RTLIB::STACKPROTECTOR_CHECK_FAIL, MVT::isVoid, 2176 None, false, getCurSDLoc(), false, false).second; 2177 DAG.setRoot(Chain); 2178 } 2179 2180 /// visitBitTestHeader - This function emits necessary code to produce value 2181 /// suitable for "bit tests" 2182 void SelectionDAGBuilder::visitBitTestHeader(BitTestBlock &B, 2183 MachineBasicBlock *SwitchBB) { 2184 SDLoc dl = getCurSDLoc(); 2185 2186 // Subtract the minimum value 2187 SDValue SwitchOp = getValue(B.SValue); 2188 EVT VT = SwitchOp.getValueType(); 2189 SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, SwitchOp, 2190 DAG.getConstant(B.First, dl, VT)); 2191 2192 // Check range 2193 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 2194 SDValue RangeCmp = DAG.getSetCC( 2195 dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), 2196 Sub.getValueType()), 2197 Sub, DAG.getConstant(B.Range, dl, VT), ISD::SETUGT); 2198 2199 // Determine the type of the test operands. 2200 bool UsePtrType = false; 2201 if (!TLI.isTypeLegal(VT)) 2202 UsePtrType = true; 2203 else { 2204 for (unsigned i = 0, e = B.Cases.size(); i != e; ++i) 2205 if (!isUIntN(VT.getSizeInBits(), B.Cases[i].Mask)) { 2206 // Switch table case range are encoded into series of masks. 2207 // Just use pointer type, it's guaranteed to fit. 2208 UsePtrType = true; 2209 break; 2210 } 2211 } 2212 if (UsePtrType) { 2213 VT = TLI.getPointerTy(DAG.getDataLayout()); 2214 Sub = DAG.getZExtOrTrunc(Sub, dl, VT); 2215 } 2216 2217 B.RegVT = VT.getSimpleVT(); 2218 B.Reg = FuncInfo.CreateReg(B.RegVT); 2219 SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), dl, B.Reg, Sub); 2220 2221 MachineBasicBlock* MBB = B.Cases[0].ThisBB; 2222 2223 addSuccessorWithProb(SwitchBB, B.Default, B.DefaultProb); 2224 addSuccessorWithProb(SwitchBB, MBB, B.Prob); 2225 SwitchBB->normalizeSuccProbs(); 2226 2227 SDValue BrRange = DAG.getNode(ISD::BRCOND, dl, 2228 MVT::Other, CopyTo, RangeCmp, 2229 DAG.getBasicBlock(B.Default)); 2230 2231 // Avoid emitting unnecessary branches to the next block. 2232 if (MBB != NextBlock(SwitchBB)) 2233 BrRange = DAG.getNode(ISD::BR, dl, MVT::Other, BrRange, 2234 DAG.getBasicBlock(MBB)); 2235 2236 DAG.setRoot(BrRange); 2237 } 2238 2239 /// visitBitTestCase - this function produces one "bit test" 2240 void SelectionDAGBuilder::visitBitTestCase(BitTestBlock &BB, 2241 MachineBasicBlock* NextMBB, 2242 BranchProbability BranchProbToNext, 2243 unsigned Reg, 2244 BitTestCase &B, 2245 MachineBasicBlock *SwitchBB) { 2246 SDLoc dl = getCurSDLoc(); 2247 MVT VT = BB.RegVT; 2248 SDValue ShiftOp = DAG.getCopyFromReg(getControlRoot(), dl, Reg, VT); 2249 SDValue Cmp; 2250 unsigned PopCount = countPopulation(B.Mask); 2251 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 2252 if (PopCount == 1) { 2253 // Testing for a single bit; just compare the shift count with what it 2254 // would need to be to shift a 1 bit in that position. 
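// e.g. for Mask == 0x20 the generic test "((1 << x) & Mask) != 0" collapses
// to "x == 5", i.e. a compare against countTrailingZeros(Mask)
// (illustrative).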
2255 Cmp = DAG.getSetCC( 2256 dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT), 2257 ShiftOp, DAG.getConstant(countTrailingZeros(B.Mask), dl, VT), 2258 ISD::SETEQ); 2259 } else if (PopCount == BB.Range) { 2260 // There is only one zero bit in the range, test for it directly. 2261 Cmp = DAG.getSetCC( 2262 dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT), 2263 ShiftOp, DAG.getConstant(countTrailingOnes(B.Mask), dl, VT), 2264 ISD::SETNE); 2265 } else { 2266 // Make desired shift 2267 SDValue SwitchVal = DAG.getNode(ISD::SHL, dl, VT, 2268 DAG.getConstant(1, dl, VT), ShiftOp); 2269 2270 // Emit bit tests and jumps 2271 SDValue AndOp = DAG.getNode(ISD::AND, dl, 2272 VT, SwitchVal, DAG.getConstant(B.Mask, dl, VT)); 2273 Cmp = DAG.getSetCC( 2274 dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT), 2275 AndOp, DAG.getConstant(0, dl, VT), ISD::SETNE); 2276 } 2277 2278 // The branch probability from SwitchBB to B.TargetBB is B.ExtraProb. 2279 addSuccessorWithProb(SwitchBB, B.TargetBB, B.ExtraProb); 2280 // The branch probability from SwitchBB to NextMBB is BranchProbToNext. 2281 addSuccessorWithProb(SwitchBB, NextMBB, BranchProbToNext); 2282 // It is not guaranteed that the sum of B.ExtraProb and BranchProbToNext is 2283 // one as they are relative probabilities (and thus work more like weights), 2284 // and hence we need to normalize them to let the sum of them become one. 2285 SwitchBB->normalizeSuccProbs(); 2286 2287 SDValue BrAnd = DAG.getNode(ISD::BRCOND, dl, 2288 MVT::Other, getControlRoot(), 2289 Cmp, DAG.getBasicBlock(B.TargetBB)); 2290 2291 // Avoid emitting unnecessary branches to the next block. 2292 if (NextMBB != NextBlock(SwitchBB)) 2293 BrAnd = DAG.getNode(ISD::BR, dl, MVT::Other, BrAnd, 2294 DAG.getBasicBlock(NextMBB)); 2295 2296 DAG.setRoot(BrAnd); 2297 } 2298 2299 void SelectionDAGBuilder::visitInvoke(const InvokeInst &I) { 2300 MachineBasicBlock *InvokeMBB = FuncInfo.MBB; 2301 2302 // Retrieve successors. Look through artificial IR level blocks like 2303 // catchswitch for successors. 2304 MachineBasicBlock *Return = FuncInfo.MBBMap[I.getSuccessor(0)]; 2305 const BasicBlock *EHPadBB = I.getSuccessor(1); 2306 2307 // Deopt bundles are lowered in LowerCallSiteWithDeoptBundle, and we don't 2308 // have to do anything here to lower funclet bundles. 2309 assert(!I.hasOperandBundlesOtherThan( 2310 {LLVMContext::OB_deopt, LLVMContext::OB_funclet}) && 2311 "Cannot lower invokes with arbitrary operand bundles yet!"); 2312 2313 const Value *Callee(I.getCalledValue()); 2314 const Function *Fn = dyn_cast<Function>(Callee); 2315 if (isa<InlineAsm>(Callee)) 2316 visitInlineAsm(&I); 2317 else if (Fn && Fn->isIntrinsic()) { 2318 switch (Fn->getIntrinsicID()) { 2319 default: 2320 llvm_unreachable("Cannot invoke this intrinsic"); 2321 case Intrinsic::donothing: 2322 // Ignore invokes to @llvm.donothing: jump directly to the next BB. 2323 break; 2324 case Intrinsic::experimental_patchpoint_void: 2325 case Intrinsic::experimental_patchpoint_i64: 2326 visitPatchpoint(&I, EHPadBB); 2327 break; 2328 case Intrinsic::experimental_gc_statepoint: 2329 LowerStatepoint(ImmutableStatepoint(&I), EHPadBB); 2330 break; 2331 } 2332 } else if (I.countOperandBundlesOfType(LLVMContext::OB_deopt)) { 2333 // Currently we do not lower any intrinsic calls with deopt operand bundles. 
2334 // Eventually we will support lowering the @llvm.experimental.deoptimize 2335 // intrinsic, and right now there are no plans to support other intrinsics 2336 // with deopt state. 2337 LowerCallSiteWithDeoptBundle(&I, getValue(Callee), EHPadBB); 2338 } else { 2339 LowerCallTo(&I, getValue(Callee), false, EHPadBB); 2340 } 2341 2342 // If the value of the invoke is used outside of its defining block, make it 2343 // available as a virtual register. 2344 // We already took care of the exported value for the statepoint instruction 2345 // during call to the LowerStatepoint. 2346 if (!isStatepoint(I)) { 2347 CopyToExportRegsIfNeeded(&I); 2348 } 2349 2350 SmallVector<std::pair<MachineBasicBlock *, BranchProbability>, 1> UnwindDests; 2351 BranchProbabilityInfo *BPI = FuncInfo.BPI; 2352 BranchProbability EHPadBBProb = 2353 BPI ? BPI->getEdgeProbability(InvokeMBB->getBasicBlock(), EHPadBB) 2354 : BranchProbability::getZero(); 2355 findUnwindDestinations(FuncInfo, EHPadBB, EHPadBBProb, UnwindDests); 2356 2357 // Update successor info. 2358 addSuccessorWithProb(InvokeMBB, Return); 2359 for (auto &UnwindDest : UnwindDests) { 2360 UnwindDest.first->setIsEHPad(); 2361 addSuccessorWithProb(InvokeMBB, UnwindDest.first, UnwindDest.second); 2362 } 2363 InvokeMBB->normalizeSuccProbs(); 2364 2365 // Drop into normal successor. 2366 DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), 2367 MVT::Other, getControlRoot(), 2368 DAG.getBasicBlock(Return))); 2369 } 2370 2371 void SelectionDAGBuilder::visitResume(const ResumeInst &RI) { 2372 llvm_unreachable("SelectionDAGBuilder shouldn't visit resume instructions!"); 2373 } 2374 2375 void SelectionDAGBuilder::visitLandingPad(const LandingPadInst &LP) { 2376 assert(FuncInfo.MBB->isEHPad() && 2377 "Call to landingpad not in landing pad!"); 2378 2379 MachineBasicBlock *MBB = FuncInfo.MBB; 2380 addLandingPadInfo(LP, *MBB); 2381 2382 // If there aren't registers to copy the values into (e.g., during SjLj 2383 // exceptions), then don't bother to create these DAG nodes. 2384 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 2385 const Constant *PersonalityFn = FuncInfo.Fn->getPersonalityFn(); 2386 if (TLI.getExceptionPointerRegister(PersonalityFn) == 0 && 2387 TLI.getExceptionSelectorRegister(PersonalityFn) == 0) 2388 return; 2389 2390 // If landingpad's return type is token type, we don't create DAG nodes 2391 // for its exception pointer and selector value. The extraction of exception 2392 // pointer or selector value from token type landingpads is not currently 2393 // supported. 2394 if (LP.getType()->isTokenTy()) 2395 return; 2396 2397 SmallVector<EVT, 2> ValueVTs; 2398 SDLoc dl = getCurSDLoc(); 2399 ComputeValueVTs(TLI, DAG.getDataLayout(), LP.getType(), ValueVTs); 2400 assert(ValueVTs.size() == 2 && "Only two-valued landingpads are supported"); 2401 2402 // Get the two live-in registers as SDValues. The physregs have already been 2403 // copied into virtual registers. 2404 SDValue Ops[2]; 2405 if (FuncInfo.ExceptionPointerVirtReg) { 2406 Ops[0] = DAG.getZExtOrTrunc( 2407 DAG.getCopyFromReg(DAG.getEntryNode(), dl, 2408 FuncInfo.ExceptionPointerVirtReg, 2409 TLI.getPointerTy(DAG.getDataLayout())), 2410 dl, ValueVTs[0]); 2411 } else { 2412 Ops[0] = DAG.getConstant(0, dl, TLI.getPointerTy(DAG.getDataLayout())); 2413 } 2414 Ops[1] = DAG.getZExtOrTrunc( 2415 DAG.getCopyFromReg(DAG.getEntryNode(), dl, 2416 FuncInfo.ExceptionSelectorVirtReg, 2417 TLI.getPointerTy(DAG.getDataLayout())), 2418 dl, ValueVTs[1]); 2419 2420 // Merge into one. 
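// For a landingpad of the usual type {i8*, i32}, result 0 of the merged node
// below is the exception pointer and result 1 is the selector (sketch).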
2421 SDValue Res = DAG.getNode(ISD::MERGE_VALUES, dl, 2422 DAG.getVTList(ValueVTs), Ops); 2423 setValue(&LP, Res); 2424 } 2425 2426 void SelectionDAGBuilder::sortAndRangeify(CaseClusterVector &Clusters) { 2427 #ifndef NDEBUG 2428 for (const CaseCluster &CC : Clusters) 2429 assert(CC.Low == CC.High && "Input clusters must be single-case"); 2430 #endif 2431 2432 std::sort(Clusters.begin(), Clusters.end(), 2433 [](const CaseCluster &a, const CaseCluster &b) { 2434 return a.Low->getValue().slt(b.Low->getValue()); 2435 }); 2436 2437 // Merge adjacent clusters with the same destination. 2438 const unsigned N = Clusters.size(); 2439 unsigned DstIndex = 0; 2440 for (unsigned SrcIndex = 0; SrcIndex < N; ++SrcIndex) { 2441 CaseCluster &CC = Clusters[SrcIndex]; 2442 const ConstantInt *CaseVal = CC.Low; 2443 MachineBasicBlock *Succ = CC.MBB; 2444 2445 if (DstIndex != 0 && Clusters[DstIndex - 1].MBB == Succ && 2446 (CaseVal->getValue() - Clusters[DstIndex - 1].High->getValue()) == 1) { 2447 // If this case has the same successor and is a neighbour, merge it into 2448 // the previous cluster. 2449 Clusters[DstIndex - 1].High = CaseVal; 2450 Clusters[DstIndex - 1].Prob += CC.Prob; 2451 } else { 2452 std::memmove(&Clusters[DstIndex++], &Clusters[SrcIndex], 2453 sizeof(Clusters[SrcIndex])); 2454 } 2455 } 2456 Clusters.resize(DstIndex); 2457 } 2458 2459 void SelectionDAGBuilder::UpdateSplitBlock(MachineBasicBlock *First, 2460 MachineBasicBlock *Last) { 2461 // Update JTCases. 2462 for (unsigned i = 0, e = JTCases.size(); i != e; ++i) 2463 if (JTCases[i].first.HeaderBB == First) 2464 JTCases[i].first.HeaderBB = Last; 2465 2466 // Update BitTestCases. 2467 for (unsigned i = 0, e = BitTestCases.size(); i != e; ++i) 2468 if (BitTestCases[i].Parent == First) 2469 BitTestCases[i].Parent = Last; 2470 } 2471 2472 void SelectionDAGBuilder::visitIndirectBr(const IndirectBrInst &I) { 2473 MachineBasicBlock *IndirectBrMBB = FuncInfo.MBB; 2474 2475 // Update machine-CFG edges with unique successors. 2476 SmallSet<BasicBlock*, 32> Done; 2477 for (unsigned i = 0, e = I.getNumSuccessors(); i != e; ++i) { 2478 BasicBlock *BB = I.getSuccessor(i); 2479 bool Inserted = Done.insert(BB).second; 2480 if (!Inserted) 2481 continue; 2482 2483 MachineBasicBlock *Succ = FuncInfo.MBBMap[BB]; 2484 addSuccessorWithProb(IndirectBrMBB, Succ); 2485 } 2486 IndirectBrMBB->normalizeSuccProbs(); 2487 2488 DAG.setRoot(DAG.getNode(ISD::BRIND, getCurSDLoc(), 2489 MVT::Other, getControlRoot(), 2490 getValue(I.getAddress()))); 2491 } 2492 2493 void SelectionDAGBuilder::visitUnreachable(const UnreachableInst &I) { 2494 if (DAG.getTarget().Options.TrapUnreachable) 2495 DAG.setRoot( 2496 DAG.getNode(ISD::TRAP, getCurSDLoc(), MVT::Other, DAG.getRoot())); 2497 } 2498 2499 void SelectionDAGBuilder::visitFSub(const User &I) { 2500 // -0.0 - X --> fneg 2501 Type *Ty = I.getType(); 2502 if (isa<Constant>(I.getOperand(0)) && 2503 I.getOperand(0) == ConstantFP::getZeroValueForNegation(Ty)) { 2504 SDValue Op2 = getValue(I.getOperand(1)); 2505 setValue(&I, DAG.getNode(ISD::FNEG, getCurSDLoc(), 2506 Op2.getValueType(), Op2)); 2507 return; 2508 } 2509 2510 visitBinary(I, ISD::FSUB); 2511 } 2512 2513 /// Checks if the given instruction performs a vector reduction, in which case 2514 /// we have the freedom to alter the elements in the result as long as the 2515 /// reduction of them stays unchanged. 
2516 static bool isVectorReductionOp(const User *I) {
2517   const Instruction *Inst = dyn_cast<Instruction>(I);
2518   if (!Inst || !Inst->getType()->isVectorTy())
2519     return false;
2520
2521   auto OpCode = Inst->getOpcode();
2522   switch (OpCode) {
2523   case Instruction::Add:
2524   case Instruction::Mul:
2525   case Instruction::And:
2526   case Instruction::Or:
2527   case Instruction::Xor:
2528     break;
2529   case Instruction::FAdd:
2530   case Instruction::FMul:
2531     if (const FPMathOperator *FPOp = dyn_cast<const FPMathOperator>(Inst))
2532       if (FPOp->getFastMathFlags().unsafeAlgebra())
2533         break;
2534     LLVM_FALLTHROUGH;
2535   default:
2536     return false;
2537   }
2538
2539   unsigned ElemNum = Inst->getType()->getVectorNumElements();
2540   unsigned ElemNumToReduce = ElemNum;
2541
2542   // Do a DFS on the def-use chain from the given instruction. We only
2543   // allow four kinds of operations during the search until we reach the
2544   // instruction that extracts the first element from the vector:
2545   //
2546   // 1. The reduction operation of the same opcode as the given instruction.
2547   //
2548   // 2. PHI node.
2549   //
2550   // 3. ShuffleVector instruction together with a reduction operation that
2551   //    does a partial reduction.
2552   //
2553   // 4. ExtractElement that extracts the first element from the vector, and we
2554   //    stop searching the def-use chain here.
2555   //
2556   // 3 & 4 above perform a reduction on all elements of the vector. We push
2557   // defs from 1-3 onto the stack to continue the DFS. The given instruction
2558   // is not a reduction operation if we meet any instruction other than those
2559   // listed above.
2560
2561   SmallVector<const User *, 16> UsersToVisit{Inst};
2562   SmallPtrSet<const User *, 16> Visited;
2563   bool ReduxExtracted = false;
2564
2565   while (!UsersToVisit.empty()) {
2566     auto User = UsersToVisit.back();
2567     UsersToVisit.pop_back();
2568     if (!Visited.insert(User).second)
2569       continue;
2570
2571     for (const auto &U : User->users()) {
2572       auto Inst = dyn_cast<Instruction>(U);
2573       if (!Inst)
2574         return false;
2575
2576       if (Inst->getOpcode() == OpCode || isa<PHINode>(U)) {
2577         if (const FPMathOperator *FPOp = dyn_cast<const FPMathOperator>(Inst))
2578           if (!isa<PHINode>(FPOp) && !FPOp->getFastMathFlags().unsafeAlgebra())
2579             return false;
2580         UsersToVisit.push_back(U);
2581       } else if (const ShuffleVectorInst *ShufInst =
2582                      dyn_cast<ShuffleVectorInst>(U)) {
2583         // Detect the following pattern: a ShuffleVector instruction together
2584         // with a reduction operation that does a partial reduction over the
2585         // first and second ElemNumToReduce / 2 elements, storing the partial
2586         // result in the first ElemNumToReduce / 2 elements of another vector.
2587
2588         unsigned ResultElements = ShufInst->getType()->getVectorNumElements();
2589         if (ResultElements < ElemNum)
2590           return false;
2591
2592         if (ElemNumToReduce == 1)
2593           return false;
2594         if (!isa<UndefValue>(U->getOperand(1)))
2595           return false;
2596         for (unsigned i = 0; i < ElemNumToReduce / 2; ++i)
2597           if (ShufInst->getMaskValue(i) != int(i + ElemNumToReduce / 2))
2598             return false;
2599         for (unsigned i = ElemNumToReduce / 2; i < ElemNum; ++i)
2600           if (ShufInst->getMaskValue(i) != -1)
2601             return false;
2602
2603         // There is only one user of this ShuffleVector instruction, which
2604         // must be a reduction operation.
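// An add reduction over <4 x i32> that reaches this point looks like
// (an illustrative IR sketch):
//
//   %r1 = shufflevector <4 x i32> %v, <4 x i32> undef,
//                       <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
//   %s1 = add <4 x i32> %v, %r1
//   %r2 = shufflevector <4 x i32> %s1, <4 x i32> undef,
//                       <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
//   %s2 = add <4 x i32> %s1, %r2
//   %x  = extractelement <4 x i32> %s2, i32 0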
2605 if (!U->hasOneUse()) 2606 return false; 2607 2608 auto U2 = dyn_cast<Instruction>(*U->user_begin()); 2609 if (!U2 || U2->getOpcode() != OpCode) 2610 return false; 2611 2612 // Check operands of the reduction operation. 2613 if ((U2->getOperand(0) == U->getOperand(0) && U2->getOperand(1) == U) || 2614 (U2->getOperand(1) == U->getOperand(0) && U2->getOperand(0) == U)) { 2615 UsersToVisit.push_back(U2); 2616 ElemNumToReduce /= 2; 2617 } else 2618 return false; 2619 } else if (isa<ExtractElementInst>(U)) { 2620 // At this moment we should have reduced all elements in the vector. 2621 if (ElemNumToReduce != 1) 2622 return false; 2623 2624 const ConstantInt *Val = dyn_cast<ConstantInt>(U->getOperand(1)); 2625 if (!Val || Val->getZExtValue() != 0) 2626 return false; 2627 2628 ReduxExtracted = true; 2629 } else 2630 return false; 2631 } 2632 } 2633 return ReduxExtracted; 2634 } 2635 2636 void SelectionDAGBuilder::visitBinary(const User &I, unsigned OpCode) { 2637 SDValue Op1 = getValue(I.getOperand(0)); 2638 SDValue Op2 = getValue(I.getOperand(1)); 2639 2640 bool nuw = false; 2641 bool nsw = false; 2642 bool exact = false; 2643 bool vec_redux = false; 2644 FastMathFlags FMF; 2645 2646 if (const OverflowingBinaryOperator *OFBinOp = 2647 dyn_cast<const OverflowingBinaryOperator>(&I)) { 2648 nuw = OFBinOp->hasNoUnsignedWrap(); 2649 nsw = OFBinOp->hasNoSignedWrap(); 2650 } 2651 if (const PossiblyExactOperator *ExactOp = 2652 dyn_cast<const PossiblyExactOperator>(&I)) 2653 exact = ExactOp->isExact(); 2654 if (const FPMathOperator *FPOp = dyn_cast<const FPMathOperator>(&I)) 2655 FMF = FPOp->getFastMathFlags(); 2656 2657 if (isVectorReductionOp(&I)) { 2658 vec_redux = true; 2659 DEBUG(dbgs() << "Detected a reduction operation:" << I << "\n"); 2660 } 2661 2662 SDNodeFlags Flags; 2663 Flags.setExact(exact); 2664 Flags.setNoSignedWrap(nsw); 2665 Flags.setNoUnsignedWrap(nuw); 2666 Flags.setVectorReduction(vec_redux); 2667 Flags.setAllowReciprocal(FMF.allowReciprocal()); 2668 Flags.setAllowContract(FMF.allowContract()); 2669 Flags.setNoInfs(FMF.noInfs()); 2670 Flags.setNoNaNs(FMF.noNaNs()); 2671 Flags.setNoSignedZeros(FMF.noSignedZeros()); 2672 Flags.setUnsafeAlgebra(FMF.unsafeAlgebra()); 2673 2674 SDValue BinNodeValue = DAG.getNode(OpCode, getCurSDLoc(), Op1.getValueType(), 2675 Op1, Op2, Flags); 2676 setValue(&I, BinNodeValue); 2677 } 2678 2679 void SelectionDAGBuilder::visitShift(const User &I, unsigned Opcode) { 2680 SDValue Op1 = getValue(I.getOperand(0)); 2681 SDValue Op2 = getValue(I.getOperand(1)); 2682 2683 EVT ShiftTy = DAG.getTargetLoweringInfo().getShiftAmountTy( 2684 Op2.getValueType(), DAG.getDataLayout()); 2685 2686 // Coerce the shift amount to the right type if we can. 2687 if (!I.getType()->isVectorTy() && Op2.getValueType() != ShiftTy) { 2688 unsigned ShiftSize = ShiftTy.getSizeInBits(); 2689 unsigned Op2Size = Op2.getValueSizeInBits(); 2690 SDLoc DL = getCurSDLoc(); 2691 2692 // If the operand is smaller than the shift count type, promote it. 2693 if (ShiftSize > Op2Size) 2694 Op2 = DAG.getNode(ISD::ZERO_EXTEND, DL, ShiftTy, Op2); 2695 2696 // If the operand is larger than the shift count type but the shift 2697 // count type has enough bits to represent any shift value, truncate 2698 // it now. This is a common case and it exposes the truncate to 2699 // optimization early. 
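// (e.g. an i64 shift amount with an i32 shift-amount type: every in-range
// amount is smaller than the shifted value's bit width, so it fits easily
// in i32 and the early truncation below is lossless; illustrative.)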
2700     else if (ShiftSize >= Log2_32_Ceil(Op2.getValueSizeInBits()))
2701       Op2 = DAG.getNode(ISD::TRUNCATE, DL, ShiftTy, Op2);
2702     // Otherwise we'll need to temporarily settle for some other convenient
2703     // type. Type legalization will make adjustments once the shiftee is split.
2704     else
2705       Op2 = DAG.getZExtOrTrunc(Op2, DL, MVT::i32);
2706   }
2707
2708   bool nuw = false;
2709   bool nsw = false;
2710   bool exact = false;
2711
2712   if (Opcode == ISD::SRL || Opcode == ISD::SRA || Opcode == ISD::SHL) {
2713
2714     if (const OverflowingBinaryOperator *OFBinOp =
2715             dyn_cast<const OverflowingBinaryOperator>(&I)) {
2716       nuw = OFBinOp->hasNoUnsignedWrap();
2717       nsw = OFBinOp->hasNoSignedWrap();
2718     }
2719     if (const PossiblyExactOperator *ExactOp =
2720             dyn_cast<const PossiblyExactOperator>(&I))
2721       exact = ExactOp->isExact();
2722   }
2723   SDNodeFlags Flags;
2724   Flags.setExact(exact);
2725   Flags.setNoSignedWrap(nsw);
2726   Flags.setNoUnsignedWrap(nuw);
2727   SDValue Res = DAG.getNode(Opcode, getCurSDLoc(), Op1.getValueType(), Op1, Op2,
2728                             Flags);
2729   setValue(&I, Res);
2730 }
2731
2732 void SelectionDAGBuilder::visitSDiv(const User &I) {
2733   SDValue Op1 = getValue(I.getOperand(0));
2734   SDValue Op2 = getValue(I.getOperand(1));
2735
2736   SDNodeFlags Flags;
2737   Flags.setExact(isa<PossiblyExactOperator>(&I) &&
2738                  cast<PossiblyExactOperator>(&I)->isExact());
2739   setValue(&I, DAG.getNode(ISD::SDIV, getCurSDLoc(), Op1.getValueType(), Op1,
2740                            Op2, Flags));
2741 }
2742
2743 void SelectionDAGBuilder::visitICmp(const User &I) {
2744   ICmpInst::Predicate predicate = ICmpInst::BAD_ICMP_PREDICATE;
2745   if (const ICmpInst *IC = dyn_cast<ICmpInst>(&I))
2746     predicate = IC->getPredicate();
2747   else if (const ConstantExpr *IC = dyn_cast<ConstantExpr>(&I))
2748     predicate = ICmpInst::Predicate(IC->getPredicate());
2749   SDValue Op1 = getValue(I.getOperand(0));
2750   SDValue Op2 = getValue(I.getOperand(1));
2751   ISD::CondCode Opcode = getICmpCondCode(predicate);
2752
2753   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
2754                                                         I.getType());
2755   setValue(&I, DAG.getSetCC(getCurSDLoc(), DestVT, Op1, Op2, Opcode));
2756 }
2757
2758 void SelectionDAGBuilder::visitFCmp(const User &I) {
2759   FCmpInst::Predicate predicate = FCmpInst::BAD_FCMP_PREDICATE;
2760   if (const FCmpInst *FC = dyn_cast<FCmpInst>(&I))
2761     predicate = FC->getPredicate();
2762   else if (const ConstantExpr *FC = dyn_cast<ConstantExpr>(&I))
2763     predicate = FCmpInst::Predicate(FC->getPredicate());
2764   SDValue Op1 = getValue(I.getOperand(0));
2765   SDValue Op2 = getValue(I.getOperand(1));
2766   ISD::CondCode Condition = getFCmpCondCode(predicate);
2767
2768   // FIXME: Fcmp instructions have fast-math-flags in IR, so we should use them.
2769   // FIXME: We should propagate the fast-math-flags to the DAG node itself for
2770   // further optimization, but currently FMF is only applicable to binary nodes.
2771   if (TM.Options.NoNaNsFPMath)
2772     Condition = getFCmpCodeWithoutNaN(Condition);
2773   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
2774                                                         I.getType());
2775   setValue(&I, DAG.getSetCC(getCurSDLoc(), DestVT, Op1, Op2, Condition));
2776 }
2777
2778 // Check that every user of the select's condition is itself a select; if the
2779 // compare has any other user, folding it into min/max is not profitable.
2780 static bool hasOnlySelectUsers(const Value *Cond) {
2781   return all_of(Cond->users(), [](const Value *V) {
2782     return isa<SelectInst>(V);
2783   });
2784 }
2785
2786 void SelectionDAGBuilder::visitSelect(const User &I) {
2787   SmallVector<EVT, 4> ValueVTs;
2788   ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(), I.getType(),
2789                   ValueVTs);
2790   unsigned NumValues = ValueVTs.size();
2791   if (NumValues == 0) return;
2792
2793   SmallVector<SDValue, 4> Values(NumValues);
2794   SDValue Cond = getValue(I.getOperand(0));
2795   SDValue LHSVal = getValue(I.getOperand(1));
2796   SDValue RHSVal = getValue(I.getOperand(2));
2797   auto BaseOps = {Cond};
2798   ISD::NodeType OpCode = Cond.getValueType().isVector() ?
2799     ISD::VSELECT : ISD::SELECT;
2800
2801   // Min/max matching is only viable if all output VTs are the same. Note that
2802   // comparing [begin + 1, end) against [begin, ...) checks adjacent pairs, so
2803   // this holds exactly when every element equals the first.
2804   if (std::equal(ValueVTs.begin() + 1, ValueVTs.end(), ValueVTs.begin())) {
2805     EVT VT = ValueVTs[0];
2806     LLVMContext &Ctx = *DAG.getContext();
2807     auto &TLI = DAG.getTargetLoweringInfo();
2808
2809     // We care about the legality of the operation after it has been type
2810     // legalized.
2811     while (TLI.getTypeAction(Ctx, VT) != TargetLoweringBase::TypeLegal &&
2812            VT != TLI.getTypeToTransformTo(Ctx, VT))
2813       VT = TLI.getTypeToTransformTo(Ctx, VT);
2814
2815     // If the vselect is legal, assume we want to leave this as a vector setcc +
2816     // vselect. Otherwise, if this is going to be scalarized, we want to see if
2817     // min/max is legal on the scalar type.
2818     bool UseScalarMinMax = VT.isVector() &&
2819       !TLI.isOperationLegalOrCustom(ISD::VSELECT, VT);
2820
2821     Value *LHS, *RHS;
2822     auto SPR = matchSelectPattern(const_cast<User*>(&I), LHS, RHS);
2823     ISD::NodeType Opc = ISD::DELETED_NODE;
2824     switch (SPR.Flavor) {
2825     case SPF_UMAX: Opc = ISD::UMAX; break;
2826     case SPF_UMIN: Opc = ISD::UMIN; break;
2827     case SPF_SMAX: Opc = ISD::SMAX; break;
2828     case SPF_SMIN: Opc = ISD::SMIN; break;
2829     case SPF_FMINNUM:
2830       switch (SPR.NaNBehavior) {
2831       case SPNB_NA: llvm_unreachable("No NaN behavior for FP op?");
2832       case SPNB_RETURNS_NAN:   Opc = ISD::FMINNAN; break;
2833       case SPNB_RETURNS_OTHER: Opc = ISD::FMINNUM; break;
2834       case SPNB_RETURNS_ANY: {
2835         if (TLI.isOperationLegalOrCustom(ISD::FMINNUM, VT))
2836           Opc = ISD::FMINNUM;
2837         else if (TLI.isOperationLegalOrCustom(ISD::FMINNAN, VT))
2838           Opc = ISD::FMINNAN;
2839         else if (UseScalarMinMax)
2840           Opc = TLI.isOperationLegalOrCustom(ISD::FMINNUM, VT.getScalarType()) ?
2841             ISD::FMINNUM : ISD::FMINNAN;
2842         break;
2843       }
2844       }
2845       break;
2846     case SPF_FMAXNUM:
2847       switch (SPR.NaNBehavior) {
2848       case SPNB_NA: llvm_unreachable("No NaN behavior for FP op?");
2849       case SPNB_RETURNS_NAN:   Opc = ISD::FMAXNAN; break;
2850       case SPNB_RETURNS_OTHER: Opc = ISD::FMAXNUM; break;
2851       case SPNB_RETURNS_ANY:
2852
2853         if (TLI.isOperationLegalOrCustom(ISD::FMAXNUM, VT))
2854           Opc = ISD::FMAXNUM;
2855         else if (TLI.isOperationLegalOrCustom(ISD::FMAXNAN, VT))
2856           Opc = ISD::FMAXNAN;
2857         else if (UseScalarMinMax)
2858           Opc = TLI.isOperationLegalOrCustom(ISD::FMAXNUM, VT.getScalarType()) ?
2857 ISD::FMAXNUM : ISD::FMAXNAN; 2858 break; 2859 } 2860 break; 2861 default: break; 2862 } 2863 2864 if (Opc != ISD::DELETED_NODE && 2865 (TLI.isOperationLegalOrCustom(Opc, VT) || 2866 (UseScalarMinMax && 2867 TLI.isOperationLegalOrCustom(Opc, VT.getScalarType()))) && 2868 // If the underlying comparison instruction is used by any other 2869 // instruction, the consumed instructions won't be destroyed, so it is 2870 // not profitable to convert to a min/max. 2871 hasOnlySelectUsers(cast<SelectInst>(I).getCondition())) { 2872 OpCode = Opc; 2873 LHSVal = getValue(LHS); 2874 RHSVal = getValue(RHS); 2875 BaseOps = {}; 2876 } 2877 } 2878 2879 for (unsigned i = 0; i != NumValues; ++i) { 2880 SmallVector<SDValue, 3> Ops(BaseOps.begin(), BaseOps.end()); 2881 Ops.push_back(SDValue(LHSVal.getNode(), LHSVal.getResNo() + i)); 2882 Ops.push_back(SDValue(RHSVal.getNode(), RHSVal.getResNo() + i)); 2883 Values[i] = DAG.getNode(OpCode, getCurSDLoc(), 2884 LHSVal.getNode()->getValueType(LHSVal.getResNo()+i), 2885 Ops); 2886 } 2887 2888 setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(), 2889 DAG.getVTList(ValueVTs), Values)); 2890 } 2891 2892 void SelectionDAGBuilder::visitTrunc(const User &I) { 2893 // TruncInst cannot be a no-op cast because sizeof(src) > sizeof(dest). 2894 SDValue N = getValue(I.getOperand(0)); 2895 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(), 2896 I.getType()); 2897 setValue(&I, DAG.getNode(ISD::TRUNCATE, getCurSDLoc(), DestVT, N)); 2898 } 2899 2900 void SelectionDAGBuilder::visitZExt(const User &I) { 2901 // ZExt cannot be a no-op cast because sizeof(src) < sizeof(dest). 2902 // ZExt also can't be a cast to bool for same reason. So, nothing much to do 2903 SDValue N = getValue(I.getOperand(0)); 2904 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(), 2905 I.getType()); 2906 setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, getCurSDLoc(), DestVT, N)); 2907 } 2908 2909 void SelectionDAGBuilder::visitSExt(const User &I) { 2910 // SExt cannot be a no-op cast because sizeof(src) < sizeof(dest). 2911 // SExt also can't be a cast to bool for same reason. 
So, nothing much to do 2912 SDValue N = getValue(I.getOperand(0)); 2913 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(), 2914 I.getType()); 2915 setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, getCurSDLoc(), DestVT, N)); 2916 } 2917 2918 void SelectionDAGBuilder::visitFPTrunc(const User &I) { 2919 // FPTrunc is never a no-op cast, no need to check 2920 SDValue N = getValue(I.getOperand(0)); 2921 SDLoc dl = getCurSDLoc(); 2922 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 2923 EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType()); 2924 setValue(&I, DAG.getNode(ISD::FP_ROUND, dl, DestVT, N, 2925 DAG.getTargetConstant( 2926 0, dl, TLI.getPointerTy(DAG.getDataLayout())))); 2927 } 2928 2929 void SelectionDAGBuilder::visitFPExt(const User &I) { 2930 // FPExt is never a no-op cast, no need to check 2931 SDValue N = getValue(I.getOperand(0)); 2932 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(), 2933 I.getType()); 2934 setValue(&I, DAG.getNode(ISD::FP_EXTEND, getCurSDLoc(), DestVT, N)); 2935 } 2936 2937 void SelectionDAGBuilder::visitFPToUI(const User &I) { 2938 // FPToUI is never a no-op cast, no need to check 2939 SDValue N = getValue(I.getOperand(0)); 2940 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(), 2941 I.getType()); 2942 setValue(&I, DAG.getNode(ISD::FP_TO_UINT, getCurSDLoc(), DestVT, N)); 2943 } 2944 2945 void SelectionDAGBuilder::visitFPToSI(const User &I) { 2946 // FPToSI is never a no-op cast, no need to check 2947 SDValue N = getValue(I.getOperand(0)); 2948 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(), 2949 I.getType()); 2950 setValue(&I, DAG.getNode(ISD::FP_TO_SINT, getCurSDLoc(), DestVT, N)); 2951 } 2952 2953 void SelectionDAGBuilder::visitUIToFP(const User &I) { 2954 // UIToFP is never a no-op cast, no need to check 2955 SDValue N = getValue(I.getOperand(0)); 2956 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(), 2957 I.getType()); 2958 setValue(&I, DAG.getNode(ISD::UINT_TO_FP, getCurSDLoc(), DestVT, N)); 2959 } 2960 2961 void SelectionDAGBuilder::visitSIToFP(const User &I) { 2962 // SIToFP is never a no-op cast, no need to check 2963 SDValue N = getValue(I.getOperand(0)); 2964 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(), 2965 I.getType()); 2966 setValue(&I, DAG.getNode(ISD::SINT_TO_FP, getCurSDLoc(), DestVT, N)); 2967 } 2968 2969 void SelectionDAGBuilder::visitPtrToInt(const User &I) { 2970 // What to do depends on the size of the integer and the size of the pointer. 2971 // We can either truncate, zero extend, or no-op, accordingly. 2972 SDValue N = getValue(I.getOperand(0)); 2973 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(), 2974 I.getType()); 2975 setValue(&I, DAG.getZExtOrTrunc(N, getCurSDLoc(), DestVT)); 2976 } 2977 2978 void SelectionDAGBuilder::visitIntToPtr(const User &I) { 2979 // What to do depends on the size of the integer and the size of the pointer. 2980 // We can either truncate, zero extend, or no-op, accordingly. 
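// e.g. "inttoptr i64 %x to i8*" truncates on a 32-bit target and is a no-op
// on a 64-bit one, while "inttoptr i16 %x to i8*" zero-extends (illustrative).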
  SDValue N = getValue(I.getOperand(0));
  EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
                                                        I.getType());
  setValue(&I, DAG.getZExtOrTrunc(N, getCurSDLoc(), DestVT));
}

void SelectionDAGBuilder::visitBitCast(const User &I) {
  SDValue N = getValue(I.getOperand(0));
  SDLoc dl = getCurSDLoc();
  EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
                                                        I.getType());

  // BitCast assures us that source and destination are the same size so this
  // is either a BITCAST or a no-op.
  if (DestVT != N.getValueType())
    setValue(&I, DAG.getNode(ISD::BITCAST, dl,
                             DestVT, N)); // convert types.
  // Check if the original LLVM IR Operand was a ConstantInt, because getValue()
  // might fold any kind of constant expression to an integer constant and that
  // is not what we are looking for. Only recognize a bitcast of a genuine
  // constant integer as an opaque constant.
  else if (ConstantInt *C = dyn_cast<ConstantInt>(I.getOperand(0)))
    setValue(&I, DAG.getConstant(C->getValue(), dl, DestVT, /*isTarget=*/false,
                                 /*isOpaque*/true));
  else
    setValue(&I, N); // noop cast.
}

void SelectionDAGBuilder::visitAddrSpaceCast(const User &I) {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  const Value *SV = I.getOperand(0);
  SDValue N = getValue(SV);
  EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());

  unsigned SrcAS = SV->getType()->getPointerAddressSpace();
  unsigned DestAS = I.getType()->getPointerAddressSpace();

  if (!TLI.isNoopAddrSpaceCast(SrcAS, DestAS))
    N = DAG.getAddrSpaceCast(getCurSDLoc(), DestVT, N, SrcAS, DestAS);

  setValue(&I, N);
}

void SelectionDAGBuilder::visitInsertElement(const User &I) {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue InVec = getValue(I.getOperand(0));
  SDValue InVal = getValue(I.getOperand(1));
  SDValue InIdx = DAG.getSExtOrTrunc(getValue(I.getOperand(2)), getCurSDLoc(),
                                     TLI.getVectorIdxTy(DAG.getDataLayout()));
  setValue(&I, DAG.getNode(ISD::INSERT_VECTOR_ELT, getCurSDLoc(),
                           TLI.getValueType(DAG.getDataLayout(), I.getType()),
                           InVec, InVal, InIdx));
}

void SelectionDAGBuilder::visitExtractElement(const User &I) {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue InVec = getValue(I.getOperand(0));
  SDValue InIdx = DAG.getSExtOrTrunc(getValue(I.getOperand(1)), getCurSDLoc(),
                                     TLI.getVectorIdxTy(DAG.getDataLayout()));
  setValue(&I, DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurSDLoc(),
                           TLI.getValueType(DAG.getDataLayout(), I.getType()),
                           InVec, InIdx));
}

void SelectionDAGBuilder::visitShuffleVector(const User &I) {
  SDValue Src1 = getValue(I.getOperand(0));
  SDValue Src2 = getValue(I.getOperand(1));
  SDLoc DL = getCurSDLoc();

  SmallVector<int, 8> Mask;
  ShuffleVectorInst::getShuffleMask(cast<Constant>(I.getOperand(2)), Mask);
  unsigned MaskNumElts = Mask.size();

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
  EVT SrcVT = Src1.getValueType();
  unsigned SrcNumElts = SrcVT.getVectorNumElements();

  if (SrcNumElts == MaskNumElts) {
    setValue(&I, DAG.getVectorShuffle(VT, DL, Src1, Src2, Mask));
    return;
  }

  // Normalize the shuffle vector since mask and vector lengths don't match.
  if (SrcNumElts < MaskNumElts) {
    // The mask is longer than the source vectors. We can use concatenated
    // vectors to make the mask and vector lengths match.

    if (MaskNumElts % SrcNumElts == 0) {
      // Mask length is a multiple of the source vector length.
      // Check if the shuffle is some kind of concatenation of the input
      // vectors.
      unsigned NumConcat = MaskNumElts / SrcNumElts;
      bool IsConcat = true;
      SmallVector<int, 8> ConcatSrcs(NumConcat, -1);
      for (unsigned i = 0; i != MaskNumElts; ++i) {
        int Idx = Mask[i];
        if (Idx < 0)
          continue;
        // Ensure the indices in each SrcVT sized piece are sequential and that
        // the same source is used for the whole piece.
        if ((Idx % SrcNumElts != (i % SrcNumElts)) ||
            (ConcatSrcs[i / SrcNumElts] >= 0 &&
             ConcatSrcs[i / SrcNumElts] != (int)(Idx / SrcNumElts))) {
          IsConcat = false;
          break;
        }
        // Remember which source this index came from.
        ConcatSrcs[i / SrcNumElts] = Idx / SrcNumElts;
      }

      // The shuffle is concatenating multiple vectors together. Just emit
      // a CONCAT_VECTORS operation.
      if (IsConcat) {
        SmallVector<SDValue, 8> ConcatOps;
        for (auto Src : ConcatSrcs) {
          if (Src < 0)
            ConcatOps.push_back(DAG.getUNDEF(SrcVT));
          else if (Src == 0)
            ConcatOps.push_back(Src1);
          else
            ConcatOps.push_back(Src2);
        }
        setValue(&I, DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, ConcatOps));
        return;
      }
    }

    unsigned PaddedMaskNumElts = alignTo(MaskNumElts, SrcNumElts);
    unsigned NumConcat = PaddedMaskNumElts / SrcNumElts;
    EVT PaddedVT = EVT::getVectorVT(*DAG.getContext(), VT.getScalarType(),
                                    PaddedMaskNumElts);

    // Pad both vectors with undefs to make them the same length as the mask.
    SDValue UndefVal = DAG.getUNDEF(SrcVT);

    SmallVector<SDValue, 8> MOps1(NumConcat, UndefVal);
    SmallVector<SDValue, 8> MOps2(NumConcat, UndefVal);
    MOps1[0] = Src1;
    MOps2[0] = Src2;

    Src1 = Src1.isUndef()
               ? DAG.getUNDEF(PaddedVT)
               : DAG.getNode(ISD::CONCAT_VECTORS, DL, PaddedVT, MOps1);
    Src2 = Src2.isUndef()
               ? DAG.getUNDEF(PaddedVT)
               : DAG.getNode(ISD::CONCAT_VECTORS, DL, PaddedVT, MOps2);

    // Readjust mask for new input vector length.
    SmallVector<int, 8> MappedOps(PaddedMaskNumElts, -1);
    for (unsigned i = 0; i != MaskNumElts; ++i) {
      int Idx = Mask[i];
      if (Idx >= (int)SrcNumElts)
        Idx -= SrcNumElts - PaddedMaskNumElts;
      MappedOps[i] = Idx;
    }

    SDValue Result = DAG.getVectorShuffle(PaddedVT, DL, Src1, Src2, MappedOps);

    // If the concatenated vector was padded, extract a subvector with the
    // correct number of elements.
    if (MaskNumElts != PaddedMaskNumElts)
      Result = DAG.getNode(
          ISD::EXTRACT_SUBVECTOR, DL, VT, Result,
          DAG.getConstant(0, DL, TLI.getVectorIdxTy(DAG.getDataLayout())));

    setValue(&I, Result);
    return;
  }

  if (SrcNumElts > MaskNumElts) {
    // Analyze the access pattern of the vector to see if we can extract
    // two subvectors and do the shuffle.
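    // E.g. with SrcNumElts = 8 and MaskNumElts = 4, the mask <4, 5, 6, 7>
    // reads one contiguous MaskNumElts-sized window of the first source, so
    // the shuffle can become a single EXTRACT_SUBVECTOR at index 4.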
    int StartIdx[2] = { -1, -1 }; // StartIdx to extract from
    bool CanExtract = true;
    for (int Idx : Mask) {
      unsigned Input = 0;
      if (Idx < 0)
        continue;

      if (Idx >= (int)SrcNumElts) {
        Input = 1;
        Idx -= SrcNumElts;
      }

      // If all the indices come from the same MaskNumElts sized portion of
      // the sources we can use extract. Also make sure the extract wouldn't
      // extract past the end of the source.
      int NewStartIdx = alignDown(Idx, MaskNumElts);
      if (NewStartIdx + MaskNumElts > SrcNumElts ||
          (StartIdx[Input] >= 0 && StartIdx[Input] != NewStartIdx))
        CanExtract = false;
      // Make sure we always update StartIdx as we use it to track if all
      // elements are undef.
      StartIdx[Input] = NewStartIdx;
    }

    if (StartIdx[0] < 0 && StartIdx[1] < 0) {
      setValue(&I, DAG.getUNDEF(VT)); // Vectors are not used.
      return;
    }
    if (CanExtract) {
      // Extract appropriate subvector and generate a vector shuffle.
      for (unsigned Input = 0; Input < 2; ++Input) {
        SDValue &Src = Input == 0 ? Src1 : Src2;
        if (StartIdx[Input] < 0)
          Src = DAG.getUNDEF(VT);
        else {
          Src = DAG.getNode(
              ISD::EXTRACT_SUBVECTOR, DL, VT, Src,
              DAG.getConstant(StartIdx[Input], DL,
                              TLI.getVectorIdxTy(DAG.getDataLayout())));
        }
      }

      // Calculate new mask.
      SmallVector<int, 8> MappedOps(Mask.begin(), Mask.end());
      for (int &Idx : MappedOps) {
        if (Idx >= (int)SrcNumElts)
          Idx -= SrcNumElts + StartIdx[1] - MaskNumElts;
        else if (Idx >= 0)
          Idx -= StartIdx[0];
      }

      setValue(&I, DAG.getVectorShuffle(VT, DL, Src1, Src2, MappedOps));
      return;
    }
  }

  // We can't use either concat vectors or extract subvectors, so fall back
  // to replacing the shuffle with extract and build vector.
  EVT EltVT = VT.getVectorElementType();
  EVT IdxVT = TLI.getVectorIdxTy(DAG.getDataLayout());
  SmallVector<SDValue,8> Ops;
  for (int Idx : Mask) {
    SDValue Res;

    if (Idx < 0) {
      Res = DAG.getUNDEF(EltVT);
    } else {
      SDValue &Src = Idx < (int)SrcNumElts ? Src1 : Src2;
      if (Idx >= (int)SrcNumElts) Idx -= SrcNumElts;

      Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL,
                        EltVT, Src, DAG.getConstant(Idx, DL, IdxVT));
    }

    Ops.push_back(Res);
  }

  setValue(&I, DAG.getBuildVector(VT, DL, Ops));
}

void SelectionDAGBuilder::visitInsertValue(const User &I) {
  ArrayRef<unsigned> Indices;
  if (const InsertValueInst *IV = dyn_cast<InsertValueInst>(&I))
    Indices = IV->getIndices();
  else
    Indices = cast<ConstantExpr>(&I)->getIndices();

  const Value *Op0 = I.getOperand(0);
  const Value *Op1 = I.getOperand(1);
  Type *AggTy = I.getType();
  Type *ValTy = Op1->getType();
  bool IntoUndef = isa<UndefValue>(Op0);
  bool FromUndef = isa<UndefValue>(Op1);

  unsigned LinearIndex = ComputeLinearIndex(AggTy, Indices);

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SmallVector<EVT, 4> AggValueVTs;
  ComputeValueVTs(TLI, DAG.getDataLayout(), AggTy, AggValueVTs);
  SmallVector<EVT, 4> ValValueVTs;
  ComputeValueVTs(TLI, DAG.getDataLayout(), ValTy, ValValueVTs);

  unsigned NumAggValues = AggValueVTs.size();
  unsigned NumValValues = ValValueVTs.size();
  SmallVector<SDValue, 4> Values(NumAggValues);

  // Ignore an insertvalue that produces an empty object
  if (!NumAggValues) {
    setValue(&I, DAG.getUNDEF(MVT(MVT::Other)));
    return;
  }

  SDValue Agg = getValue(Op0);
  unsigned i = 0;
  // Copy the beginning value(s) from the original aggregate.
  for (; i != LinearIndex; ++i)
    Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
                SDValue(Agg.getNode(), Agg.getResNo() + i);
  // Copy values from the inserted value(s).
  if (NumValValues) {
    SDValue Val = getValue(Op1);
    for (; i != LinearIndex + NumValValues; ++i)
      Values[i] = FromUndef ? DAG.getUNDEF(AggValueVTs[i]) :
                  SDValue(Val.getNode(), Val.getResNo() + i - LinearIndex);
  }
  // Copy remaining value(s) from the original aggregate.
  for (; i != NumAggValues; ++i)
    Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
                SDValue(Agg.getNode(), Agg.getResNo() + i);

  setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
                           DAG.getVTList(AggValueVTs), Values));
}

void SelectionDAGBuilder::visitExtractValue(const User &I) {
  ArrayRef<unsigned> Indices;
  if (const ExtractValueInst *EV = dyn_cast<ExtractValueInst>(&I))
    Indices = EV->getIndices();
  else
    Indices = cast<ConstantExpr>(&I)->getIndices();

  const Value *Op0 = I.getOperand(0);
  Type *AggTy = Op0->getType();
  Type *ValTy = I.getType();
  bool OutOfUndef = isa<UndefValue>(Op0);

  unsigned LinearIndex = ComputeLinearIndex(AggTy, Indices);

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SmallVector<EVT, 4> ValValueVTs;
  ComputeValueVTs(TLI, DAG.getDataLayout(), ValTy, ValValueVTs);

  unsigned NumValValues = ValValueVTs.size();

  // Ignore an extractvalue that produces an empty object
  if (!NumValValues) {
    setValue(&I, DAG.getUNDEF(MVT(MVT::Other)));
    return;
  }

  SmallVector<SDValue, 4> Values(NumValValues);

  SDValue Agg = getValue(Op0);
  // Copy out the selected value(s).
  for (unsigned i = LinearIndex; i != LinearIndex + NumValValues; ++i)
    Values[i - LinearIndex] =
        OutOfUndef ?
          DAG.getUNDEF(Agg.getNode()->getValueType(Agg.getResNo() + i)) :
          SDValue(Agg.getNode(), Agg.getResNo() + i);

  setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
                           DAG.getVTList(ValValueVTs), Values));
}

void SelectionDAGBuilder::visitGetElementPtr(const User &I) {
  Value *Op0 = I.getOperand(0);
  // Note that the pointer operand may be a vector of pointers. Take the scalar
  // element which holds a pointer.
  unsigned AS = Op0->getType()->getScalarType()->getPointerAddressSpace();
  SDValue N = getValue(Op0);
  SDLoc dl = getCurSDLoc();

  // Normalize Vector GEP - all scalar operands should be converted to the
  // splat vector.
  unsigned VectorWidth = I.getType()->isVectorTy() ?
    cast<VectorType>(I.getType())->getVectorNumElements() : 0;

  if (VectorWidth && !N.getValueType().isVector()) {
    LLVMContext &Context = *DAG.getContext();
    EVT VT = EVT::getVectorVT(Context, N.getValueType(), VectorWidth);
    N = DAG.getSplatBuildVector(VT, dl, N);
  }

  for (gep_type_iterator GTI = gep_type_begin(&I), E = gep_type_end(&I);
       GTI != E; ++GTI) {
    const Value *Idx = GTI.getOperand();
    if (StructType *StTy = GTI.getStructTypeOrNull()) {
      unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
      if (Field) {
        // N = N + Offset
        uint64_t Offset = DL->getStructLayout(StTy)->getElementOffset(Field);

        // In an inbounds GEP with an offset that is nonnegative even when
        // interpreted as signed, assume there is no unsigned overflow.
        SDNodeFlags Flags;
        if (int64_t(Offset) >= 0 && cast<GEPOperator>(I).isInBounds())
          Flags.setNoUnsignedWrap(true);

        N = DAG.getNode(ISD::ADD, dl, N.getValueType(), N,
                        DAG.getConstant(Offset, dl, N.getValueType()), Flags);
      }
    } else {
      MVT PtrTy =
          DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout(), AS);
      unsigned PtrSize = PtrTy.getSizeInBits();
      APInt ElementSize(PtrSize, DL->getTypeAllocSize(GTI.getIndexedType()));

      // If this is a scalar constant or a splat vector of constants,
      // handle it quickly.
      const auto *CI = dyn_cast<ConstantInt>(Idx);
      if (!CI && isa<ConstantDataVector>(Idx) &&
          cast<ConstantDataVector>(Idx)->getSplatValue())
        CI = cast<ConstantInt>(cast<ConstantDataVector>(Idx)->getSplatValue());

      if (CI) {
        if (CI->isZero())
          continue;
        APInt Offs = ElementSize * CI->getValue().sextOrTrunc(PtrSize);
        LLVMContext &Context = *DAG.getContext();
        SDValue OffsVal = VectorWidth ?
            DAG.getConstant(Offs, dl,
                            EVT::getVectorVT(Context, PtrTy, VectorWidth)) :
            DAG.getConstant(Offs, dl, PtrTy);

        // In an inbounds GEP with an offset that is nonnegative even when
        // interpreted as signed, assume there is no unsigned overflow.
        SDNodeFlags Flags;
        if (Offs.isNonNegative() && cast<GEPOperator>(I).isInBounds())
          Flags.setNoUnsignedWrap(true);

        N = DAG.getNode(ISD::ADD, dl, N.getValueType(), N, OffsVal, Flags);
        continue;
      }

      // N = N + Idx * ElementSize;
      SDValue IdxN = getValue(Idx);

      if (!IdxN.getValueType().isVector() && VectorWidth) {
        EVT VT = EVT::getVectorVT(*DAG.getContext(), IdxN.getValueType(),
                                  VectorWidth);
        IdxN = DAG.getSplatBuildVector(VT, dl, IdxN);
      }

      // If the index is smaller or larger than intptr_t, truncate or extend
      // it.
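      // GEP indices are interpreted as signed integers, so a narrower index
      // is sign extended rather than zero extended to the pointer width.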
      IdxN = DAG.getSExtOrTrunc(IdxN, dl, N.getValueType());

      // If this is a multiply by a power of two, turn it into a shl
      // immediately. This is a very common case.
      if (ElementSize != 1) {
        if (ElementSize.isPowerOf2()) {
          unsigned Amt = ElementSize.logBase2();
          IdxN = DAG.getNode(ISD::SHL, dl,
                             N.getValueType(), IdxN,
                             DAG.getConstant(Amt, dl, IdxN.getValueType()));
        } else {
          SDValue Scale = DAG.getConstant(ElementSize, dl, IdxN.getValueType());
          IdxN = DAG.getNode(ISD::MUL, dl,
                             N.getValueType(), IdxN, Scale);
        }
      }

      N = DAG.getNode(ISD::ADD, dl,
                      N.getValueType(), N, IdxN);
    }
  }

  setValue(&I, N);
}

void SelectionDAGBuilder::visitAlloca(const AllocaInst &I) {
  // If this is a fixed sized alloca in the entry block of the function,
  // allocate it statically on the stack.
  if (FuncInfo.StaticAllocaMap.count(&I))
    return;   // getValue will auto-populate this.

  SDLoc dl = getCurSDLoc();
  Type *Ty = I.getAllocatedType();
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  auto &DL = DAG.getDataLayout();
  uint64_t TySize = DL.getTypeAllocSize(Ty);
  unsigned Align =
      std::max((unsigned)DL.getPrefTypeAlignment(Ty), I.getAlignment());

  SDValue AllocSize = getValue(I.getArraySize());

  EVT IntPtr = TLI.getPointerTy(DAG.getDataLayout());
  if (AllocSize.getValueType() != IntPtr)
    AllocSize = DAG.getZExtOrTrunc(AllocSize, dl, IntPtr);

  AllocSize = DAG.getNode(ISD::MUL, dl, IntPtr,
                          AllocSize,
                          DAG.getConstant(TySize, dl, IntPtr));

  // Handle alignment. If the requested alignment is less than or equal to
  // the stack alignment, ignore it. If the requested alignment is greater
  // than the stack alignment, we note this in the DYNAMIC_STACKALLOC node.
  unsigned StackAlign =
      DAG.getSubtarget().getFrameLowering()->getStackAlignment();
  if (Align <= StackAlign)
    Align = 0;

  // Round the size of the allocation up to the stack alignment size
  // by adding SA-1 to the size. This doesn't overflow because we're computing
  // an address inside an alloca.
  SDNodeFlags Flags;
  Flags.setNoUnsignedWrap(true);
  AllocSize = DAG.getNode(ISD::ADD, dl,
                          AllocSize.getValueType(), AllocSize,
                          DAG.getIntPtrConstant(StackAlign - 1, dl), Flags);

  // Mask out the low bits for alignment purposes.
  AllocSize = DAG.getNode(ISD::AND, dl,
                          AllocSize.getValueType(), AllocSize,
                          DAG.getIntPtrConstant(~(uint64_t)(StackAlign - 1),
                                                dl));

  SDValue Ops[] = { getRoot(), AllocSize, DAG.getIntPtrConstant(Align, dl) };
  SDVTList VTs = DAG.getVTList(AllocSize.getValueType(), MVT::Other);
  SDValue DSA = DAG.getNode(ISD::DYNAMIC_STACKALLOC, dl, VTs, Ops);
  setValue(&I, DSA);
  DAG.setRoot(DSA.getValue(1));

  assert(FuncInfo.MF->getFrameInfo().hasVarSizedObjects());
}

void SelectionDAGBuilder::visitLoad(const LoadInst &I) {
  if (I.isAtomic())
    return visitAtomicLoad(I);

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  const Value *SV = I.getOperand(0);
  if (TLI.supportSwiftError()) {
    // Swifterror values can come from either a function parameter with
    // swifterror attribute or an alloca with swifterror attribute.
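    // E.g. (illustrative IR):
    //   %err = alloca swifterror %swift.error*
    //   %e = load %swift.error*, %swift.error** %err
    // must be lowered via visitLoadFromSwiftError, not as an ordinary load.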
    if (const Argument *Arg = dyn_cast<Argument>(SV)) {
      if (Arg->hasSwiftErrorAttr())
        return visitLoadFromSwiftError(I);
    }

    if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(SV)) {
      if (Alloca->isSwiftError())
        return visitLoadFromSwiftError(I);
    }
  }

  SDValue Ptr = getValue(SV);

  Type *Ty = I.getType();

  bool isVolatile = I.isVolatile();
  bool isNonTemporal = I.getMetadata(LLVMContext::MD_nontemporal) != nullptr;
  bool isInvariant = I.getMetadata(LLVMContext::MD_invariant_load) != nullptr;
  bool isDereferenceable = isDereferenceablePointer(SV, DAG.getDataLayout());
  unsigned Alignment = I.getAlignment();

  AAMDNodes AAInfo;
  I.getAAMetadata(AAInfo);
  const MDNode *Ranges = I.getMetadata(LLVMContext::MD_range);

  SmallVector<EVT, 4> ValueVTs;
  SmallVector<uint64_t, 4> Offsets;
  ComputeValueVTs(TLI, DAG.getDataLayout(), Ty, ValueVTs, &Offsets);
  unsigned NumValues = ValueVTs.size();
  if (NumValues == 0)
    return;

  SDValue Root;
  bool ConstantMemory = false;
  if (isVolatile || NumValues > MaxParallelChains)
    // Serialize volatile loads with other side effects.
    Root = getRoot();
  else if (AA && AA->pointsToConstantMemory(MemoryLocation(
               SV, DAG.getDataLayout().getTypeStoreSize(Ty), AAInfo))) {
    // Do not serialize (non-volatile) loads of constant memory with anything.
    Root = DAG.getEntryNode();
    ConstantMemory = true;
  } else {
    // Do not serialize non-volatile loads against each other.
    Root = DAG.getRoot();
  }

  SDLoc dl = getCurSDLoc();

  if (isVolatile)
    Root = TLI.prepareVolatileOrAtomicLoad(Root, dl, DAG);

  // An aggregate load cannot wrap around the address space, so offsets to its
  // parts don't wrap either.
  SDNodeFlags Flags;
  Flags.setNoUnsignedWrap(true);

  SmallVector<SDValue, 4> Values(NumValues);
  SmallVector<SDValue, 4> Chains(std::min(MaxParallelChains, NumValues));
  EVT PtrVT = Ptr.getValueType();
  unsigned ChainI = 0;
  for (unsigned i = 0; i != NumValues; ++i, ++ChainI) {
    // Serializing loads here may result in excessive register pressure, and
    // TokenFactor places arbitrary choke points on the scheduler. SD scheduling
    // could recover a bit by hoisting nodes upward in the chain by recognizing
    // they are side-effect free or do not alias. The optimizer should really
    // avoid this case by converting large object/array copies to llvm.memcpy
    // (MaxParallelChains should always remain as failsafe).
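    // E.g. a load of a first-class aggregate with more scalar parts than
    // MaxParallelChains is split here into several TokenFactor'd groups of
    // chains rather than one arbitrarily wide TokenFactor.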
    if (ChainI == MaxParallelChains) {
      assert(PendingLoads.empty() && "PendingLoads must be serialized first");
      SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                                  makeArrayRef(Chains.data(), ChainI));
      Root = Chain;
      ChainI = 0;
    }
    SDValue A = DAG.getNode(ISD::ADD, dl,
                            PtrVT, Ptr,
                            DAG.getConstant(Offsets[i], dl, PtrVT),
                            Flags);
    auto MMOFlags = MachineMemOperand::MONone;
    if (isVolatile)
      MMOFlags |= MachineMemOperand::MOVolatile;
    if (isNonTemporal)
      MMOFlags |= MachineMemOperand::MONonTemporal;
    if (isInvariant)
      MMOFlags |= MachineMemOperand::MOInvariant;
    if (isDereferenceable)
      MMOFlags |= MachineMemOperand::MODereferenceable;
    MMOFlags |= TLI.getMMOFlags(I);

    SDValue L = DAG.getLoad(ValueVTs[i], dl, Root, A,
                            MachinePointerInfo(SV, Offsets[i]), Alignment,
                            MMOFlags, AAInfo, Ranges);

    Values[i] = L;
    Chains[ChainI] = L.getValue(1);
  }

  if (!ConstantMemory) {
    SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                                makeArrayRef(Chains.data(), ChainI));
    if (isVolatile)
      DAG.setRoot(Chain);
    else
      PendingLoads.push_back(Chain);
  }

  setValue(&I, DAG.getNode(ISD::MERGE_VALUES, dl,
                           DAG.getVTList(ValueVTs), Values));
}

void SelectionDAGBuilder::visitStoreToSwiftError(const StoreInst &I) {
  assert(DAG.getTargetLoweringInfo().supportSwiftError() &&
         "call visitStoreToSwiftError when backend supports swifterror");

  SmallVector<EVT, 4> ValueVTs;
  SmallVector<uint64_t, 4> Offsets;
  const Value *SrcV = I.getOperand(0);
  ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(),
                  SrcV->getType(), ValueVTs, &Offsets);
  assert(ValueVTs.size() == 1 && Offsets[0] == 0 &&
         "expect a single EVT for swifterror");

  SDValue Src = getValue(SrcV);
  // Create a virtual register, then update the virtual register.
  unsigned VReg; bool CreatedVReg;
  std::tie(VReg, CreatedVReg) = FuncInfo.getOrCreateSwiftErrorVRegDefAt(&I);
  // Chain, DL, Reg, N or Chain, DL, Reg, N, Glue
  // Chain can be getRoot or getControlRoot.
  SDValue CopyNode = DAG.getCopyToReg(getRoot(), getCurSDLoc(), VReg,
                                      SDValue(Src.getNode(), Src.getResNo()));
  DAG.setRoot(CopyNode);
  if (CreatedVReg)
    FuncInfo.setCurrentSwiftErrorVReg(FuncInfo.MBB, I.getOperand(1), VReg);
}

void SelectionDAGBuilder::visitLoadFromSwiftError(const LoadInst &I) {
  assert(DAG.getTargetLoweringInfo().supportSwiftError() &&
         "call visitLoadFromSwiftError when backend supports swifterror");

  assert(!I.isVolatile() &&
         I.getMetadata(LLVMContext::MD_nontemporal) == nullptr &&
         I.getMetadata(LLVMContext::MD_invariant_load) == nullptr &&
         "volatile, non-temporal and invariant loads are not supported "
         "for load_from_swift_error");

  const Value *SV = I.getOperand(0);
  Type *Ty = I.getType();
  AAMDNodes AAInfo;
  I.getAAMetadata(AAInfo);
  assert((!AA || !AA->pointsToConstantMemory(MemoryLocation(
             SV, DAG.getDataLayout().getTypeStoreSize(Ty), AAInfo))) &&
         "load_from_swift_error should not be constant memory");

  SmallVector<EVT, 4> ValueVTs;
  SmallVector<uint64_t, 4> Offsets;
  ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(), Ty,
                  ValueVTs, &Offsets);
  assert(ValueVTs.size() == 1 && Offsets[0] == 0 &&
         "expect a single EVT for swifterror");

  // Chain, DL, Reg, VT, Glue or Chain, DL, Reg, VT
  SDValue L = DAG.getCopyFromReg(
      getRoot(), getCurSDLoc(),
      FuncInfo.getOrCreateSwiftErrorVRegUseAt(&I, FuncInfo.MBB, SV).first,
      ValueVTs[0]);

  setValue(&I, L);
}

void SelectionDAGBuilder::visitStore(const StoreInst &I) {
  if (I.isAtomic())
    return visitAtomicStore(I);

  const Value *SrcV = I.getOperand(0);
  const Value *PtrV = I.getOperand(1);

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (TLI.supportSwiftError()) {
    // Swifterror values can come from either a function parameter with
    // swifterror attribute or an alloca with swifterror attribute.
    if (const Argument *Arg = dyn_cast<Argument>(PtrV)) {
      if (Arg->hasSwiftErrorAttr())
        return visitStoreToSwiftError(I);
    }

    if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(PtrV)) {
      if (Alloca->isSwiftError())
        return visitStoreToSwiftError(I);
    }
  }

  SmallVector<EVT, 4> ValueVTs;
  SmallVector<uint64_t, 4> Offsets;
  ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(),
                  SrcV->getType(), ValueVTs, &Offsets);
  unsigned NumValues = ValueVTs.size();
  if (NumValues == 0)
    return;

  // Get the lowered operands. Note that we do this after
  // checking if NumResults is zero, because with zero results
  // the operands won't have values in the map.
  SDValue Src = getValue(SrcV);
  SDValue Ptr = getValue(PtrV);

  SDValue Root = getRoot();
  SmallVector<SDValue, 4> Chains(std::min(MaxParallelChains, NumValues));
  SDLoc dl = getCurSDLoc();
  EVT PtrVT = Ptr.getValueType();
  unsigned Alignment = I.getAlignment();
  AAMDNodes AAInfo;
  I.getAAMetadata(AAInfo);

  auto MMOFlags = MachineMemOperand::MONone;
  if (I.isVolatile())
    MMOFlags |= MachineMemOperand::MOVolatile;
  if (I.getMetadata(LLVMContext::MD_nontemporal) != nullptr)
    MMOFlags |= MachineMemOperand::MONonTemporal;
  MMOFlags |= TLI.getMMOFlags(I);

  // An aggregate store cannot wrap around the address space, so offsets to
  // its parts don't wrap either.
  SDNodeFlags Flags;
  Flags.setNoUnsignedWrap(true);

  unsigned ChainI = 0;
  for (unsigned i = 0; i != NumValues; ++i, ++ChainI) {
    // See visitLoad comments.
    if (ChainI == MaxParallelChains) {
      SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                                  makeArrayRef(Chains.data(), ChainI));
      Root = Chain;
      ChainI = 0;
    }
    SDValue Add = DAG.getNode(ISD::ADD, dl, PtrVT, Ptr,
                              DAG.getConstant(Offsets[i], dl, PtrVT), Flags);
    SDValue St = DAG.getStore(
        Root, dl, SDValue(Src.getNode(), Src.getResNo() + i), Add,
        MachinePointerInfo(PtrV, Offsets[i]), Alignment, MMOFlags, AAInfo);
    Chains[ChainI] = St;
  }

  SDValue StoreNode = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                                  makeArrayRef(Chains.data(), ChainI));
  DAG.setRoot(StoreNode);
}

void SelectionDAGBuilder::visitMaskedStore(const CallInst &I,
                                           bool IsCompressing) {
  SDLoc sdl = getCurSDLoc();

  auto getMaskedStoreOps = [&](Value* &Ptr, Value* &Mask, Value* &Src0,
                               unsigned& Alignment) {
    // llvm.masked.store.*(Src0, Ptr, alignment, Mask)
    Src0 = I.getArgOperand(0);
    Ptr = I.getArgOperand(1);
    Alignment = cast<ConstantInt>(I.getArgOperand(2))->getZExtValue();
    Mask = I.getArgOperand(3);
  };
  auto getCompressingStoreOps = [&](Value* &Ptr, Value* &Mask, Value* &Src0,
                                    unsigned& Alignment) {
    // llvm.masked.compressstore.*(Src0, Ptr, Mask)
    Src0 = I.getArgOperand(0);
    Ptr = I.getArgOperand(1);
    Mask = I.getArgOperand(2);
    Alignment = 0;
  };

  Value *PtrOperand, *MaskOperand, *Src0Operand;
  unsigned Alignment;
  if (IsCompressing)
    getCompressingStoreOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
  else
    getMaskedStoreOps(PtrOperand, MaskOperand, Src0Operand, Alignment);

  SDValue Ptr = getValue(PtrOperand);
  SDValue Src0 = getValue(Src0Operand);
  SDValue Mask = getValue(MaskOperand);

  EVT VT = Src0.getValueType();
  if (!Alignment)
    Alignment = DAG.getEVTAlignment(VT);

  AAMDNodes AAInfo;
  I.getAAMetadata(AAInfo);

  MachineMemOperand *MMO =
    DAG.getMachineFunction().
    getMachineMemOperand(MachinePointerInfo(PtrOperand),
                         MachineMemOperand::MOStore, VT.getStoreSize(),
                         Alignment, AAInfo);
  SDValue StoreNode = DAG.getMaskedStore(getRoot(), sdl, Src0, Ptr, Mask, VT,
                                         MMO, false /* Truncating */,
                                         IsCompressing);
  DAG.setRoot(StoreNode);
  setValue(&I, StoreNode);
}

// Get a uniform base for the Gather/Scatter intrinsic.
// The first argument of the Gather/Scatter intrinsic is a vector of pointers.
// We try to represent it as a base pointer + vector of indices.
// Usually, the vector of pointers comes from a 'getelementptr' instruction.
// The first operand of the GEP may be a single pointer or a vector of
// pointers. Example:
//   %gep.ptr = getelementptr i32, <8 x i32*> %vptr, <8 x i32> %ind
// or
//   %gep.ptr = getelementptr i32, i32* %ptr, <8 x i32> %ind
//   %res = call <8 x i32> @llvm.masked.gather.v8i32(<8 x i32*> %gep.ptr, ..
//
// When the first GEP operand is a single pointer, it is the uniform base we
// are looking for. If the first operand of the GEP is a splat vector, we
// extract the splat value and use it as a uniform base.
// In all other cases the function returns 'false'.
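// E.g. (illustrative IR for the splat-vector case):
//   %ins = insertelement <8 x i32*> undef, i32* %base, i32 0
//   %splat = shufflevector <8 x i32*> %ins, <8 x i32*> undef,
//                          <8 x i32> zeroinitializer
//   %gep.ptr = getelementptr i32, <8 x i32*> %splat, <8 x i32> %ind
// also yields %base as the uniform base.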
//
static bool getUniformBase(const Value* &Ptr, SDValue& Base, SDValue& Index,
                           SelectionDAGBuilder* SDB) {

  SelectionDAG& DAG = SDB->DAG;
  LLVMContext &Context = *DAG.getContext();

  assert(Ptr->getType()->isVectorTy() && "Unexpected pointer type");
  const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
  if (!GEP || GEP->getNumOperands() > 2)
    return false;

  const Value *GEPPtr = GEP->getPointerOperand();
  if (!GEPPtr->getType()->isVectorTy())
    Ptr = GEPPtr;
  else if (!(Ptr = getSplatValue(GEPPtr)))
    return false;

  Value *IndexVal = GEP->getOperand(1);

  // The operands of the GEP may be defined in another basic block.
  // In this case we'll not find nodes for the operands.
  if (!SDB->findValue(Ptr) || !SDB->findValue(IndexVal))
    return false;

  Base = SDB->getValue(Ptr);
  Index = SDB->getValue(IndexVal);

  // Suppress sign extension.
  if (SExtInst* Sext = dyn_cast<SExtInst>(IndexVal)) {
    if (SDB->findValue(Sext->getOperand(0))) {
      IndexVal = Sext->getOperand(0);
      Index = SDB->getValue(IndexVal);
    }
  }
  if (!Index.getValueType().isVector()) {
    unsigned GEPWidth = GEP->getType()->getVectorNumElements();
    EVT VT = EVT::getVectorVT(Context, Index.getValueType(), GEPWidth);
    Index = DAG.getSplatBuildVector(VT, SDLoc(Index), Index);
  }
  return true;
}

void SelectionDAGBuilder::visitMaskedScatter(const CallInst &I) {
  SDLoc sdl = getCurSDLoc();

  // llvm.masked.scatter.*(Src0, Ptrs, alignment, Mask)
  const Value *Ptr = I.getArgOperand(1);
  SDValue Src0 = getValue(I.getArgOperand(0));
  SDValue Mask = getValue(I.getArgOperand(3));
  EVT VT = Src0.getValueType();
  unsigned Alignment = (cast<ConstantInt>(I.getArgOperand(2)))->getZExtValue();
  if (!Alignment)
    Alignment = DAG.getEVTAlignment(VT);
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  AAMDNodes AAInfo;
  I.getAAMetadata(AAInfo);

  SDValue Base;
  SDValue Index;
  const Value *BasePtr = Ptr;
  bool UniformBase = getUniformBase(BasePtr, Base, Index, this);

  const Value *MemOpBasePtr = UniformBase ? BasePtr : nullptr;
  MachineMemOperand *MMO = DAG.getMachineFunction().
    getMachineMemOperand(MachinePointerInfo(MemOpBasePtr),
                         MachineMemOperand::MOStore, VT.getStoreSize(),
                         Alignment, AAInfo);
  if (!UniformBase) {
    Base = DAG.getTargetConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout()));
    Index = getValue(Ptr);
  }
  SDValue Ops[] = { getRoot(), Src0, Mask, Base, Index };
  SDValue Scatter = DAG.getMaskedScatter(DAG.getVTList(MVT::Other), VT, sdl,
                                         Ops, MMO);
  DAG.setRoot(Scatter);
  setValue(&I, Scatter);
}

void SelectionDAGBuilder::visitMaskedLoad(const CallInst &I, bool IsExpanding) {
  SDLoc sdl = getCurSDLoc();

  auto getMaskedLoadOps = [&](Value* &Ptr, Value* &Mask, Value* &Src0,
                              unsigned& Alignment) {
    // @llvm.masked.load.*(Ptr, alignment, Mask, Src0)
    Ptr = I.getArgOperand(0);
    Alignment = cast<ConstantInt>(I.getArgOperand(1))->getZExtValue();
    Mask = I.getArgOperand(2);
    Src0 = I.getArgOperand(3);
  };
  auto getExpandingLoadOps = [&](Value* &Ptr, Value* &Mask, Value* &Src0,
                                 unsigned& Alignment) {
    // @llvm.masked.expandload.*(Ptr, Mask, Src0)
    Ptr = I.getArgOperand(0);
    Alignment = 0;
    Mask = I.getArgOperand(1);
    Src0 = I.getArgOperand(2);
  };

  Value *PtrOperand, *MaskOperand, *Src0Operand;
  unsigned Alignment;
  if (IsExpanding)
    getExpandingLoadOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
  else
    getMaskedLoadOps(PtrOperand, MaskOperand, Src0Operand, Alignment);

  SDValue Ptr = getValue(PtrOperand);
  SDValue Src0 = getValue(Src0Operand);
  SDValue Mask = getValue(MaskOperand);

  EVT VT = Src0.getValueType();
  if (!Alignment)
    Alignment = DAG.getEVTAlignment(VT);

  AAMDNodes AAInfo;
  I.getAAMetadata(AAInfo);
  const MDNode *Ranges = I.getMetadata(LLVMContext::MD_range);

  // Do not serialize masked loads of constant memory with anything.
  bool AddToChain = !AA || !AA->pointsToConstantMemory(MemoryLocation(
      PtrOperand, DAG.getDataLayout().getTypeStoreSize(I.getType()), AAInfo));
  SDValue InChain = AddToChain ? DAG.getRoot() : DAG.getEntryNode();

  MachineMemOperand *MMO =
    DAG.getMachineFunction().
    getMachineMemOperand(MachinePointerInfo(PtrOperand),
                         MachineMemOperand::MOLoad, VT.getStoreSize(),
                         Alignment, AAInfo, Ranges);

  SDValue Load = DAG.getMaskedLoad(VT, sdl, InChain, Ptr, Mask, Src0, VT, MMO,
                                   ISD::NON_EXTLOAD, IsExpanding);
  if (AddToChain) {
    SDValue OutChain = Load.getValue(1);
    DAG.setRoot(OutChain);
  }
  setValue(&I, Load);
}

void SelectionDAGBuilder::visitMaskedGather(const CallInst &I) {
  SDLoc sdl = getCurSDLoc();

  // @llvm.masked.gather.*(Ptrs, alignment, Mask, Src0)
  const Value *Ptr = I.getArgOperand(0);
  SDValue Src0 = getValue(I.getArgOperand(3));
  SDValue Mask = getValue(I.getArgOperand(2));

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
  unsigned Alignment = (cast<ConstantInt>(I.getArgOperand(1)))->getZExtValue();
  if (!Alignment)
    Alignment = DAG.getEVTAlignment(VT);

  AAMDNodes AAInfo;
  I.getAAMetadata(AAInfo);
  const MDNode *Ranges = I.getMetadata(LLVMContext::MD_range);

  SDValue Root = DAG.getRoot();
  SDValue Base;
  SDValue Index;
  const Value *BasePtr = Ptr;
  bool UniformBase = getUniformBase(BasePtr, Base, Index, this);
  bool ConstantMemory = false;
  if (UniformBase &&
      AA && AA->pointsToConstantMemory(MemoryLocation(
          BasePtr, DAG.getDataLayout().getTypeStoreSize(I.getType()),
          AAInfo))) {
    // Do not serialize (non-volatile) loads of constant memory with anything.
    Root = DAG.getEntryNode();
    ConstantMemory = true;
  }

  MachineMemOperand *MMO =
    DAG.getMachineFunction().
    getMachineMemOperand(MachinePointerInfo(UniformBase ? BasePtr : nullptr),
                         MachineMemOperand::MOLoad, VT.getStoreSize(),
                         Alignment, AAInfo, Ranges);

  if (!UniformBase) {
    Base = DAG.getTargetConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout()));
    Index = getValue(Ptr);
  }
  SDValue Ops[] = { Root, Src0, Mask, Base, Index };
  SDValue Gather = DAG.getMaskedGather(DAG.getVTList(VT, MVT::Other), VT, sdl,
                                       Ops, MMO);

  SDValue OutChain = Gather.getValue(1);
  if (!ConstantMemory)
    PendingLoads.push_back(OutChain);
  setValue(&I, Gather);
}

void SelectionDAGBuilder::visitAtomicCmpXchg(const AtomicCmpXchgInst &I) {
  SDLoc dl = getCurSDLoc();
  AtomicOrdering SuccessOrder = I.getSuccessOrdering();
  AtomicOrdering FailureOrder = I.getFailureOrdering();
  SyncScope::ID SSID = I.getSyncScopeID();

  SDValue InChain = getRoot();

  MVT MemVT = getValue(I.getCompareOperand()).getSimpleValueType();
  SDVTList VTs = DAG.getVTList(MemVT, MVT::i1, MVT::Other);
  SDValue L = DAG.getAtomicCmpSwap(
      ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, dl, MemVT, VTs, InChain,
      getValue(I.getPointerOperand()), getValue(I.getCompareOperand()),
      getValue(I.getNewValOperand()), MachinePointerInfo(I.getPointerOperand()),
      /*Alignment=*/ 0, SuccessOrder, FailureOrder, SSID);

  SDValue OutChain = L.getValue(2);

  setValue(&I, L);
  DAG.setRoot(OutChain);
}

void SelectionDAGBuilder::visitAtomicRMW(const AtomicRMWInst &I) {
  SDLoc dl = getCurSDLoc();
  ISD::NodeType NT;
  switch (I.getOperation()) {
  default: llvm_unreachable("Unknown atomicrmw operation");
  case AtomicRMWInst::Xchg: NT = ISD::ATOMIC_SWAP; break;
  case AtomicRMWInst::Add:  NT = ISD::ATOMIC_LOAD_ADD; break;
  case AtomicRMWInst::Sub:  NT = ISD::ATOMIC_LOAD_SUB; break;
  case AtomicRMWInst::And:  NT = ISD::ATOMIC_LOAD_AND; break;
  case AtomicRMWInst::Nand: NT = ISD::ATOMIC_LOAD_NAND; break;
  case AtomicRMWInst::Or:   NT = ISD::ATOMIC_LOAD_OR; break;
  case AtomicRMWInst::Xor:  NT = ISD::ATOMIC_LOAD_XOR; break;
  case AtomicRMWInst::Max:  NT = ISD::ATOMIC_LOAD_MAX; break;
  case AtomicRMWInst::Min:  NT = ISD::ATOMIC_LOAD_MIN; break;
  case AtomicRMWInst::UMax: NT = ISD::ATOMIC_LOAD_UMAX; break;
  case AtomicRMWInst::UMin: NT = ISD::ATOMIC_LOAD_UMIN; break;
  }
  AtomicOrdering Order = I.getOrdering();
  SyncScope::ID SSID = I.getSyncScopeID();

  SDValue InChain = getRoot();

  SDValue L =
    DAG.getAtomic(NT, dl,
                  getValue(I.getValOperand()).getSimpleValueType(),
                  InChain,
                  getValue(I.getPointerOperand()),
                  getValue(I.getValOperand()),
                  I.getPointerOperand(),
                  /* Alignment=*/ 0, Order, SSID);

  SDValue OutChain = L.getValue(1);

  setValue(&I, L);
  DAG.setRoot(OutChain);
}

void SelectionDAGBuilder::visitFence(const FenceInst &I) {
  SDLoc dl = getCurSDLoc();
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue Ops[3];
  Ops[0] = getRoot();
  Ops[1] = DAG.getConstant((unsigned)I.getOrdering(), dl,
                           TLI.getFenceOperandTy(DAG.getDataLayout()));
  Ops[2] = DAG.getConstant(I.getSyncScopeID(), dl,
                           TLI.getFenceOperandTy(DAG.getDataLayout()));
  DAG.setRoot(DAG.getNode(ISD::ATOMIC_FENCE, dl, MVT::Other, Ops));
}

void SelectionDAGBuilder::visitAtomicLoad(const LoadInst &I) {
  SDLoc dl = getCurSDLoc();
  AtomicOrdering Order = I.getOrdering();
  SyncScope::ID SSID = I.getSyncScopeID();

  SDValue InChain = getRoot();

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());

  if (I.getAlignment() < VT.getSizeInBits() / 8)
    report_fatal_error("Cannot generate unaligned atomic load");

  MachineMemOperand *MMO =
      DAG.getMachineFunction().
      getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),
                           MachineMemOperand::MOVolatile |
                           MachineMemOperand::MOLoad,
                           VT.getStoreSize(),
                           I.getAlignment() ? I.getAlignment() :
                                              DAG.getEVTAlignment(VT),
                           AAMDNodes(), nullptr, SSID, Order);

  InChain = TLI.prepareVolatileOrAtomicLoad(InChain, dl, DAG);
  SDValue L =
      DAG.getAtomic(ISD::ATOMIC_LOAD, dl, VT, VT, InChain,
                    getValue(I.getPointerOperand()), MMO);

  SDValue OutChain = L.getValue(1);

  setValue(&I, L);
  DAG.setRoot(OutChain);
}

void SelectionDAGBuilder::visitAtomicStore(const StoreInst &I) {
  SDLoc dl = getCurSDLoc();

  AtomicOrdering Order = I.getOrdering();
  SyncScope::ID SSID = I.getSyncScopeID();

  SDValue InChain = getRoot();

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  EVT VT =
      TLI.getValueType(DAG.getDataLayout(), I.getValueOperand()->getType());

  if (I.getAlignment() < VT.getSizeInBits() / 8)
    report_fatal_error("Cannot generate unaligned atomic store");

  SDValue OutChain =
    DAG.getAtomic(ISD::ATOMIC_STORE, dl, VT,
                  InChain,
                  getValue(I.getPointerOperand()),
                  getValue(I.getValueOperand()),
                  I.getPointerOperand(), I.getAlignment(),
                  Order, SSID);

  DAG.setRoot(OutChain);
}

/// visitTargetIntrinsic - Lower a call of a target intrinsic to an INTRINSIC
/// node.
void SelectionDAGBuilder::visitTargetIntrinsic(const CallInst &I,
                                               unsigned Intrinsic) {
  // Ignore the callsite's attributes. A specific call site may be marked with
  // readnone, but the lowering code will expect the chain based on the
  // definition.
  const Function *F = I.getCalledFunction();
  bool HasChain = !F->doesNotAccessMemory();
  bool OnlyLoad = HasChain && F->onlyReadsMemory();

  // Build the operand list.
  SmallVector<SDValue, 8> Ops;
  if (HasChain) {  // If this intrinsic has side-effects, chainify it.
    if (OnlyLoad) {
      // We don't need to serialize loads against other loads.
      Ops.push_back(DAG.getRoot());
    } else {
      Ops.push_back(getRoot());
    }
  }

  // Info is set by getTgtMemIntrinsic.
  TargetLowering::IntrinsicInfo Info;
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  bool IsTgtIntrinsic = TLI.getTgtMemIntrinsic(Info, I, Intrinsic);

  // Add the intrinsic ID as an integer operand if it's not a target intrinsic.
  if (!IsTgtIntrinsic || Info.opc == ISD::INTRINSIC_VOID ||
      Info.opc == ISD::INTRINSIC_W_CHAIN)
    Ops.push_back(DAG.getTargetConstant(Intrinsic, getCurSDLoc(),
                                        TLI.getPointerTy(DAG.getDataLayout())));

  // Add all operands of the call to the operand list.
  for (unsigned i = 0, e = I.getNumArgOperands(); i != e; ++i) {
    SDValue Op = getValue(I.getArgOperand(i));
    Ops.push_back(Op);
  }

  SmallVector<EVT, 4> ValueVTs;
  ComputeValueVTs(TLI, DAG.getDataLayout(), I.getType(), ValueVTs);

  if (HasChain)
    ValueVTs.push_back(MVT::Other);

  SDVTList VTs = DAG.getVTList(ValueVTs);

  // Create the node.
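  // Four possibilities: a target memory intrinsic (a memory intrinsic node
  // carrying a MachineMemOperand), a side-effect-free intrinsic
  // (INTRINSIC_WO_CHAIN), a chained intrinsic with a result
  // (INTRINSIC_W_CHAIN), or a chained intrinsic without one (INTRINSIC_VOID).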
  SDValue Result;
  if (IsTgtIntrinsic) {
    // This is a target intrinsic that touches memory.
    Result = DAG.getMemIntrinsicNode(Info.opc, getCurSDLoc(),
                                     VTs, Ops, Info.memVT,
                                     MachinePointerInfo(Info.ptrVal, Info.offset),
                                     Info.align, Info.vol,
                                     Info.readMem, Info.writeMem, Info.size);
  } else if (!HasChain) {
    Result = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, getCurSDLoc(), VTs, Ops);
  } else if (!I.getType()->isVoidTy()) {
    Result = DAG.getNode(ISD::INTRINSIC_W_CHAIN, getCurSDLoc(), VTs, Ops);
  } else {
    Result = DAG.getNode(ISD::INTRINSIC_VOID, getCurSDLoc(), VTs, Ops);
  }

  if (HasChain) {
    SDValue Chain = Result.getValue(Result.getNode()->getNumValues()-1);
    if (OnlyLoad)
      PendingLoads.push_back(Chain);
    else
      DAG.setRoot(Chain);
  }

  if (!I.getType()->isVoidTy()) {
    if (VectorType *PTy = dyn_cast<VectorType>(I.getType())) {
      EVT VT = TLI.getValueType(DAG.getDataLayout(), PTy);
      Result = DAG.getNode(ISD::BITCAST, getCurSDLoc(), VT, Result);
    } else
      Result = lowerRangeToAssertZExt(DAG, I, Result);

    setValue(&I, Result);
  }
}

/// GetSignificand - Get the significand and build it into a floating-point
/// number with exponent of 1:
///
///   Op = (Op & 0x007fffff) | 0x3f800000;
///
/// where Op is the i32 bit representation of the floating-point value.
static SDValue GetSignificand(SelectionDAG &DAG, SDValue Op, const SDLoc &dl) {
  SDValue t1 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
                           DAG.getConstant(0x007fffff, dl, MVT::i32));
  SDValue t2 = DAG.getNode(ISD::OR, dl, MVT::i32, t1,
                           DAG.getConstant(0x3f800000, dl, MVT::i32));
  return DAG.getNode(ISD::BITCAST, dl, MVT::f32, t2);
}

/// GetExponent - Get the exponent:
///
///   (float)(int)(((Op & 0x7f800000) >> 23) - 127);
///
/// where Op is the i32 bit representation of the floating-point value.
static SDValue GetExponent(SelectionDAG &DAG, SDValue Op,
                           const TargetLowering &TLI, const SDLoc &dl) {
  SDValue t0 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
                           DAG.getConstant(0x7f800000, dl, MVT::i32));
  SDValue t1 = DAG.getNode(
      ISD::SRL, dl, MVT::i32, t0,
      DAG.getConstant(23, dl, TLI.getPointerTy(DAG.getDataLayout())));
  SDValue t2 = DAG.getNode(ISD::SUB, dl, MVT::i32, t1,
                           DAG.getConstant(127, dl, MVT::i32));
  return DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, t2);
}

/// getF32Constant - Get 32-bit floating point constant.
static SDValue getF32Constant(SelectionDAG &DAG, unsigned Flt,
                              const SDLoc &dl) {
  return DAG.getConstantFP(APFloat(APFloat::IEEEsingle(), APInt(32, Flt)), dl,
                           MVT::f32);
}

static SDValue getLimitedPrecisionExp2(SDValue t0, const SDLoc &dl,
                                       SelectionDAG &DAG) {
  // TODO: What fast-math-flags should be set on the floating-point nodes?
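  // The expansion computes 2^t0 as 2^IntegerPartOfX * 2^FractionalPartOfX:
  // the fractional part is approximated below with a minimax polynomial, and
  // the integer part is then folded directly into the exponent field of the
  // single-precision result.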

  // IntegerPartOfX = (int32_t)t0;
  SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, t0);

  // FractionalPartOfX = t0 - (float)IntegerPartOfX;
  SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, IntegerPartOfX);
  SDValue X = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0, t1);

  // IntegerPartOfX <<= 23;
  IntegerPartOfX = DAG.getNode(
      ISD::SHL, dl, MVT::i32, IntegerPartOfX,
      DAG.getConstant(23, dl, DAG.getTargetLoweringInfo().getPointerTy(
                                  DAG.getDataLayout())));

  SDValue TwoToFractionalPartOfX;
  if (LimitFloatPrecision <= 6) {
    // For floating-point precision of 6:
    //
    //   TwoToFractionalPartOfX =
    //     0.997535578f +
    //       (0.735607626f + 0.252464424f * x) * x;
    //
    // error 0.0144103317, which is 6 bits
    SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
                             getF32Constant(DAG, 0x3e814304, dl));
    SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
                             getF32Constant(DAG, 0x3f3c50c8, dl));
    SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
    TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
                                         getF32Constant(DAG, 0x3f7f5e7e, dl));
  } else if (LimitFloatPrecision <= 12) {
    // For floating-point precision of 12:
    //
    //   TwoToFractionalPartOfX =
    //     0.999892986f +
    //       (0.696457318f +
    //         (0.224338339f + 0.792043434e-1f * x) * x) * x;
    //
    // error 0.000107046256, which is 13 to 14 bits
    SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
                             getF32Constant(DAG, 0x3da235e3, dl));
    SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
                             getF32Constant(DAG, 0x3e65b8f3, dl));
    SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
    SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
                             getF32Constant(DAG, 0x3f324b07, dl));
    SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
    TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
                                         getF32Constant(DAG, 0x3f7ff8fd, dl));
  } else { // LimitFloatPrecision <= 18
    // For floating-point precision of 18:
    //
    //   TwoToFractionalPartOfX =
    //     0.999999982f +
    //       (0.693148872f +
    //         (0.240227044f +
    //           (0.554906021e-1f +
    //             (0.961591928e-2f +
    //               (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x;
    //
    // error 2.47208000*10^(-7), which is better than 18 bits
    SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
                             getF32Constant(DAG, 0x3924b03e, dl));
    SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
                             getF32Constant(DAG, 0x3ab24b87, dl));
    SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
    SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
                             getF32Constant(DAG, 0x3c1d8c17, dl));
    SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
    SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
                             getF32Constant(DAG, 0x3d634a1d, dl));
    SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
    SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
                             getF32Constant(DAG, 0x3e75fe14, dl));
    SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
    SDValue t11 = DAG.getNode(ISD::FADD, dl, MVT::f32, t10,
                              getF32Constant(DAG, 0x3f317234, dl));
    SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X);
    TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t12,
                                         getF32Constant(DAG, 0x3f800000, dl));
  }

  // Add the exponent into the result in integer domain.
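  // TwoToFractionalPartOfX lies in [1.0, 2.0), so its biased exponent field
  // is exactly 127; adding IntegerPartOfX (already shifted to bit 23) scales
  // the result by 2^IntegerPartOfX without a floating-point multiply.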
  SDValue t13 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, TwoToFractionalPartOfX);
  return DAG.getNode(ISD::BITCAST, dl, MVT::f32,
                     DAG.getNode(ISD::ADD, dl, MVT::i32, t13, IntegerPartOfX));
}

/// expandExp - Lower an exp intrinsic. Handles the special sequences for
/// limited-precision mode.
static SDValue expandExp(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
                         const TargetLowering &TLI) {
  if (Op.getValueType() == MVT::f32 &&
      LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {

    // Put the exponent in the right bit position for later addition to the
    // final result:
    //
    //   #define LOG2OFe 1.4426950f
    //   t0 = Op * LOG2OFe

    // TODO: What fast-math-flags should be set here?
    SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, Op,
                             getF32Constant(DAG, 0x3fb8aa3b, dl));
    return getLimitedPrecisionExp2(t0, dl, DAG);
  }

  // No special expansion.
  return DAG.getNode(ISD::FEXP, dl, Op.getValueType(), Op);
}

/// expandLog - Lower a log intrinsic. Handles the special sequences for
/// limited-precision mode.
static SDValue expandLog(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
                         const TargetLowering &TLI) {

  // TODO: What fast-math-flags should be set on the floating-point nodes?

  if (Op.getValueType() == MVT::f32 &&
      LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
    SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);

    // Scale the exponent by log(2) [0.69314718f].
    SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
    SDValue LogOfExponent = DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
                                        getF32Constant(DAG, 0x3f317218, dl));

    // Get the significand and build it into a floating-point number with
    // exponent of 1.
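    // This uses log(x) = log(2^e * m) = e*log(2) + log(m), with the
    // significand m in [1,2), so only log(m) remains to be approximated.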
    SDValue X = GetSignificand(DAG, Op1, dl);

    SDValue LogOfMantissa;
    if (LimitFloatPrecision <= 6) {
      // For floating-point precision of 6:
      //
      //   LogOfMantissa =
      //     -1.1609546f +
      //       (1.4034025f - 0.23903021f * x) * x;
      //
      // error 0.0034276066, which is better than 8 bits
      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
                               getF32Constant(DAG, 0xbe74c456, dl));
      SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
                               getF32Constant(DAG, 0x3fb3a2b1, dl));
      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
      LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
                                  getF32Constant(DAG, 0x3f949a29, dl));
    } else if (LimitFloatPrecision <= 12) {
      // For floating-point precision of 12:
      //
      //   LogOfMantissa =
      //     -1.7417939f +
      //       (2.8212026f +
      //         (-1.4699568f +
      //           (0.44717955f - 0.56570851e-1f * x) * x) * x) * x;
      //
      // error 0.000061011436, which is 14 bits
      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
                               getF32Constant(DAG, 0xbd67b6d6, dl));
      SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
                               getF32Constant(DAG, 0x3ee4f4b8, dl));
      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
      SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
                               getF32Constant(DAG, 0x3fbc278b, dl));
      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
                               getF32Constant(DAG, 0x40348e95, dl));
      SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
      LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
                                  getF32Constant(DAG, 0x3fdef31a, dl));
    } else { // LimitFloatPrecision <= 18
      // For floating-point precision of 18:
      //
      //   LogOfMantissa =
      //     -2.1072184f +
      //       (4.2372794f +
      //         (-3.7029485f +
      //           (2.2781945f +
      //             (-0.87823314f +
      //               (0.19073739f - 0.17809712e-1f * x) * x) * x) * x) * x)*x;
      //
      // error 0.0000023660568, which is better than 18 bits
      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
                               getF32Constant(DAG, 0xbc91e5ac, dl));
      SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
                               getF32Constant(DAG, 0x3e4350aa, dl));
      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
      SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
                               getF32Constant(DAG, 0x3f60d3e3, dl));
      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
                               getF32Constant(DAG, 0x4011cdf0, dl));
      SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
      SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
                               getF32Constant(DAG, 0x406cfd1c, dl));
      SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
      SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
                               getF32Constant(DAG, 0x408797cb, dl));
      SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
      LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
                                  getF32Constant(DAG, 0x4006dcab, dl));
    }

    return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, LogOfMantissa);
  }

  // No special expansion.
  return DAG.getNode(ISD::FLOG, dl, Op.getValueType(), Op);
}

/// expandLog2 - Lower a log2 intrinsic. Handles the special sequences for
/// limited-precision mode.
static SDValue expandLog2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
                          const TargetLowering &TLI) {

  // TODO: What fast-math-flags should be set on the floating-point nodes?

  if (Op.getValueType() == MVT::f32 &&
      LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
    SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);

    // Get the exponent.
    SDValue LogOfExponent = GetExponent(DAG, Op1, TLI, dl);

    // Get the significand and build it into a floating-point number with
    // exponent of 1.
    SDValue X = GetSignificand(DAG, Op1, dl);

    // Different possible minimax approximations of significand in
    // floating-point for various degrees of accuracy over [1,2].
    SDValue Log2ofMantissa;
    if (LimitFloatPrecision <= 6) {
      // For floating-point precision of 6:
      //
      //   Log2ofMantissa = -1.6749035f + (2.0246817f - .34484768f * x) * x;
      //
      // error 0.0049451742, which is more than 7 bits
      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
                               getF32Constant(DAG, 0xbeb08fe0, dl));
      SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
                               getF32Constant(DAG, 0x40019463, dl));
      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
      Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
                                   getF32Constant(DAG, 0x3fd6633d, dl));
    } else if (LimitFloatPrecision <= 12) {
      // For floating-point precision of 12:
      //
      //   Log2ofMantissa =
      //     -2.51285454f +
      //       (4.07009056f +
      //         (-2.12067489f +
      //           (.645142248f - 0.816157886e-1f * x) * x) * x) * x;
      //
      // error 0.0000876136000, which is better than 13 bits
      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
                               getF32Constant(DAG, 0xbda7262e, dl));
      SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
                               getF32Constant(DAG, 0x3f25280b, dl));
      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
      SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
                               getF32Constant(DAG, 0x4007b923, dl));
      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
                               getF32Constant(DAG, 0x40823e2f, dl));
      SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
      Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
                                   getF32Constant(DAG, 0x4020d29c, dl));
    } else { // LimitFloatPrecision <= 18
      // For floating-point precision of 18:
      //
      //   Log2ofMantissa =
      //     -3.0400495f +
      //       (6.1129976f +
      //         (-5.3420409f +
      //           (3.2865683f +
      //             (-1.2669343f +
      //               (0.27515199f -
      //                 0.25691327e-1f * x) * x) * x) * x) * x) * x;
      //
      // error 0.0000018516, which is better than 18 bits
      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
                               getF32Constant(DAG, 0xbcd2769e, dl));
      SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
                               getF32Constant(DAG, 0x3e8ce0b9, dl));
      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
      SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
                               getF32Constant(DAG, 0x3fa22ae7, dl));
      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
                               getF32Constant(DAG, 0x40525723, dl));
      SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
      SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
                               getF32Constant(DAG, 0x40aaf200, dl));
      SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
DAG.getNode(ISD::FADD, dl, MVT::f32, t8, 4552 getF32Constant(DAG, 0x40c39dad, dl)); 4553 SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X); 4554 Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10, 4555 getF32Constant(DAG, 0x4042902c, dl)); 4556 } 4557 4558 return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, Log2ofMantissa); 4559 } 4560 4561 // No special expansion. 4562 return DAG.getNode(ISD::FLOG2, dl, Op.getValueType(), Op); 4563 } 4564 4565 /// expandLog10 - Lower a log10 intrinsic. Handles the special sequences for 4566 /// limited-precision mode. 4567 static SDValue expandLog10(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, 4568 const TargetLowering &TLI) { 4569 4570 // TODO: What fast-math-flags should be set on the floating-point nodes? 4571 4572 if (Op.getValueType() == MVT::f32 && 4573 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) { 4574 SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op); 4575 4576 // Scale the exponent by log10(2) [0.30102999f]. 4577 SDValue Exp = GetExponent(DAG, Op1, TLI, dl); 4578 SDValue LogOfExponent = DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp, 4579 getF32Constant(DAG, 0x3e9a209a, dl)); 4580 4581 // Get the significand and build it into a floating-point number with 4582 // exponent of 1. 4583 SDValue X = GetSignificand(DAG, Op1, dl); 4584 4585 SDValue Log10ofMantissa; 4586 if (LimitFloatPrecision <= 6) { 4587 // For floating-point precision of 6: 4588 // 4589 // Log10ofMantissa = 4590 // -0.50419619f + 4591 // (0.60948995f - 0.10380950f * x) * x; 4592 // 4593 // error 0.0014886165, which is 6 bits 4594 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X, 4595 getF32Constant(DAG, 0xbdd49a13, dl)); 4596 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0, 4597 getF32Constant(DAG, 0x3f1c0789, dl)); 4598 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X); 4599 Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2, 4600 getF32Constant(DAG, 0x3f011300, dl)); 4601 } else if (LimitFloatPrecision <= 12) { 4602 // For floating-point precision of 12: 4603 // 4604 // Log10ofMantissa = 4605 // -0.64831180f + 4606 // (0.91751397f + 4607 // (-0.31664806f + 0.47637168e-1f * x) * x) * x; 4608 // 4609 // error 0.00019228036, which is better than 12 bits 4610 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X, 4611 getF32Constant(DAG, 0x3d431f31, dl)); 4612 SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0, 4613 getF32Constant(DAG, 0x3ea21fb2, dl)); 4614 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X); 4615 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2, 4616 getF32Constant(DAG, 0x3f6ae232, dl)); 4617 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X); 4618 Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4, 4619 getF32Constant(DAG, 0x3f25f7c3, dl)); 4620 } else { // LimitFloatPrecision <= 18 4621 // For floating-point precision of 18: 4622 // 4623 // Log10ofMantissa = 4624 // -0.84299375f + 4625 // (1.5327582f + 4626 // (-1.0688956f + 4627 // (0.49102474f + 4628 // (-0.12539807f + 0.13508273e-1f * x) * x) * x) * x) * x; 4629 // 4630 // error 0.0000037995730, which is better than 18 bits 4631 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X, 4632 getF32Constant(DAG, 0x3c5d51ce, dl)); 4633 SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0, 4634 getF32Constant(DAG, 0x3e00685a, dl)); 4635 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X); 4636 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2, 4637 getF32Constant(DAG, 0x3efb6798, dl)); 4638 SDValue t4 = DAG.getNode(ISD::FMUL, dl, 
MVT::f32, t3, X);
4639 SDValue t5 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
4640 getF32Constant(DAG, 0x3f88d192, dl));
4641 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4642 SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
4643 getF32Constant(DAG, 0x3fc4316c, dl));
4644 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
4645 Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t8,
4646 getF32Constant(DAG, 0x3f57ce70, dl));
4647 }
4648
4649 return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, Log10ofMantissa);
4650 }
4651
4652 // No special expansion.
4653 return DAG.getNode(ISD::FLOG10, dl, Op.getValueType(), Op);
4654 }
4655
4656 /// expandExp2 - Lower an exp2 intrinsic. Handles the special sequences for
4657 /// limited-precision mode.
4658 static SDValue expandExp2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
4659 const TargetLowering &TLI) {
4660 if (Op.getValueType() == MVT::f32 &&
4661 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18)
4662 return getLimitedPrecisionExp2(Op, dl, DAG);
4663
4664 // No special expansion.
4665 return DAG.getNode(ISD::FEXP2, dl, Op.getValueType(), Op);
4666 }
4667
4668 /// expandPow - Lower a pow intrinsic. Handles the special sequences for
4669 /// limited-precision mode with x == 10.0f.
4670 static SDValue expandPow(const SDLoc &dl, SDValue LHS, SDValue RHS,
4671 SelectionDAG &DAG, const TargetLowering &TLI) {
4672 bool IsExp10 = false;
4673 if (LHS.getValueType() == MVT::f32 && RHS.getValueType() == MVT::f32 &&
4674 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
4675 if (ConstantFPSDNode *LHSC = dyn_cast<ConstantFPSDNode>(LHS)) {
4676 APFloat Ten(10.0f);
4677 IsExp10 = LHSC->isExactlyValue(Ten);
4678 }
4679 }
4680
4681 // TODO: What fast-math-flags should be set on the FMUL node?
4682 if (IsExp10) {
4683 // Put the exponent in the right bit position for later addition to the
4684 // final result:
4685 //
4686 //   #define LOG2OF10 3.3219281f
4687 //   t0 = Op * LOG2OF10;
4688 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, RHS,
4689 getF32Constant(DAG, 0x40549a78, dl));
4690 return getLimitedPrecisionExp2(t0, dl, DAG);
4691 }
4692
4693 // No special expansion.
4694 return DAG.getNode(ISD::FPOW, dl, LHS.getValueType(), LHS, RHS);
4695 }
4696
4697
4698 /// ExpandPowI - Expand a llvm.powi intrinsic.
4699 static SDValue ExpandPowI(const SDLoc &DL, SDValue LHS, SDValue RHS,
4700 SelectionDAG &DAG) {
4701 // If RHS is a constant, we can expand this out to a multiplication tree,
4702 // otherwise we end up lowering to a call to __powidf2 (for example). When
4703 // optimizing for size, we only want to do this if the expansion would produce
4704 // a small number of multiplies, otherwise we do the full expansion.
4705 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
4706 // Get the exponent as a positive value.
4707 unsigned Val = RHSC->getSExtValue();
4708 if ((int)Val < 0) Val = -Val;
4709
4710 // powi(x, 0) -> 1.0
4711 if (Val == 0)
4712 return DAG.getConstantFP(1.0, DL, LHS.getValueType());
4713
4714 const Function *F = DAG.getMachineFunction().getFunction();
4715 if (!F->optForSize() ||
4716 // If optimizing for size, don't insert too many multiplies.
4717 // This inserts up to 5 multiplies.
4718 countPopulation(Val) + Log2_32(Val) < 7) {
4719 // We use the simple binary decomposition method to generate the multiply
4720 // sequence.
There are more optimal ways to do this (for example, 4721 // powi(x,15) generates one more multiply than it should), but this has 4722 // the benefit of being both really simple and much better than a libcall. 4723 SDValue Res; // Logically starts equal to 1.0 4724 SDValue CurSquare = LHS; 4725 // TODO: Intrinsics should have fast-math-flags that propagate to these 4726 // nodes. 4727 while (Val) { 4728 if (Val & 1) { 4729 if (Res.getNode()) 4730 Res = DAG.getNode(ISD::FMUL, DL,Res.getValueType(), Res, CurSquare); 4731 else 4732 Res = CurSquare; // 1.0*CurSquare. 4733 } 4734 4735 CurSquare = DAG.getNode(ISD::FMUL, DL, CurSquare.getValueType(), 4736 CurSquare, CurSquare); 4737 Val >>= 1; 4738 } 4739 4740 // If the original was negative, invert the result, producing 1/(x*x*x). 4741 if (RHSC->getSExtValue() < 0) 4742 Res = DAG.getNode(ISD::FDIV, DL, LHS.getValueType(), 4743 DAG.getConstantFP(1.0, DL, LHS.getValueType()), Res); 4744 return Res; 4745 } 4746 } 4747 4748 // Otherwise, expand to a libcall. 4749 return DAG.getNode(ISD::FPOWI, DL, LHS.getValueType(), LHS, RHS); 4750 } 4751 4752 // getUnderlyingArgReg - Find underlying register used for a truncated or 4753 // bitcasted argument. 4754 static unsigned getUnderlyingArgReg(const SDValue &N) { 4755 switch (N.getOpcode()) { 4756 case ISD::CopyFromReg: 4757 return cast<RegisterSDNode>(N.getOperand(1))->getReg(); 4758 case ISD::BITCAST: 4759 case ISD::AssertZext: 4760 case ISD::AssertSext: 4761 case ISD::TRUNCATE: 4762 return getUnderlyingArgReg(N.getOperand(0)); 4763 default: 4764 return 0; 4765 } 4766 } 4767 4768 /// If the DbgValueInst is a dbg_value of a function argument, create the 4769 /// corresponding DBG_VALUE machine instruction for it now. At the end of 4770 /// instruction selection, they will be inserted to the entry BB. 4771 bool SelectionDAGBuilder::EmitFuncArgumentDbgValue( 4772 const Value *V, DILocalVariable *Variable, DIExpression *Expr, 4773 DILocation *DL, bool IsDbgDeclare, const SDValue &N) { 4774 const Argument *Arg = dyn_cast<Argument>(V); 4775 if (!Arg) 4776 return false; 4777 4778 MachineFunction &MF = DAG.getMachineFunction(); 4779 const TargetInstrInfo *TII = DAG.getSubtarget().getInstrInfo(); 4780 4781 // Ignore inlined function arguments here. 4782 // 4783 // FIXME: Should we be checking DL->inlinedAt() to determine this? 4784 if (!Variable->getScope()->getSubprogram()->describes(MF.getFunction())) 4785 return false; 4786 4787 bool IsIndirect = false; 4788 Optional<MachineOperand> Op; 4789 // Some arguments' frame index is recorded during argument lowering. 4790 int FI = FuncInfo.getArgumentFrameIndex(Arg); 4791 if (FI != INT_MAX) 4792 Op = MachineOperand::CreateFI(FI); 4793 4794 if (!Op && N.getNode()) { 4795 unsigned Reg = getUnderlyingArgReg(N); 4796 if (Reg && TargetRegisterInfo::isVirtualRegister(Reg)) { 4797 MachineRegisterInfo &RegInfo = MF.getRegInfo(); 4798 unsigned PR = RegInfo.getLiveInPhysReg(Reg); 4799 if (PR) 4800 Reg = PR; 4801 } 4802 if (Reg) { 4803 Op = MachineOperand::CreateReg(Reg, false); 4804 IsIndirect = IsDbgDeclare; 4805 } 4806 } 4807 4808 if (!Op) { 4809 // Check if ValueMap has reg number. 
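// If it does but the value was split across several registers, a separate
// DBG_VALUE with a fragment expression is emitted per register below.
// Illustrative example (not taken from a particular target): an i128
// argument lowered into two 64-bit vregs would be described as
//   DBG_VALUE %vreg0, ..., !DIExpression(DW_OP_LLVM_fragment, 0, 64)
//   DBG_VALUE %vreg1, ..., !DIExpression(DW_OP_LLVM_fragment, 64, 64)
// i.e. one fragment per register, each carrying a bit offset and size.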
4810 DenseMap<const Value *, unsigned>::iterator VMI = FuncInfo.ValueMap.find(V); 4811 if (VMI != FuncInfo.ValueMap.end()) { 4812 const auto &TLI = DAG.getTargetLoweringInfo(); 4813 RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), VMI->second, 4814 V->getType(), isABIRegCopy(V)); 4815 unsigned NumRegs = 4816 std::accumulate(RFV.RegCount.begin(), RFV.RegCount.end(), 0); 4817 if (NumRegs > 1) { 4818 unsigned I = 0; 4819 unsigned Offset = 0; 4820 auto RegisterVT = RFV.RegVTs.begin(); 4821 for (auto RegCount : RFV.RegCount) { 4822 unsigned RegisterSize = (RegisterVT++)->getSizeInBits(); 4823 for (unsigned E = I + RegCount; I != E; ++I) { 4824 // The vregs are guaranteed to be allocated in sequence. 4825 Op = MachineOperand::CreateReg(VMI->second + I, false); 4826 auto *FragmentExpr = DIExpression::createFragmentExpression( 4827 Expr, Offset, RegisterSize); 4828 FuncInfo.ArgDbgValues.push_back( 4829 BuildMI(MF, DL, TII->get(TargetOpcode::DBG_VALUE), IsDbgDeclare, 4830 Op->getReg(), Variable, FragmentExpr)); 4831 Offset += RegisterSize; 4832 } 4833 } 4834 return true; 4835 } 4836 Op = MachineOperand::CreateReg(VMI->second, false); 4837 IsIndirect = IsDbgDeclare; 4838 } 4839 } 4840 4841 if (!Op && N.getNode()) 4842 // Check if frame index is available. 4843 if (LoadSDNode *LNode = dyn_cast<LoadSDNode>(N.getNode())) 4844 if (FrameIndexSDNode *FINode = 4845 dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode())) 4846 Op = MachineOperand::CreateFI(FINode->getIndex()); 4847 4848 if (!Op) 4849 return false; 4850 4851 assert(Variable->isValidLocationForIntrinsic(DL) && 4852 "Expected inlined-at fields to agree"); 4853 if (Op->isReg()) 4854 FuncInfo.ArgDbgValues.push_back( 4855 BuildMI(MF, DL, TII->get(TargetOpcode::DBG_VALUE), IsIndirect, 4856 Op->getReg(), Variable, Expr)); 4857 else 4858 FuncInfo.ArgDbgValues.push_back( 4859 BuildMI(MF, DL, TII->get(TargetOpcode::DBG_VALUE)) 4860 .add(*Op) 4861 .addImm(0) 4862 .addMetadata(Variable) 4863 .addMetadata(Expr)); 4864 4865 return true; 4866 } 4867 4868 /// Return the appropriate SDDbgValue based on N. 4869 SDDbgValue *SelectionDAGBuilder::getDbgValue(SDValue N, 4870 DILocalVariable *Variable, 4871 DIExpression *Expr, 4872 const DebugLoc &dl, 4873 unsigned DbgSDNodeOrder) { 4874 if (auto *FISDN = dyn_cast<FrameIndexSDNode>(N.getNode())) { 4875 // Construct a FrameIndexDbgValue for FrameIndexSDNodes so we can describe 4876 // stack slot locations as such instead of as indirectly addressed 4877 // locations. 4878 return DAG.getFrameIndexDbgValue(Variable, Expr, FISDN->getIndex(), dl, 4879 DbgSDNodeOrder); 4880 } 4881 return DAG.getDbgValue(Variable, Expr, N.getNode(), N.getResNo(), false, dl, 4882 DbgSDNodeOrder); 4883 } 4884 4885 // VisualStudio defines setjmp as _setjmp 4886 #if defined(_MSC_VER) && defined(setjmp) && \ 4887 !defined(setjmp_undefined_for_msvc) 4888 # pragma push_macro("setjmp") 4889 # undef setjmp 4890 # define setjmp_undefined_for_msvc 4891 #endif 4892 4893 /// Lower the call to the specified intrinsic function. If we want to emit this 4894 /// as a call to a named external function, return the name. Otherwise, lower it 4895 /// and return null. 4896 const char * 4897 SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) { 4898 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 4899 SDLoc sdl = getCurSDLoc(); 4900 DebugLoc dl = getCurDebugLoc(); 4901 SDValue Res; 4902 4903 switch (Intrinsic) { 4904 default: 4905 // By default, turn this into a target intrinsic node. 
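// (visitTargetIntrinsic wraps the call in an INTRINSIC_VOID,
// INTRINSIC_W_CHAIN or INTRINSIC_WO_CHAIN node, depending on whether the
// intrinsic returns a value and whether it touches memory.)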
4906 visitTargetIntrinsic(I, Intrinsic); 4907 return nullptr; 4908 case Intrinsic::vastart: visitVAStart(I); return nullptr; 4909 case Intrinsic::vaend: visitVAEnd(I); return nullptr; 4910 case Intrinsic::vacopy: visitVACopy(I); return nullptr; 4911 case Intrinsic::returnaddress: 4912 setValue(&I, DAG.getNode(ISD::RETURNADDR, sdl, 4913 TLI.getPointerTy(DAG.getDataLayout()), 4914 getValue(I.getArgOperand(0)))); 4915 return nullptr; 4916 case Intrinsic::addressofreturnaddress: 4917 setValue(&I, DAG.getNode(ISD::ADDROFRETURNADDR, sdl, 4918 TLI.getPointerTy(DAG.getDataLayout()))); 4919 return nullptr; 4920 case Intrinsic::frameaddress: 4921 setValue(&I, DAG.getNode(ISD::FRAMEADDR, sdl, 4922 TLI.getPointerTy(DAG.getDataLayout()), 4923 getValue(I.getArgOperand(0)))); 4924 return nullptr; 4925 case Intrinsic::read_register: { 4926 Value *Reg = I.getArgOperand(0); 4927 SDValue Chain = getRoot(); 4928 SDValue RegName = 4929 DAG.getMDNode(cast<MDNode>(cast<MetadataAsValue>(Reg)->getMetadata())); 4930 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType()); 4931 Res = DAG.getNode(ISD::READ_REGISTER, sdl, 4932 DAG.getVTList(VT, MVT::Other), Chain, RegName); 4933 setValue(&I, Res); 4934 DAG.setRoot(Res.getValue(1)); 4935 return nullptr; 4936 } 4937 case Intrinsic::write_register: { 4938 Value *Reg = I.getArgOperand(0); 4939 Value *RegValue = I.getArgOperand(1); 4940 SDValue Chain = getRoot(); 4941 SDValue RegName = 4942 DAG.getMDNode(cast<MDNode>(cast<MetadataAsValue>(Reg)->getMetadata())); 4943 DAG.setRoot(DAG.getNode(ISD::WRITE_REGISTER, sdl, MVT::Other, Chain, 4944 RegName, getValue(RegValue))); 4945 return nullptr; 4946 } 4947 case Intrinsic::setjmp: 4948 return &"_setjmp"[!TLI.usesUnderscoreSetJmp()]; 4949 case Intrinsic::longjmp: 4950 return &"_longjmp"[!TLI.usesUnderscoreLongJmp()]; 4951 case Intrinsic::memcpy: { 4952 SDValue Op1 = getValue(I.getArgOperand(0)); 4953 SDValue Op2 = getValue(I.getArgOperand(1)); 4954 SDValue Op3 = getValue(I.getArgOperand(2)); 4955 unsigned Align = cast<ConstantInt>(I.getArgOperand(3))->getZExtValue(); 4956 if (!Align) 4957 Align = 1; // @llvm.memcpy defines 0 and 1 to both mean no alignment. 4958 bool isVol = cast<ConstantInt>(I.getArgOperand(4))->getZExtValue(); 4959 bool isTC = I.isTailCall() && isInTailCallPosition(&I, DAG.getTarget()); 4960 SDValue MC = DAG.getMemcpy(getRoot(), sdl, Op1, Op2, Op3, Align, isVol, 4961 false, isTC, 4962 MachinePointerInfo(I.getArgOperand(0)), 4963 MachinePointerInfo(I.getArgOperand(1))); 4964 updateDAGForMaybeTailCall(MC); 4965 return nullptr; 4966 } 4967 case Intrinsic::memset: { 4968 SDValue Op1 = getValue(I.getArgOperand(0)); 4969 SDValue Op2 = getValue(I.getArgOperand(1)); 4970 SDValue Op3 = getValue(I.getArgOperand(2)); 4971 unsigned Align = cast<ConstantInt>(I.getArgOperand(3))->getZExtValue(); 4972 if (!Align) 4973 Align = 1; // @llvm.memset defines 0 and 1 to both mean no alignment. 
4974 bool isVol = cast<ConstantInt>(I.getArgOperand(4))->getZExtValue(); 4975 bool isTC = I.isTailCall() && isInTailCallPosition(&I, DAG.getTarget()); 4976 SDValue MS = DAG.getMemset(getRoot(), sdl, Op1, Op2, Op3, Align, isVol, 4977 isTC, MachinePointerInfo(I.getArgOperand(0))); 4978 updateDAGForMaybeTailCall(MS); 4979 return nullptr; 4980 } 4981 case Intrinsic::memmove: { 4982 SDValue Op1 = getValue(I.getArgOperand(0)); 4983 SDValue Op2 = getValue(I.getArgOperand(1)); 4984 SDValue Op3 = getValue(I.getArgOperand(2)); 4985 unsigned Align = cast<ConstantInt>(I.getArgOperand(3))->getZExtValue(); 4986 if (!Align) 4987 Align = 1; // @llvm.memmove defines 0 and 1 to both mean no alignment. 4988 bool isVol = cast<ConstantInt>(I.getArgOperand(4))->getZExtValue(); 4989 bool isTC = I.isTailCall() && isInTailCallPosition(&I, DAG.getTarget()); 4990 SDValue MM = DAG.getMemmove(getRoot(), sdl, Op1, Op2, Op3, Align, isVol, 4991 isTC, MachinePointerInfo(I.getArgOperand(0)), 4992 MachinePointerInfo(I.getArgOperand(1))); 4993 updateDAGForMaybeTailCall(MM); 4994 return nullptr; 4995 } 4996 case Intrinsic::memcpy_element_unordered_atomic: { 4997 const ElementUnorderedAtomicMemCpyInst &MI = 4998 cast<ElementUnorderedAtomicMemCpyInst>(I); 4999 SDValue Dst = getValue(MI.getRawDest()); 5000 SDValue Src = getValue(MI.getRawSource()); 5001 SDValue Length = getValue(MI.getLength()); 5002 5003 // Emit a library call. 5004 TargetLowering::ArgListTy Args; 5005 TargetLowering::ArgListEntry Entry; 5006 Entry.Ty = DAG.getDataLayout().getIntPtrType(*DAG.getContext()); 5007 Entry.Node = Dst; 5008 Args.push_back(Entry); 5009 5010 Entry.Node = Src; 5011 Args.push_back(Entry); 5012 5013 Entry.Ty = MI.getLength()->getType(); 5014 Entry.Node = Length; 5015 Args.push_back(Entry); 5016 5017 uint64_t ElementSizeConstant = MI.getElementSizeInBytes(); 5018 RTLIB::Libcall LibraryCall = 5019 RTLIB::getMEMCPY_ELEMENT_UNORDERED_ATOMIC(ElementSizeConstant); 5020 if (LibraryCall == RTLIB::UNKNOWN_LIBCALL) 5021 report_fatal_error("Unsupported element size"); 5022 5023 TargetLowering::CallLoweringInfo CLI(DAG); 5024 CLI.setDebugLoc(sdl).setChain(getRoot()).setLibCallee( 5025 TLI.getLibcallCallingConv(LibraryCall), 5026 Type::getVoidTy(*DAG.getContext()), 5027 DAG.getExternalSymbol(TLI.getLibcallName(LibraryCall), 5028 TLI.getPointerTy(DAG.getDataLayout())), 5029 std::move(Args)); 5030 5031 std::pair<SDValue, SDValue> CallResult = TLI.LowerCallTo(CLI); 5032 DAG.setRoot(CallResult.second); 5033 return nullptr; 5034 } 5035 case Intrinsic::memmove_element_unordered_atomic: { 5036 auto &MI = cast<ElementUnorderedAtomicMemMoveInst>(I); 5037 SDValue Dst = getValue(MI.getRawDest()); 5038 SDValue Src = getValue(MI.getRawSource()); 5039 SDValue Length = getValue(MI.getLength()); 5040 5041 // Emit a library call. 
5042 TargetLowering::ArgListTy Args; 5043 TargetLowering::ArgListEntry Entry; 5044 Entry.Ty = DAG.getDataLayout().getIntPtrType(*DAG.getContext()); 5045 Entry.Node = Dst; 5046 Args.push_back(Entry); 5047 5048 Entry.Node = Src; 5049 Args.push_back(Entry); 5050 5051 Entry.Ty = MI.getLength()->getType(); 5052 Entry.Node = Length; 5053 Args.push_back(Entry); 5054 5055 uint64_t ElementSizeConstant = MI.getElementSizeInBytes(); 5056 RTLIB::Libcall LibraryCall = 5057 RTLIB::getMEMMOVE_ELEMENT_UNORDERED_ATOMIC(ElementSizeConstant); 5058 if (LibraryCall == RTLIB::UNKNOWN_LIBCALL) 5059 report_fatal_error("Unsupported element size"); 5060 5061 TargetLowering::CallLoweringInfo CLI(DAG); 5062 CLI.setDebugLoc(sdl).setChain(getRoot()).setLibCallee( 5063 TLI.getLibcallCallingConv(LibraryCall), 5064 Type::getVoidTy(*DAG.getContext()), 5065 DAG.getExternalSymbol(TLI.getLibcallName(LibraryCall), 5066 TLI.getPointerTy(DAG.getDataLayout())), 5067 std::move(Args)); 5068 5069 std::pair<SDValue, SDValue> CallResult = TLI.LowerCallTo(CLI); 5070 DAG.setRoot(CallResult.second); 5071 return nullptr; 5072 } 5073 case Intrinsic::memset_element_unordered_atomic: { 5074 auto &MI = cast<ElementUnorderedAtomicMemSetInst>(I); 5075 SDValue Dst = getValue(MI.getRawDest()); 5076 SDValue Val = getValue(MI.getValue()); 5077 SDValue Length = getValue(MI.getLength()); 5078 5079 // Emit a library call. 5080 TargetLowering::ArgListTy Args; 5081 TargetLowering::ArgListEntry Entry; 5082 Entry.Ty = DAG.getDataLayout().getIntPtrType(*DAG.getContext()); 5083 Entry.Node = Dst; 5084 Args.push_back(Entry); 5085 5086 Entry.Ty = Type::getInt8Ty(*DAG.getContext()); 5087 Entry.Node = Val; 5088 Args.push_back(Entry); 5089 5090 Entry.Ty = MI.getLength()->getType(); 5091 Entry.Node = Length; 5092 Args.push_back(Entry); 5093 5094 uint64_t ElementSizeConstant = MI.getElementSizeInBytes(); 5095 RTLIB::Libcall LibraryCall = 5096 RTLIB::getMEMSET_ELEMENT_UNORDERED_ATOMIC(ElementSizeConstant); 5097 if (LibraryCall == RTLIB::UNKNOWN_LIBCALL) 5098 report_fatal_error("Unsupported element size"); 5099 5100 TargetLowering::CallLoweringInfo CLI(DAG); 5101 CLI.setDebugLoc(sdl).setChain(getRoot()).setLibCallee( 5102 TLI.getLibcallCallingConv(LibraryCall), 5103 Type::getVoidTy(*DAG.getContext()), 5104 DAG.getExternalSymbol(TLI.getLibcallName(LibraryCall), 5105 TLI.getPointerTy(DAG.getDataLayout())), 5106 std::move(Args)); 5107 5108 std::pair<SDValue, SDValue> CallResult = TLI.LowerCallTo(CLI); 5109 DAG.setRoot(CallResult.second); 5110 return nullptr; 5111 } 5112 case Intrinsic::dbg_declare: { 5113 const DbgDeclareInst &DI = cast<DbgDeclareInst>(I); 5114 DILocalVariable *Variable = DI.getVariable(); 5115 DIExpression *Expression = DI.getExpression(); 5116 const Value *Address = DI.getAddress(); 5117 assert(Variable && "Missing variable"); 5118 if (!Address) { 5119 DEBUG(dbgs() << "Dropping debug info for " << DI << "\n"); 5120 return nullptr; 5121 } 5122 5123 // Check if address has undef value. 5124 if (isa<UndefValue>(Address) || 5125 (Address->use_empty() && !isa<Argument>(Address))) { 5126 DEBUG(dbgs() << "Dropping debug info for " << DI << "\n"); 5127 return nullptr; 5128 } 5129 5130 // Static allocas are handled more efficiently in the variable frame index 5131 // side table. 
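// (Their frame slots are recorded in FuncInfo.StaticAllocaMap, so no
// SDDbgValue needs to be built for them here; see the early return below.)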
5132 if (const auto *AI = 5133 dyn_cast<AllocaInst>(Address->stripInBoundsConstantOffsets())) 5134 if (AI->isStaticAlloca() && FuncInfo.StaticAllocaMap.count(AI)) 5135 return nullptr; 5136 5137 // Byval arguments with frame indices were already handled after argument 5138 // lowering and before isel. 5139 if (const auto *Arg = 5140 dyn_cast<Argument>(Address->stripInBoundsConstantOffsets())) 5141 if (FuncInfo.getArgumentFrameIndex(Arg) != INT_MAX) 5142 return nullptr; 5143 5144 SDValue &N = NodeMap[Address]; 5145 if (!N.getNode() && isa<Argument>(Address)) 5146 // Check unused arguments map. 5147 N = UnusedArgNodeMap[Address]; 5148 SDDbgValue *SDV; 5149 if (N.getNode()) { 5150 if (const BitCastInst *BCI = dyn_cast<BitCastInst>(Address)) 5151 Address = BCI->getOperand(0); 5152 // Parameters are handled specially. 5153 bool isParameter = Variable->isParameter() || isa<Argument>(Address); 5154 auto FINode = dyn_cast<FrameIndexSDNode>(N.getNode()); 5155 if (isParameter && FINode) { 5156 // Byval parameter. We have a frame index at this point. 5157 SDV = DAG.getFrameIndexDbgValue(Variable, Expression, 5158 FINode->getIndex(), dl, SDNodeOrder); 5159 } else if (isa<Argument>(Address)) { 5160 // Address is an argument, so try to emit its dbg value using 5161 // virtual register info from the FuncInfo.ValueMap. 5162 EmitFuncArgumentDbgValue(Address, Variable, Expression, dl, true, N); 5163 return nullptr; 5164 } else { 5165 SDV = DAG.getDbgValue(Variable, Expression, N.getNode(), N.getResNo(), 5166 true, dl, SDNodeOrder); 5167 } 5168 DAG.AddDbgValue(SDV, N.getNode(), isParameter); 5169 } else { 5170 // If Address is an argument then try to emit its dbg value using 5171 // virtual register info from the FuncInfo.ValueMap. 5172 if (!EmitFuncArgumentDbgValue(Address, Variable, Expression, dl, true, 5173 N)) { 5174 DEBUG(dbgs() << "Dropping debug info for " << DI << "\n"); 5175 } 5176 } 5177 return nullptr; 5178 } 5179 case Intrinsic::dbg_value: { 5180 const DbgValueInst &DI = cast<DbgValueInst>(I); 5181 assert(DI.getVariable() && "Missing variable"); 5182 5183 DILocalVariable *Variable = DI.getVariable(); 5184 DIExpression *Expression = DI.getExpression(); 5185 const Value *V = DI.getValue(); 5186 if (!V) 5187 return nullptr; 5188 5189 SDDbgValue *SDV; 5190 if (isa<ConstantInt>(V) || isa<ConstantFP>(V) || isa<UndefValue>(V)) { 5191 SDV = DAG.getConstantDbgValue(Variable, Expression, V, dl, SDNodeOrder); 5192 DAG.AddDbgValue(SDV, nullptr, false); 5193 return nullptr; 5194 } 5195 5196 // Do not use getValue() in here; we don't want to generate code at 5197 // this point if it hasn't been done yet. 5198 SDValue N = NodeMap[V]; 5199 if (!N.getNode() && isa<Argument>(V)) // Check unused arguments map. 5200 N = UnusedArgNodeMap[V]; 5201 if (N.getNode()) { 5202 if (EmitFuncArgumentDbgValue(V, Variable, Expression, dl, false, N)) 5203 return nullptr; 5204 SDV = getDbgValue(N, Variable, Expression, dl, SDNodeOrder); 5205 DAG.AddDbgValue(SDV, N.getNode(), false); 5206 return nullptr; 5207 } 5208 5209 if (!V->use_empty() ) { 5210 // Do not call getValue(V) yet, as we don't want to generate code. 5211 // Remember it for later. 5212 DanglingDebugInfo DDI(&DI, dl, SDNodeOrder); 5213 DanglingDebugInfoMap[V] = DDI; 5214 return nullptr; 5215 } 5216 5217 DEBUG(dbgs() << "Dropping debug location info for:\n " << DI << "\n"); 5218 DEBUG(dbgs() << " Last seen at:\n " << *V << "\n"); 5219 return nullptr; 5220 } 5221 5222 case Intrinsic::eh_typeid_for: { 5223 // Find the type id for the given typeinfo. 
5224 GlobalValue *GV = ExtractTypeInfo(I.getArgOperand(0)); 5225 unsigned TypeID = DAG.getMachineFunction().getTypeIDFor(GV); 5226 Res = DAG.getConstant(TypeID, sdl, MVT::i32); 5227 setValue(&I, Res); 5228 return nullptr; 5229 } 5230 5231 case Intrinsic::eh_return_i32: 5232 case Intrinsic::eh_return_i64: 5233 DAG.getMachineFunction().setCallsEHReturn(true); 5234 DAG.setRoot(DAG.getNode(ISD::EH_RETURN, sdl, 5235 MVT::Other, 5236 getControlRoot(), 5237 getValue(I.getArgOperand(0)), 5238 getValue(I.getArgOperand(1)))); 5239 return nullptr; 5240 case Intrinsic::eh_unwind_init: 5241 DAG.getMachineFunction().setCallsUnwindInit(true); 5242 return nullptr; 5243 case Intrinsic::eh_dwarf_cfa: { 5244 setValue(&I, DAG.getNode(ISD::EH_DWARF_CFA, sdl, 5245 TLI.getPointerTy(DAG.getDataLayout()), 5246 getValue(I.getArgOperand(0)))); 5247 return nullptr; 5248 } 5249 case Intrinsic::eh_sjlj_callsite: { 5250 MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI(); 5251 ConstantInt *CI = dyn_cast<ConstantInt>(I.getArgOperand(0)); 5252 assert(CI && "Non-constant call site value in eh.sjlj.callsite!"); 5253 assert(MMI.getCurrentCallSite() == 0 && "Overlapping call sites!"); 5254 5255 MMI.setCurrentCallSite(CI->getZExtValue()); 5256 return nullptr; 5257 } 5258 case Intrinsic::eh_sjlj_functioncontext: { 5259 // Get and store the index of the function context. 5260 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo(); 5261 AllocaInst *FnCtx = 5262 cast<AllocaInst>(I.getArgOperand(0)->stripPointerCasts()); 5263 int FI = FuncInfo.StaticAllocaMap[FnCtx]; 5264 MFI.setFunctionContextIndex(FI); 5265 return nullptr; 5266 } 5267 case Intrinsic::eh_sjlj_setjmp: { 5268 SDValue Ops[2]; 5269 Ops[0] = getRoot(); 5270 Ops[1] = getValue(I.getArgOperand(0)); 5271 SDValue Op = DAG.getNode(ISD::EH_SJLJ_SETJMP, sdl, 5272 DAG.getVTList(MVT::i32, MVT::Other), Ops); 5273 setValue(&I, Op.getValue(0)); 5274 DAG.setRoot(Op.getValue(1)); 5275 return nullptr; 5276 } 5277 case Intrinsic::eh_sjlj_longjmp: { 5278 DAG.setRoot(DAG.getNode(ISD::EH_SJLJ_LONGJMP, sdl, MVT::Other, 5279 getRoot(), getValue(I.getArgOperand(0)))); 5280 return nullptr; 5281 } 5282 case Intrinsic::eh_sjlj_setup_dispatch: { 5283 DAG.setRoot(DAG.getNode(ISD::EH_SJLJ_SETUP_DISPATCH, sdl, MVT::Other, 5284 getRoot())); 5285 return nullptr; 5286 } 5287 5288 case Intrinsic::masked_gather: 5289 visitMaskedGather(I); 5290 return nullptr; 5291 case Intrinsic::masked_load: 5292 visitMaskedLoad(I); 5293 return nullptr; 5294 case Intrinsic::masked_scatter: 5295 visitMaskedScatter(I); 5296 return nullptr; 5297 case Intrinsic::masked_store: 5298 visitMaskedStore(I); 5299 return nullptr; 5300 case Intrinsic::masked_expandload: 5301 visitMaskedLoad(I, true /* IsExpanding */); 5302 return nullptr; 5303 case Intrinsic::masked_compressstore: 5304 visitMaskedStore(I, true /* IsCompressing */); 5305 return nullptr; 5306 case Intrinsic::x86_mmx_pslli_w: 5307 case Intrinsic::x86_mmx_pslli_d: 5308 case Intrinsic::x86_mmx_pslli_q: 5309 case Intrinsic::x86_mmx_psrli_w: 5310 case Intrinsic::x86_mmx_psrli_d: 5311 case Intrinsic::x86_mmx_psrli_q: 5312 case Intrinsic::x86_mmx_psrai_w: 5313 case Intrinsic::x86_mmx_psrai_d: { 5314 SDValue ShAmt = getValue(I.getArgOperand(1)); 5315 if (isa<ConstantSDNode>(ShAmt)) { 5316 visitTargetIntrinsic(I, Intrinsic); 5317 return nullptr; 5318 } 5319 unsigned NewIntrinsic = 0; 5320 EVT ShAmtVT = MVT::v2i32; 5321 switch (Intrinsic) { 5322 case Intrinsic::x86_mmx_pslli_w: 5323 NewIntrinsic = Intrinsic::x86_mmx_psll_w; 5324 break; 5325 case 
Intrinsic::x86_mmx_pslli_d:
5326 NewIntrinsic = Intrinsic::x86_mmx_psll_d;
5327 break;
5328 case Intrinsic::x86_mmx_pslli_q:
5329 NewIntrinsic = Intrinsic::x86_mmx_psll_q;
5330 break;
5331 case Intrinsic::x86_mmx_psrli_w:
5332 NewIntrinsic = Intrinsic::x86_mmx_psrl_w;
5333 break;
5334 case Intrinsic::x86_mmx_psrli_d:
5335 NewIntrinsic = Intrinsic::x86_mmx_psrl_d;
5336 break;
5337 case Intrinsic::x86_mmx_psrli_q:
5338 NewIntrinsic = Intrinsic::x86_mmx_psrl_q;
5339 break;
5340 case Intrinsic::x86_mmx_psrai_w:
5341 NewIntrinsic = Intrinsic::x86_mmx_psra_w;
5342 break;
5343 case Intrinsic::x86_mmx_psrai_d:
5344 NewIntrinsic = Intrinsic::x86_mmx_psra_d;
5345 break;
5346 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
5347 }
5348
5349 // The vector shift intrinsics with scalars use 32b shift amounts but
5350 // the sse2/mmx shift instructions read 64 bits. Set the upper 32 bits
5351 // to be zero.
5352 // We must do this early because v2i32 is not a legal type.
5353 SDValue ShOps[2];
5354 ShOps[0] = ShAmt;
5355 ShOps[1] = DAG.getConstant(0, sdl, MVT::i32);
5356 ShAmt = DAG.getBuildVector(ShAmtVT, sdl, ShOps);
5357 EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
5358 ShAmt = DAG.getNode(ISD::BITCAST, sdl, DestVT, ShAmt);
5359 Res = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, sdl, DestVT,
5360 DAG.getConstant(NewIntrinsic, sdl, MVT::i32),
5361 getValue(I.getArgOperand(0)), ShAmt);
5362 setValue(&I, Res);
5363 return nullptr;
5364 }
5365 case Intrinsic::powi:
5366 setValue(&I, ExpandPowI(sdl, getValue(I.getArgOperand(0)),
5367 getValue(I.getArgOperand(1)), DAG));
5368 return nullptr;
5369 case Intrinsic::log:
5370 setValue(&I, expandLog(sdl, getValue(I.getArgOperand(0)), DAG, TLI));
5371 return nullptr;
5372 case Intrinsic::log2:
5373 setValue(&I, expandLog2(sdl, getValue(I.getArgOperand(0)), DAG, TLI));
5374 return nullptr;
5375 case Intrinsic::log10:
5376 setValue(&I, expandLog10(sdl, getValue(I.getArgOperand(0)), DAG, TLI));
5377 return nullptr;
5378 case Intrinsic::exp:
5379 setValue(&I, expandExp(sdl, getValue(I.getArgOperand(0)), DAG, TLI));
5380 return nullptr;
5381 case Intrinsic::exp2:
5382 setValue(&I, expandExp2(sdl, getValue(I.getArgOperand(0)), DAG, TLI));
5383 return nullptr;
5384 case Intrinsic::pow:
5385 setValue(&I, expandPow(sdl, getValue(I.getArgOperand(0)),
5386 getValue(I.getArgOperand(1)), DAG, TLI));
5387 return nullptr;
5388 case Intrinsic::sqrt:
5389 case Intrinsic::fabs:
5390 case Intrinsic::sin:
5391 case Intrinsic::cos:
5392 case Intrinsic::floor:
5393 case Intrinsic::ceil:
5394 case Intrinsic::trunc:
5395 case Intrinsic::rint:
5396 case Intrinsic::nearbyint:
5397 case Intrinsic::round:
5398 case Intrinsic::canonicalize: {
5399 unsigned Opcode;
5400 switch (Intrinsic) {
5401 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
5402 case Intrinsic::sqrt: Opcode = ISD::FSQRT; break; 5403 case Intrinsic::fabs: Opcode = ISD::FABS; break; 5404 case Intrinsic::sin: Opcode = ISD::FSIN; break; 5405 case Intrinsic::cos: Opcode = ISD::FCOS; break; 5406 case Intrinsic::floor: Opcode = ISD::FFLOOR; break; 5407 case Intrinsic::ceil: Opcode = ISD::FCEIL; break; 5408 case Intrinsic::trunc: Opcode = ISD::FTRUNC; break; 5409 case Intrinsic::rint: Opcode = ISD::FRINT; break; 5410 case Intrinsic::nearbyint: Opcode = ISD::FNEARBYINT; break; 5411 case Intrinsic::round: Opcode = ISD::FROUND; break; 5412 case Intrinsic::canonicalize: Opcode = ISD::FCANONICALIZE; break; 5413 } 5414 5415 setValue(&I, DAG.getNode(Opcode, sdl, 5416 getValue(I.getArgOperand(0)).getValueType(), 5417 getValue(I.getArgOperand(0)))); 5418 return nullptr; 5419 } 5420 case Intrinsic::minnum: { 5421 auto VT = getValue(I.getArgOperand(0)).getValueType(); 5422 unsigned Opc = 5423 I.hasNoNaNs() && TLI.isOperationLegalOrCustom(ISD::FMINNAN, VT) 5424 ? ISD::FMINNAN 5425 : ISD::FMINNUM; 5426 setValue(&I, DAG.getNode(Opc, sdl, VT, 5427 getValue(I.getArgOperand(0)), 5428 getValue(I.getArgOperand(1)))); 5429 return nullptr; 5430 } 5431 case Intrinsic::maxnum: { 5432 auto VT = getValue(I.getArgOperand(0)).getValueType(); 5433 unsigned Opc = 5434 I.hasNoNaNs() && TLI.isOperationLegalOrCustom(ISD::FMAXNAN, VT) 5435 ? ISD::FMAXNAN 5436 : ISD::FMAXNUM; 5437 setValue(&I, DAG.getNode(Opc, sdl, VT, 5438 getValue(I.getArgOperand(0)), 5439 getValue(I.getArgOperand(1)))); 5440 return nullptr; 5441 } 5442 case Intrinsic::copysign: 5443 setValue(&I, DAG.getNode(ISD::FCOPYSIGN, sdl, 5444 getValue(I.getArgOperand(0)).getValueType(), 5445 getValue(I.getArgOperand(0)), 5446 getValue(I.getArgOperand(1)))); 5447 return nullptr; 5448 case Intrinsic::fma: 5449 setValue(&I, DAG.getNode(ISD::FMA, sdl, 5450 getValue(I.getArgOperand(0)).getValueType(), 5451 getValue(I.getArgOperand(0)), 5452 getValue(I.getArgOperand(1)), 5453 getValue(I.getArgOperand(2)))); 5454 return nullptr; 5455 case Intrinsic::experimental_constrained_fadd: 5456 case Intrinsic::experimental_constrained_fsub: 5457 case Intrinsic::experimental_constrained_fmul: 5458 case Intrinsic::experimental_constrained_fdiv: 5459 case Intrinsic::experimental_constrained_frem: 5460 case Intrinsic::experimental_constrained_fma: 5461 case Intrinsic::experimental_constrained_sqrt: 5462 case Intrinsic::experimental_constrained_pow: 5463 case Intrinsic::experimental_constrained_powi: 5464 case Intrinsic::experimental_constrained_sin: 5465 case Intrinsic::experimental_constrained_cos: 5466 case Intrinsic::experimental_constrained_exp: 5467 case Intrinsic::experimental_constrained_exp2: 5468 case Intrinsic::experimental_constrained_log: 5469 case Intrinsic::experimental_constrained_log10: 5470 case Intrinsic::experimental_constrained_log2: 5471 case Intrinsic::experimental_constrained_rint: 5472 case Intrinsic::experimental_constrained_nearbyint: 5473 visitConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(I)); 5474 return nullptr; 5475 case Intrinsic::fmuladd: { 5476 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType()); 5477 if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict && 5478 TLI.isFMAFasterThanFMulAndFAdd(VT)) { 5479 setValue(&I, DAG.getNode(ISD::FMA, sdl, 5480 getValue(I.getArgOperand(0)).getValueType(), 5481 getValue(I.getArgOperand(0)), 5482 getValue(I.getArgOperand(1)), 5483 getValue(I.getArgOperand(2)))); 5484 } else { 5485 // TODO: Intrinsic calls should have fast-math-flags. 
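// Fall back to the unfused form: fmuladd(a, b, c) is emitted as the
// pair t = a * b; t + c below, with each step rounded separately, since
// fusing was either disallowed or reported as not faster by the target.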
5486 SDValue Mul = DAG.getNode(ISD::FMUL, sdl, 5487 getValue(I.getArgOperand(0)).getValueType(), 5488 getValue(I.getArgOperand(0)), 5489 getValue(I.getArgOperand(1))); 5490 SDValue Add = DAG.getNode(ISD::FADD, sdl, 5491 getValue(I.getArgOperand(0)).getValueType(), 5492 Mul, 5493 getValue(I.getArgOperand(2))); 5494 setValue(&I, Add); 5495 } 5496 return nullptr; 5497 } 5498 case Intrinsic::convert_to_fp16: 5499 setValue(&I, DAG.getNode(ISD::BITCAST, sdl, MVT::i16, 5500 DAG.getNode(ISD::FP_ROUND, sdl, MVT::f16, 5501 getValue(I.getArgOperand(0)), 5502 DAG.getTargetConstant(0, sdl, 5503 MVT::i32)))); 5504 return nullptr; 5505 case Intrinsic::convert_from_fp16: 5506 setValue(&I, DAG.getNode(ISD::FP_EXTEND, sdl, 5507 TLI.getValueType(DAG.getDataLayout(), I.getType()), 5508 DAG.getNode(ISD::BITCAST, sdl, MVT::f16, 5509 getValue(I.getArgOperand(0))))); 5510 return nullptr; 5511 case Intrinsic::pcmarker: { 5512 SDValue Tmp = getValue(I.getArgOperand(0)); 5513 DAG.setRoot(DAG.getNode(ISD::PCMARKER, sdl, MVT::Other, getRoot(), Tmp)); 5514 return nullptr; 5515 } 5516 case Intrinsic::readcyclecounter: { 5517 SDValue Op = getRoot(); 5518 Res = DAG.getNode(ISD::READCYCLECOUNTER, sdl, 5519 DAG.getVTList(MVT::i64, MVT::Other), Op); 5520 setValue(&I, Res); 5521 DAG.setRoot(Res.getValue(1)); 5522 return nullptr; 5523 } 5524 case Intrinsic::bitreverse: 5525 setValue(&I, DAG.getNode(ISD::BITREVERSE, sdl, 5526 getValue(I.getArgOperand(0)).getValueType(), 5527 getValue(I.getArgOperand(0)))); 5528 return nullptr; 5529 case Intrinsic::bswap: 5530 setValue(&I, DAG.getNode(ISD::BSWAP, sdl, 5531 getValue(I.getArgOperand(0)).getValueType(), 5532 getValue(I.getArgOperand(0)))); 5533 return nullptr; 5534 case Intrinsic::cttz: { 5535 SDValue Arg = getValue(I.getArgOperand(0)); 5536 ConstantInt *CI = cast<ConstantInt>(I.getArgOperand(1)); 5537 EVT Ty = Arg.getValueType(); 5538 setValue(&I, DAG.getNode(CI->isZero() ? ISD::CTTZ : ISD::CTTZ_ZERO_UNDEF, 5539 sdl, Ty, Arg)); 5540 return nullptr; 5541 } 5542 case Intrinsic::ctlz: { 5543 SDValue Arg = getValue(I.getArgOperand(0)); 5544 ConstantInt *CI = cast<ConstantInt>(I.getArgOperand(1)); 5545 EVT Ty = Arg.getValueType(); 5546 setValue(&I, DAG.getNode(CI->isZero() ? ISD::CTLZ : ISD::CTLZ_ZERO_UNDEF, 5547 sdl, Ty, Arg)); 5548 return nullptr; 5549 } 5550 case Intrinsic::ctpop: { 5551 SDValue Arg = getValue(I.getArgOperand(0)); 5552 EVT Ty = Arg.getValueType(); 5553 setValue(&I, DAG.getNode(ISD::CTPOP, sdl, Ty, Arg)); 5554 return nullptr; 5555 } 5556 case Intrinsic::stacksave: { 5557 SDValue Op = getRoot(); 5558 Res = DAG.getNode( 5559 ISD::STACKSAVE, sdl, 5560 DAG.getVTList(TLI.getPointerTy(DAG.getDataLayout()), MVT::Other), Op); 5561 setValue(&I, Res); 5562 DAG.setRoot(Res.getValue(1)); 5563 return nullptr; 5564 } 5565 case Intrinsic::stackrestore: { 5566 Res = getValue(I.getArgOperand(0)); 5567 DAG.setRoot(DAG.getNode(ISD::STACKRESTORE, sdl, MVT::Other, getRoot(), Res)); 5568 return nullptr; 5569 } 5570 case Intrinsic::get_dynamic_area_offset: { 5571 SDValue Op = getRoot(); 5572 EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout()); 5573 EVT ResTy = TLI.getValueType(DAG.getDataLayout(), I.getType()); 5574 // Result type for @llvm.get.dynamic.area.offset should match PtrTy for 5575 // target. 
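// (For example, on a target with 64-bit pointers a request for an i32
// result is rejected below rather than silently truncated.)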
5576 if (PtrTy != ResTy) 5577 report_fatal_error("Wrong result type for @llvm.get.dynamic.area.offset" 5578 " intrinsic!"); 5579 Res = DAG.getNode(ISD::GET_DYNAMIC_AREA_OFFSET, sdl, DAG.getVTList(ResTy), 5580 Op); 5581 DAG.setRoot(Op); 5582 setValue(&I, Res); 5583 return nullptr; 5584 } 5585 case Intrinsic::stackguard: { 5586 EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout()); 5587 MachineFunction &MF = DAG.getMachineFunction(); 5588 const Module &M = *MF.getFunction()->getParent(); 5589 SDValue Chain = getRoot(); 5590 if (TLI.useLoadStackGuardNode()) { 5591 Res = getLoadStackGuard(DAG, sdl, Chain); 5592 } else { 5593 const Value *Global = TLI.getSDagStackGuard(M); 5594 unsigned Align = DL->getPrefTypeAlignment(Global->getType()); 5595 Res = DAG.getLoad(PtrTy, sdl, Chain, getValue(Global), 5596 MachinePointerInfo(Global, 0), Align, 5597 MachineMemOperand::MOVolatile); 5598 } 5599 DAG.setRoot(Chain); 5600 setValue(&I, Res); 5601 return nullptr; 5602 } 5603 case Intrinsic::stackprotector: { 5604 // Emit code into the DAG to store the stack guard onto the stack. 5605 MachineFunction &MF = DAG.getMachineFunction(); 5606 MachineFrameInfo &MFI = MF.getFrameInfo(); 5607 EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout()); 5608 SDValue Src, Chain = getRoot(); 5609 5610 if (TLI.useLoadStackGuardNode()) 5611 Src = getLoadStackGuard(DAG, sdl, Chain); 5612 else 5613 Src = getValue(I.getArgOperand(0)); // The guard's value. 5614 5615 AllocaInst *Slot = cast<AllocaInst>(I.getArgOperand(1)); 5616 5617 int FI = FuncInfo.StaticAllocaMap[Slot]; 5618 MFI.setStackProtectorIndex(FI); 5619 5620 SDValue FIN = DAG.getFrameIndex(FI, PtrTy); 5621 5622 // Store the stack protector onto the stack. 5623 Res = DAG.getStore(Chain, sdl, Src, FIN, MachinePointerInfo::getFixedStack( 5624 DAG.getMachineFunction(), FI), 5625 /* Alignment = */ 0, MachineMemOperand::MOVolatile); 5626 setValue(&I, Res); 5627 DAG.setRoot(Res); 5628 return nullptr; 5629 } 5630 case Intrinsic::objectsize: { 5631 // If we don't know by now, we're never going to know. 5632 ConstantInt *CI = dyn_cast<ConstantInt>(I.getArgOperand(1)); 5633 5634 assert(CI && "Non-constant type in __builtin_object_size?"); 5635 5636 SDValue Arg = getValue(I.getCalledValue()); 5637 EVT Ty = Arg.getValueType(); 5638 5639 if (CI->isZero()) 5640 Res = DAG.getConstant(-1ULL, sdl, Ty); 5641 else 5642 Res = DAG.getConstant(0, sdl, Ty); 5643 5644 setValue(&I, Res); 5645 return nullptr; 5646 } 5647 case Intrinsic::annotation: 5648 case Intrinsic::ptr_annotation: 5649 case Intrinsic::invariant_group_barrier: 5650 // Drop the intrinsic, but forward the value 5651 setValue(&I, getValue(I.getOperand(0))); 5652 return nullptr; 5653 case Intrinsic::assume: 5654 case Intrinsic::var_annotation: 5655 // Discard annotate attributes and assumptions 5656 return nullptr; 5657 5658 case Intrinsic::codeview_annotation: { 5659 // Emit a label associated with this metadata. 
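// (The label/metadata pair recorded via addCodeViewAnnotation below is
// later emitted into the function's CodeView debug information.)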
5660 MachineFunction &MF = DAG.getMachineFunction(); 5661 MCSymbol *Label = 5662 MF.getMMI().getContext().createTempSymbol("annotation", true); 5663 Metadata *MD = cast<MetadataAsValue>(I.getArgOperand(0))->getMetadata(); 5664 MF.addCodeViewAnnotation(Label, cast<MDNode>(MD)); 5665 Res = DAG.getLabelNode(ISD::ANNOTATION_LABEL, sdl, getRoot(), Label); 5666 DAG.setRoot(Res); 5667 return nullptr; 5668 } 5669 5670 case Intrinsic::init_trampoline: { 5671 const Function *F = cast<Function>(I.getArgOperand(1)->stripPointerCasts()); 5672 5673 SDValue Ops[6]; 5674 Ops[0] = getRoot(); 5675 Ops[1] = getValue(I.getArgOperand(0)); 5676 Ops[2] = getValue(I.getArgOperand(1)); 5677 Ops[3] = getValue(I.getArgOperand(2)); 5678 Ops[4] = DAG.getSrcValue(I.getArgOperand(0)); 5679 Ops[5] = DAG.getSrcValue(F); 5680 5681 Res = DAG.getNode(ISD::INIT_TRAMPOLINE, sdl, MVT::Other, Ops); 5682 5683 DAG.setRoot(Res); 5684 return nullptr; 5685 } 5686 case Intrinsic::adjust_trampoline: { 5687 setValue(&I, DAG.getNode(ISD::ADJUST_TRAMPOLINE, sdl, 5688 TLI.getPointerTy(DAG.getDataLayout()), 5689 getValue(I.getArgOperand(0)))); 5690 return nullptr; 5691 } 5692 case Intrinsic::gcroot: { 5693 MachineFunction &MF = DAG.getMachineFunction(); 5694 const Function *F = MF.getFunction(); 5695 (void)F; 5696 assert(F->hasGC() && 5697 "only valid in functions with gc specified, enforced by Verifier"); 5698 assert(GFI && "implied by previous"); 5699 const Value *Alloca = I.getArgOperand(0)->stripPointerCasts(); 5700 const Constant *TypeMap = cast<Constant>(I.getArgOperand(1)); 5701 5702 FrameIndexSDNode *FI = cast<FrameIndexSDNode>(getValue(Alloca).getNode()); 5703 GFI->addStackRoot(FI->getIndex(), TypeMap); 5704 return nullptr; 5705 } 5706 case Intrinsic::gcread: 5707 case Intrinsic::gcwrite: 5708 llvm_unreachable("GC failed to lower gcread/gcwrite intrinsics!"); 5709 case Intrinsic::flt_rounds: 5710 setValue(&I, DAG.getNode(ISD::FLT_ROUNDS_, sdl, MVT::i32)); 5711 return nullptr; 5712 5713 case Intrinsic::expect: { 5714 // Just replace __builtin_expect(exp, c) with EXP. 5715 setValue(&I, getValue(I.getArgOperand(0))); 5716 return nullptr; 5717 } 5718 5719 case Intrinsic::debugtrap: 5720 case Intrinsic::trap: { 5721 StringRef TrapFuncName = 5722 I.getAttributes() 5723 .getAttribute(AttributeList::FunctionIndex, "trap-func-name") 5724 .getValueAsString(); 5725 if (TrapFuncName.empty()) { 5726 ISD::NodeType Op = (Intrinsic == Intrinsic::trap) ? 5727 ISD::TRAP : ISD::DEBUGTRAP; 5728 DAG.setRoot(DAG.getNode(Op, sdl,MVT::Other, getRoot())); 5729 return nullptr; 5730 } 5731 TargetLowering::ArgListTy Args; 5732 5733 TargetLowering::CallLoweringInfo CLI(DAG); 5734 CLI.setDebugLoc(sdl).setChain(getRoot()).setLibCallee( 5735 CallingConv::C, I.getType(), 5736 DAG.getExternalSymbol(TrapFuncName.data(), 5737 TLI.getPointerTy(DAG.getDataLayout())), 5738 std::move(Args)); 5739 5740 std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI); 5741 DAG.setRoot(Result.second); 5742 return nullptr; 5743 } 5744 5745 case Intrinsic::uadd_with_overflow: 5746 case Intrinsic::sadd_with_overflow: 5747 case Intrinsic::usub_with_overflow: 5748 case Intrinsic::ssub_with_overflow: 5749 case Intrinsic::umul_with_overflow: 5750 case Intrinsic::smul_with_overflow: { 5751 ISD::NodeType Op; 5752 switch (Intrinsic) { 5753 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here. 
5754 case Intrinsic::uadd_with_overflow: Op = ISD::UADDO; break; 5755 case Intrinsic::sadd_with_overflow: Op = ISD::SADDO; break; 5756 case Intrinsic::usub_with_overflow: Op = ISD::USUBO; break; 5757 case Intrinsic::ssub_with_overflow: Op = ISD::SSUBO; break; 5758 case Intrinsic::umul_with_overflow: Op = ISD::UMULO; break; 5759 case Intrinsic::smul_with_overflow: Op = ISD::SMULO; break; 5760 } 5761 SDValue Op1 = getValue(I.getArgOperand(0)); 5762 SDValue Op2 = getValue(I.getArgOperand(1)); 5763 5764 SDVTList VTs = DAG.getVTList(Op1.getValueType(), MVT::i1); 5765 setValue(&I, DAG.getNode(Op, sdl, VTs, Op1, Op2)); 5766 return nullptr; 5767 } 5768 case Intrinsic::prefetch: { 5769 SDValue Ops[5]; 5770 unsigned rw = cast<ConstantInt>(I.getArgOperand(1))->getZExtValue(); 5771 Ops[0] = getRoot(); 5772 Ops[1] = getValue(I.getArgOperand(0)); 5773 Ops[2] = getValue(I.getArgOperand(1)); 5774 Ops[3] = getValue(I.getArgOperand(2)); 5775 Ops[4] = getValue(I.getArgOperand(3)); 5776 DAG.setRoot(DAG.getMemIntrinsicNode(ISD::PREFETCH, sdl, 5777 DAG.getVTList(MVT::Other), Ops, 5778 EVT::getIntegerVT(*Context, 8), 5779 MachinePointerInfo(I.getArgOperand(0)), 5780 0, /* align */ 5781 false, /* volatile */ 5782 rw==0, /* read */ 5783 rw==1)); /* write */ 5784 return nullptr; 5785 } 5786 case Intrinsic::lifetime_start: 5787 case Intrinsic::lifetime_end: { 5788 bool IsStart = (Intrinsic == Intrinsic::lifetime_start); 5789 // Stack coloring is not enabled in O0, discard region information. 5790 if (TM.getOptLevel() == CodeGenOpt::None) 5791 return nullptr; 5792 5793 SmallVector<Value *, 4> Allocas; 5794 GetUnderlyingObjects(I.getArgOperand(1), Allocas, *DL); 5795 5796 for (SmallVectorImpl<Value*>::iterator Object = Allocas.begin(), 5797 E = Allocas.end(); Object != E; ++Object) { 5798 AllocaInst *LifetimeObject = dyn_cast_or_null<AllocaInst>(*Object); 5799 5800 // Could not find an Alloca. 5801 if (!LifetimeObject) 5802 continue; 5803 5804 // First check that the Alloca is static, otherwise it won't have a 5805 // valid frame index. 5806 auto SI = FuncInfo.StaticAllocaMap.find(LifetimeObject); 5807 if (SI == FuncInfo.StaticAllocaMap.end()) 5808 return nullptr; 5809 5810 int FI = SI->second; 5811 5812 SDValue Ops[2]; 5813 Ops[0] = getRoot(); 5814 Ops[1] = 5815 DAG.getFrameIndex(FI, TLI.getFrameIndexTy(DAG.getDataLayout()), true); 5816 unsigned Opcode = (IsStart ? ISD::LIFETIME_START : ISD::LIFETIME_END); 5817 5818 Res = DAG.getNode(Opcode, sdl, MVT::Other, Ops); 5819 DAG.setRoot(Res); 5820 } 5821 return nullptr; 5822 } 5823 case Intrinsic::invariant_start: 5824 // Discard region information. 5825 setValue(&I, DAG.getUNDEF(TLI.getPointerTy(DAG.getDataLayout()))); 5826 return nullptr; 5827 case Intrinsic::invariant_end: 5828 // Discard region information. 
5829 return nullptr; 5830 case Intrinsic::clear_cache: 5831 return TLI.getClearCacheBuiltinName(); 5832 case Intrinsic::donothing: 5833 // ignore 5834 return nullptr; 5835 case Intrinsic::experimental_stackmap: { 5836 visitStackmap(I); 5837 return nullptr; 5838 } 5839 case Intrinsic::experimental_patchpoint_void: 5840 case Intrinsic::experimental_patchpoint_i64: { 5841 visitPatchpoint(&I); 5842 return nullptr; 5843 } 5844 case Intrinsic::experimental_gc_statepoint: { 5845 LowerStatepoint(ImmutableStatepoint(&I)); 5846 return nullptr; 5847 } 5848 case Intrinsic::experimental_gc_result: { 5849 visitGCResult(cast<GCResultInst>(I)); 5850 return nullptr; 5851 } 5852 case Intrinsic::experimental_gc_relocate: { 5853 visitGCRelocate(cast<GCRelocateInst>(I)); 5854 return nullptr; 5855 } 5856 case Intrinsic::instrprof_increment: 5857 llvm_unreachable("instrprof failed to lower an increment"); 5858 case Intrinsic::instrprof_value_profile: 5859 llvm_unreachable("instrprof failed to lower a value profiling call"); 5860 case Intrinsic::localescape: { 5861 MachineFunction &MF = DAG.getMachineFunction(); 5862 const TargetInstrInfo *TII = DAG.getSubtarget().getInstrInfo(); 5863 5864 // Directly emit some LOCAL_ESCAPE machine instrs. Label assignment emission 5865 // is the same on all targets. 5866 for (unsigned Idx = 0, E = I.getNumArgOperands(); Idx < E; ++Idx) { 5867 Value *Arg = I.getArgOperand(Idx)->stripPointerCasts(); 5868 if (isa<ConstantPointerNull>(Arg)) 5869 continue; // Skip null pointers. They represent a hole in index space. 5870 AllocaInst *Slot = cast<AllocaInst>(Arg); 5871 assert(FuncInfo.StaticAllocaMap.count(Slot) && 5872 "can only escape static allocas"); 5873 int FI = FuncInfo.StaticAllocaMap[Slot]; 5874 MCSymbol *FrameAllocSym = 5875 MF.getMMI().getContext().getOrCreateFrameAllocSymbol( 5876 GlobalValue::dropLLVMManglingEscape(MF.getName()), Idx); 5877 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, dl, 5878 TII->get(TargetOpcode::LOCAL_ESCAPE)) 5879 .addSym(FrameAllocSym) 5880 .addFrameIndex(FI); 5881 } 5882 5883 return nullptr; 5884 } 5885 5886 case Intrinsic::localrecover: { 5887 // i8* @llvm.localrecover(i8* %fn, i8* %fp, i32 %idx) 5888 MachineFunction &MF = DAG.getMachineFunction(); 5889 MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout(), 0); 5890 5891 // Get the symbol that defines the frame offset. 5892 auto *Fn = cast<Function>(I.getArgOperand(0)->stripPointerCasts()); 5893 auto *Idx = cast<ConstantInt>(I.getArgOperand(2)); 5894 unsigned IdxVal = unsigned(Idx->getLimitedValue(INT_MAX)); 5895 MCSymbol *FrameAllocSym = 5896 MF.getMMI().getContext().getOrCreateFrameAllocSymbol( 5897 GlobalValue::dropLLVMManglingEscape(Fn->getName()), IdxVal); 5898 5899 // Create a MCSymbol for the label to avoid any target lowering 5900 // that would make this PC relative. 5901 SDValue OffsetSym = DAG.getMCSymbol(FrameAllocSym, PtrVT); 5902 SDValue OffsetVal = 5903 DAG.getNode(ISD::LOCAL_RECOVER, sdl, PtrVT, OffsetSym); 5904 5905 // Add the offset to the FP. 5906 Value *FP = I.getArgOperand(1); 5907 SDValue FPVal = getValue(FP); 5908 SDValue Add = DAG.getNode(ISD::ADD, sdl, PtrVT, FPVal, OffsetVal); 5909 setValue(&I, Add); 5910 5911 return nullptr; 5912 } 5913 5914 case Intrinsic::eh_exceptionpointer: 5915 case Intrinsic::eh_exceptioncode: { 5916 // Get the exception pointer vreg, copy from it, and resize it to fit. 
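// (Both intrinsics start from the same pointer-sized copy; for
// eh.exceptioncode the value is then zero-extended or truncated to i32
// below, e.g. truncated from an i64 copy on a 64-bit target.)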
5917 const auto *CPI = cast<CatchPadInst>(I.getArgOperand(0)); 5918 MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout()); 5919 const TargetRegisterClass *PtrRC = TLI.getRegClassFor(PtrVT); 5920 unsigned VReg = FuncInfo.getCatchPadExceptionPointerVReg(CPI, PtrRC); 5921 SDValue N = 5922 DAG.getCopyFromReg(DAG.getEntryNode(), getCurSDLoc(), VReg, PtrVT); 5923 if (Intrinsic == Intrinsic::eh_exceptioncode) 5924 N = DAG.getZExtOrTrunc(N, getCurSDLoc(), MVT::i32); 5925 setValue(&I, N); 5926 return nullptr; 5927 } 5928 case Intrinsic::xray_customevent: { 5929 // Here we want to make sure that the intrinsic behaves as if it has a 5930 // specific calling convention, and only for x86_64. 5931 // FIXME: Support other platforms later. 5932 const auto &Triple = DAG.getTarget().getTargetTriple(); 5933 if (Triple.getArch() != Triple::x86_64 || !Triple.isOSLinux()) 5934 return nullptr; 5935 5936 SDLoc DL = getCurSDLoc(); 5937 SmallVector<SDValue, 8> Ops; 5938 5939 // We want to say that we always want the arguments in registers. 5940 SDValue LogEntryVal = getValue(I.getArgOperand(0)); 5941 SDValue StrSizeVal = getValue(I.getArgOperand(1)); 5942 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); 5943 SDValue Chain = getRoot(); 5944 Ops.push_back(LogEntryVal); 5945 Ops.push_back(StrSizeVal); 5946 Ops.push_back(Chain); 5947 5948 // We need to enforce the calling convention for the callsite, so that 5949 // argument ordering is enforced correctly, and that register allocation can 5950 // see that some registers may be assumed clobbered and have to preserve 5951 // them across calls to the intrinsic. 5952 MachineSDNode *MN = DAG.getMachineNode(TargetOpcode::PATCHABLE_EVENT_CALL, 5953 DL, NodeTys, Ops); 5954 SDValue patchableNode = SDValue(MN, 0); 5955 DAG.setRoot(patchableNode); 5956 setValue(&I, patchableNode); 5957 return nullptr; 5958 } 5959 case Intrinsic::experimental_deoptimize: 5960 LowerDeoptimizeCall(&I); 5961 return nullptr; 5962 5963 case Intrinsic::experimental_vector_reduce_fadd: 5964 case Intrinsic::experimental_vector_reduce_fmul: 5965 case Intrinsic::experimental_vector_reduce_add: 5966 case Intrinsic::experimental_vector_reduce_mul: 5967 case Intrinsic::experimental_vector_reduce_and: 5968 case Intrinsic::experimental_vector_reduce_or: 5969 case Intrinsic::experimental_vector_reduce_xor: 5970 case Intrinsic::experimental_vector_reduce_smax: 5971 case Intrinsic::experimental_vector_reduce_smin: 5972 case Intrinsic::experimental_vector_reduce_umax: 5973 case Intrinsic::experimental_vector_reduce_umin: 5974 case Intrinsic::experimental_vector_reduce_fmax: 5975 case Intrinsic::experimental_vector_reduce_fmin: { 5976 visitVectorReduce(I, Intrinsic); 5977 return nullptr; 5978 } 5979 5980 } 5981 } 5982 5983 void SelectionDAGBuilder::visitConstrainedFPIntrinsic( 5984 const ConstrainedFPIntrinsic &FPI) { 5985 SDLoc sdl = getCurSDLoc(); 5986 unsigned Opcode; 5987 switch (FPI.getIntrinsicID()) { 5988 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here. 
5989 case Intrinsic::experimental_constrained_fadd: 5990 Opcode = ISD::STRICT_FADD; 5991 break; 5992 case Intrinsic::experimental_constrained_fsub: 5993 Opcode = ISD::STRICT_FSUB; 5994 break; 5995 case Intrinsic::experimental_constrained_fmul: 5996 Opcode = ISD::STRICT_FMUL; 5997 break; 5998 case Intrinsic::experimental_constrained_fdiv: 5999 Opcode = ISD::STRICT_FDIV; 6000 break; 6001 case Intrinsic::experimental_constrained_frem: 6002 Opcode = ISD::STRICT_FREM; 6003 break; 6004 case Intrinsic::experimental_constrained_fma: 6005 Opcode = ISD::STRICT_FMA; 6006 break; 6007 case Intrinsic::experimental_constrained_sqrt: 6008 Opcode = ISD::STRICT_FSQRT; 6009 break; 6010 case Intrinsic::experimental_constrained_pow: 6011 Opcode = ISD::STRICT_FPOW; 6012 break; 6013 case Intrinsic::experimental_constrained_powi: 6014 Opcode = ISD::STRICT_FPOWI; 6015 break; 6016 case Intrinsic::experimental_constrained_sin: 6017 Opcode = ISD::STRICT_FSIN; 6018 break; 6019 case Intrinsic::experimental_constrained_cos: 6020 Opcode = ISD::STRICT_FCOS; 6021 break; 6022 case Intrinsic::experimental_constrained_exp: 6023 Opcode = ISD::STRICT_FEXP; 6024 break; 6025 case Intrinsic::experimental_constrained_exp2: 6026 Opcode = ISD::STRICT_FEXP2; 6027 break; 6028 case Intrinsic::experimental_constrained_log: 6029 Opcode = ISD::STRICT_FLOG; 6030 break; 6031 case Intrinsic::experimental_constrained_log10: 6032 Opcode = ISD::STRICT_FLOG10; 6033 break; 6034 case Intrinsic::experimental_constrained_log2: 6035 Opcode = ISD::STRICT_FLOG2; 6036 break; 6037 case Intrinsic::experimental_constrained_rint: 6038 Opcode = ISD::STRICT_FRINT; 6039 break; 6040 case Intrinsic::experimental_constrained_nearbyint: 6041 Opcode = ISD::STRICT_FNEARBYINT; 6042 break; 6043 } 6044 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 6045 SDValue Chain = getRoot(); 6046 SmallVector<EVT, 4> ValueVTs; 6047 ComputeValueVTs(TLI, DAG.getDataLayout(), FPI.getType(), ValueVTs); 6048 ValueVTs.push_back(MVT::Other); // Out chain 6049 6050 SDVTList VTs = DAG.getVTList(ValueVTs); 6051 SDValue Result; 6052 if (FPI.isUnaryOp()) 6053 Result = DAG.getNode(Opcode, sdl, VTs, 6054 { Chain, getValue(FPI.getArgOperand(0)) }); 6055 else if (FPI.isTernaryOp()) 6056 Result = DAG.getNode(Opcode, sdl, VTs, 6057 { Chain, getValue(FPI.getArgOperand(0)), 6058 getValue(FPI.getArgOperand(1)), 6059 getValue(FPI.getArgOperand(2)) }); 6060 else 6061 Result = DAG.getNode(Opcode, sdl, VTs, 6062 { Chain, getValue(FPI.getArgOperand(0)), 6063 getValue(FPI.getArgOperand(1)) }); 6064 6065 assert(Result.getNode()->getNumValues() == 2); 6066 SDValue OutChain = Result.getValue(1); 6067 DAG.setRoot(OutChain); 6068 SDValue FPResult = Result.getValue(0); 6069 setValue(&FPI, FPResult); 6070 } 6071 6072 std::pair<SDValue, SDValue> 6073 SelectionDAGBuilder::lowerInvokable(TargetLowering::CallLoweringInfo &CLI, 6074 const BasicBlock *EHPadBB) { 6075 MachineFunction &MF = DAG.getMachineFunction(); 6076 MachineModuleInfo &MMI = MF.getMMI(); 6077 MCSymbol *BeginLabel = nullptr; 6078 6079 if (EHPadBB) { 6080 // Insert a label before the invoke call to mark the try range. This can be 6081 // used to detect deletion of the invoke via the MachineModuleInfo. 6082 BeginLabel = MMI.getContext().createTempSymbol(); 6083 6084 // For SjLj, keep track of which landing pads go with which invokes 6085 // so as to maintain the ordering of pads in the LSDA. 
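// The index consumed here was recorded by an earlier @llvm.eh.sjlj.callsite
// intrinsic (see visitIntrinsicCall), and is reset to 0 below once it has
// been associated with the begin label.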
6086 unsigned CallSiteIndex = MMI.getCurrentCallSite(); 6087 if (CallSiteIndex) { 6088 MF.setCallSiteBeginLabel(BeginLabel, CallSiteIndex); 6089 LPadToCallSiteMap[FuncInfo.MBBMap[EHPadBB]].push_back(CallSiteIndex); 6090 6091 // Now that the call site is handled, stop tracking it. 6092 MMI.setCurrentCallSite(0); 6093 } 6094 6095 // Both PendingLoads and PendingExports must be flushed here; 6096 // this call might not return. 6097 (void)getRoot(); 6098 DAG.setRoot(DAG.getEHLabel(getCurSDLoc(), getControlRoot(), BeginLabel)); 6099 6100 CLI.setChain(getRoot()); 6101 } 6102 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 6103 std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI); 6104 6105 assert((CLI.IsTailCall || Result.second.getNode()) && 6106 "Non-null chain expected with non-tail call!"); 6107 assert((Result.second.getNode() || !Result.first.getNode()) && 6108 "Null value expected with tail call!"); 6109 6110 if (!Result.second.getNode()) { 6111 // As a special case, a null chain means that a tail call has been emitted 6112 // and the DAG root is already updated. 6113 HasTailCall = true; 6114 6115 // Since there's no actual continuation from this block, nothing can be 6116 // relying on us setting vregs for them. 6117 PendingExports.clear(); 6118 } else { 6119 DAG.setRoot(Result.second); 6120 } 6121 6122 if (EHPadBB) { 6123 // Insert a label at the end of the invoke call to mark the try range. This 6124 // can be used to detect deletion of the invoke via the MachineModuleInfo. 6125 MCSymbol *EndLabel = MMI.getContext().createTempSymbol(); 6126 DAG.setRoot(DAG.getEHLabel(getCurSDLoc(), getRoot(), EndLabel)); 6127 6128 // Inform MachineModuleInfo of range. 6129 if (MF.hasEHFunclets()) { 6130 assert(CLI.CS); 6131 WinEHFuncInfo *EHInfo = DAG.getMachineFunction().getWinEHFuncInfo(); 6132 EHInfo->addIPToStateRange(cast<InvokeInst>(CLI.CS.getInstruction()), 6133 BeginLabel, EndLabel); 6134 } else { 6135 MF.addInvoke(FuncInfo.MBBMap[EHPadBB], BeginLabel, EndLabel); 6136 } 6137 } 6138 6139 return Result; 6140 } 6141 6142 void SelectionDAGBuilder::LowerCallTo(ImmutableCallSite CS, SDValue Callee, 6143 bool isTailCall, 6144 const BasicBlock *EHPadBB) { 6145 auto &DL = DAG.getDataLayout(); 6146 FunctionType *FTy = CS.getFunctionType(); 6147 Type *RetTy = CS.getType(); 6148 6149 TargetLowering::ArgListTy Args; 6150 Args.reserve(CS.arg_size()); 6151 6152 const Value *SwiftErrorVal = nullptr; 6153 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 6154 6155 // We can't tail call inside a function with a swifterror argument. Lowering 6156 // does not support this yet. It would have to move into the swifterror 6157 // register before the call. 6158 auto *Caller = CS.getInstruction()->getParent()->getParent(); 6159 if (TLI.supportSwiftError() && 6160 Caller->getAttributes().hasAttrSomewhere(Attribute::SwiftError)) 6161 isTailCall = false; 6162 6163 for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end(); 6164 i != e; ++i) { 6165 TargetLowering::ArgListEntry Entry; 6166 const Value *V = *i; 6167 6168 // Skip empty types 6169 if (V->getType()->isEmptyTy()) 6170 continue; 6171 6172 SDValue ArgNode = getValue(V); 6173 Entry.Node = ArgNode; Entry.Ty = V->getType(); 6174 6175 Entry.setAttributes(&CS, i - CS.arg_begin()); 6176 6177 // Use swifterror virtual register as input to the call. 6178 if (Entry.IsSwiftError && TLI.supportSwiftError()) { 6179 SwiftErrorVal = V; 6180 // We find the virtual register for the actual swifterror argument. 
// Rather than the Value itself, we pass that virtual register into the call.
6182 Entry.Node = DAG.getRegister(FuncInfo
6183 .getOrCreateSwiftErrorVRegUseAt(
6184 CS.getInstruction(), FuncInfo.MBB, V)
6185 .first,
6186 EVT(TLI.getPointerTy(DL)));
6187 }
6188 
6189 Args.push_back(Entry);
6190 
6191 // If we have an explicit sret argument that is an Instruction (i.e., it
6192 // might point to function-local memory), we can't meaningfully tail-call.
6193 if (Entry.IsSRet && isa<Instruction>(V))
6194 isTailCall = false;
6195 }
6196 
6197 // Check if target-independent constraints permit a tail call here.
6198 // Target-dependent constraints are checked within TLI->LowerCallTo.
6199 if (isTailCall && !isInTailCallPosition(CS, DAG.getTarget()))
6200 isTailCall = false;
6201 
6202 // Disable tail calls if there is a swifterror argument. Targets have not
6203 // been updated to support tail calls.
6204 if (TLI.supportSwiftError() && SwiftErrorVal)
6205 isTailCall = false;
6206 
6207 TargetLowering::CallLoweringInfo CLI(DAG);
6208 CLI.setDebugLoc(getCurSDLoc())
6209 .setChain(getRoot())
6210 .setCallee(RetTy, FTy, Callee, std::move(Args), CS)
6211 .setTailCall(isTailCall)
6212 .setConvergent(CS.isConvergent());
6213 std::pair<SDValue, SDValue> Result = lowerInvokable(CLI, EHPadBB);
6214 
6215 if (Result.first.getNode()) {
6216 const Instruction *Inst = CS.getInstruction();
6217 Result.first = lowerRangeToAssertZExt(DAG, *Inst, Result.first);
6218 setValue(Inst, Result.first);
6219 }
6220 
6221 // The last element of CLI.InVals has the SDValue for the swifterror return.
6222 // Here we copy it to a virtual register and update SwiftErrorMap for
6223 // book-keeping.
6224 if (SwiftErrorVal && TLI.supportSwiftError()) {
6225 // Get the last element of InVals.
6226 SDValue Src = CLI.InVals.back();
6227 unsigned VReg; bool CreatedVReg;
6228 std::tie(VReg, CreatedVReg) =
6229 FuncInfo.getOrCreateSwiftErrorVRegDefAt(CS.getInstruction());
6230 SDValue CopyNode = CLI.DAG.getCopyToReg(Result.second, CLI.DL, VReg, Src);
6231 // We update the virtual register for the actual swifterror argument.
6232 if (CreatedVReg)
6233 FuncInfo.setCurrentSwiftErrorVReg(FuncInfo.MBB, SwiftErrorVal, VReg);
6234 DAG.setRoot(CopyNode);
6235 }
6236 }
6237 
6238 static SDValue getMemCmpLoad(const Value *PtrVal, MVT LoadVT,
6239 SelectionDAGBuilder &Builder) {
6240 
6241 // Check to see if this load can be trivially constant folded, e.g. if the
6242 // input is from a string literal.
6243 if (const Constant *LoadInput = dyn_cast<Constant>(PtrVal)) {
6244 // Cast pointer to the type we really want to load.
6245 Type *LoadTy =
6246 Type::getIntNTy(PtrVal->getContext(), LoadVT.getScalarSizeInBits());
6247 if (LoadVT.isVector())
6248 LoadTy = VectorType::get(LoadTy, LoadVT.getVectorNumElements());
6249 
6250 LoadInput = ConstantExpr::getBitCast(const_cast<Constant *>(LoadInput),
6251 PointerType::getUnqual(LoadTy));
6252 
6253 if (const Constant *LoadCst = ConstantFoldLoadFromConstPtr(
6254 const_cast<Constant *>(LoadInput), LoadTy, *Builder.DL))
6255 return Builder.getValue(LoadCst);
6256 }
6257 
6258 // Otherwise, we have to emit the load. If the pointer is to unfoldable but
6259 // still constant memory, the input chain can be the entry node.
6260 SDValue Root;
6261 bool ConstantMemory = false;
6262 
6263 // Do not serialize (non-volatile) loads of constant memory with anything.
6264 if (Builder.AA && Builder.AA->pointsToConstantMemory(PtrVal)) { 6265 Root = Builder.DAG.getEntryNode(); 6266 ConstantMemory = true; 6267 } else { 6268 // Do not serialize non-volatile loads against each other. 6269 Root = Builder.DAG.getRoot(); 6270 } 6271 6272 SDValue Ptr = Builder.getValue(PtrVal); 6273 SDValue LoadVal = Builder.DAG.getLoad(LoadVT, Builder.getCurSDLoc(), Root, 6274 Ptr, MachinePointerInfo(PtrVal), 6275 /* Alignment = */ 1); 6276 6277 if (!ConstantMemory) 6278 Builder.PendingLoads.push_back(LoadVal.getValue(1)); 6279 return LoadVal; 6280 } 6281 6282 /// Record the value for an instruction that produces an integer result, 6283 /// converting the type where necessary. 6284 void SelectionDAGBuilder::processIntegerCallValue(const Instruction &I, 6285 SDValue Value, 6286 bool IsSigned) { 6287 EVT VT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(), 6288 I.getType(), true); 6289 if (IsSigned) 6290 Value = DAG.getSExtOrTrunc(Value, getCurSDLoc(), VT); 6291 else 6292 Value = DAG.getZExtOrTrunc(Value, getCurSDLoc(), VT); 6293 setValue(&I, Value); 6294 } 6295 6296 /// See if we can lower a memcmp call into an optimized form. If so, return 6297 /// true and lower it. Otherwise return false, and it will be lowered like a 6298 /// normal call. 6299 /// The caller already checked that \p I calls the appropriate LibFunc with a 6300 /// correct prototype. 6301 bool SelectionDAGBuilder::visitMemCmpCall(const CallInst &I) { 6302 const Value *LHS = I.getArgOperand(0), *RHS = I.getArgOperand(1); 6303 const Value *Size = I.getArgOperand(2); 6304 const ConstantInt *CSize = dyn_cast<ConstantInt>(Size); 6305 if (CSize && CSize->getZExtValue() == 0) { 6306 EVT CallVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(), 6307 I.getType(), true); 6308 setValue(&I, DAG.getConstant(0, getCurSDLoc(), CallVT)); 6309 return true; 6310 } 6311 6312 const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo(); 6313 std::pair<SDValue, SDValue> Res = TSI.EmitTargetCodeForMemcmp( 6314 DAG, getCurSDLoc(), DAG.getRoot(), getValue(LHS), getValue(RHS), 6315 getValue(Size), MachinePointerInfo(LHS), MachinePointerInfo(RHS)); 6316 if (Res.first.getNode()) { 6317 processIntegerCallValue(I, Res.first, true); 6318 PendingLoads.push_back(Res.second); 6319 return true; 6320 } 6321 6322 // memcmp(S1,S2,2) != 0 -> (*(short*)LHS != *(short*)RHS) != 0 6323 // memcmp(S1,S2,4) != 0 -> (*(int*)LHS != *(int*)RHS) != 0 6324 if (!CSize || !isOnlyUsedInZeroEqualityComparison(&I)) 6325 return false; 6326 6327 // If the target has a fast compare for the given size, it will return a 6328 // preferred load type for that size. Require that the load VT is legal and 6329 // that the target supports unaligned loads of that type. Otherwise, return 6330 // INVALID. 6331 auto hasFastLoadsAndCompare = [&](unsigned NumBits) { 6332 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 6333 MVT LVT = TLI.hasFastEqualityCompare(NumBits); 6334 if (LVT != MVT::INVALID_SIMPLE_VALUE_TYPE) { 6335 // TODO: Handle 5 byte compare as 4-byte + 1 byte. 6336 // TODO: Handle 8 byte compare on x86-32 as two 32-bit loads. 6337 // TODO: Check alignment of src and dest ptrs. 
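// For intuition (hypothetical IR): with a constant size of 8,
//   %c = call i32 @memcmp(i8* %p, i8* %q, i64 8)
//   %z = icmp eq i32 %c, 0
// can become two i64 loads feeding a single SETNE, provided the target
// reports a fast 64-bit equality compare, i64 is legal, and misaligned i64
// loads are allowed in both address spaces (checked below).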
6338 unsigned DstAS = LHS->getType()->getPointerAddressSpace(); 6339 unsigned SrcAS = RHS->getType()->getPointerAddressSpace(); 6340 if (!TLI.isTypeLegal(LVT) || 6341 !TLI.allowsMisalignedMemoryAccesses(LVT, SrcAS) || 6342 !TLI.allowsMisalignedMemoryAccesses(LVT, DstAS)) 6343 LVT = MVT::INVALID_SIMPLE_VALUE_TYPE; 6344 } 6345 6346 return LVT; 6347 }; 6348 6349 // This turns into unaligned loads. We only do this if the target natively 6350 // supports the MVT we'll be loading or if it is small enough (<= 4) that 6351 // we'll only produce a small number of byte loads. 6352 MVT LoadVT; 6353 unsigned NumBitsToCompare = CSize->getZExtValue() * 8; 6354 switch (NumBitsToCompare) { 6355 default: 6356 return false; 6357 case 16: 6358 LoadVT = MVT::i16; 6359 break; 6360 case 32: 6361 LoadVT = MVT::i32; 6362 break; 6363 case 64: 6364 case 128: 6365 case 256: 6366 LoadVT = hasFastLoadsAndCompare(NumBitsToCompare); 6367 break; 6368 } 6369 6370 if (LoadVT == MVT::INVALID_SIMPLE_VALUE_TYPE) 6371 return false; 6372 6373 SDValue LoadL = getMemCmpLoad(LHS, LoadVT, *this); 6374 SDValue LoadR = getMemCmpLoad(RHS, LoadVT, *this); 6375 6376 // Bitcast to a wide integer type if the loads are vectors. 6377 if (LoadVT.isVector()) { 6378 EVT CmpVT = EVT::getIntegerVT(LHS->getContext(), LoadVT.getSizeInBits()); 6379 LoadL = DAG.getBitcast(CmpVT, LoadL); 6380 LoadR = DAG.getBitcast(CmpVT, LoadR); 6381 } 6382 6383 SDValue Cmp = DAG.getSetCC(getCurSDLoc(), MVT::i1, LoadL, LoadR, ISD::SETNE); 6384 processIntegerCallValue(I, Cmp, false); 6385 return true; 6386 } 6387 6388 /// See if we can lower a memchr call into an optimized form. If so, return 6389 /// true and lower it. Otherwise return false, and it will be lowered like a 6390 /// normal call. 6391 /// The caller already checked that \p I calls the appropriate LibFunc with a 6392 /// correct prototype. 6393 bool SelectionDAGBuilder::visitMemChrCall(const CallInst &I) { 6394 const Value *Src = I.getArgOperand(0); 6395 const Value *Char = I.getArgOperand(1); 6396 const Value *Length = I.getArgOperand(2); 6397 6398 const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo(); 6399 std::pair<SDValue, SDValue> Res = 6400 TSI.EmitTargetCodeForMemchr(DAG, getCurSDLoc(), DAG.getRoot(), 6401 getValue(Src), getValue(Char), getValue(Length), 6402 MachinePointerInfo(Src)); 6403 if (Res.first.getNode()) { 6404 setValue(&I, Res.first); 6405 PendingLoads.push_back(Res.second); 6406 return true; 6407 } 6408 6409 return false; 6410 } 6411 6412 /// See if we can lower a mempcpy call into an optimized form. If so, return 6413 /// true and lower it. Otherwise return false, and it will be lowered like a 6414 /// normal call. 6415 /// The caller already checked that \p I calls the appropriate LibFunc with a 6416 /// correct prototype. 6417 bool SelectionDAGBuilder::visitMemPCpyCall(const CallInst &I) { 6418 SDValue Dst = getValue(I.getArgOperand(0)); 6419 SDValue Src = getValue(I.getArgOperand(1)); 6420 SDValue Size = getValue(I.getArgOperand(2)); 6421 6422 unsigned DstAlign = DAG.InferPtrAlignment(Dst); 6423 unsigned SrcAlign = DAG.InferPtrAlignment(Src); 6424 unsigned Align = std::min(DstAlign, SrcAlign); 6425 if (Align == 0) // Alignment of one or both could not be inferred. 6426 Align = 1; // 0 and 1 both specify no alignment, but 0 is reserved. 
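// For reference (hypothetical C): char *end = mempcpy(dst, src, 16);
// copies like memcpy(dst, src, 16) but returns dst + 16, which is why the
// result is recomputed below as an ADD rather than taken from a call.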
6427 6428 bool isVol = false; 6429 SDLoc sdl = getCurSDLoc(); 6430 6431 // In the mempcpy context we need to pass in a false value for isTailCall 6432 // because the return pointer needs to be adjusted by the size of 6433 // the copied memory. 6434 SDValue MC = DAG.getMemcpy(getRoot(), sdl, Dst, Src, Size, Align, isVol, 6435 false, /*isTailCall=*/false, 6436 MachinePointerInfo(I.getArgOperand(0)), 6437 MachinePointerInfo(I.getArgOperand(1))); 6438 assert(MC.getNode() != nullptr && 6439 "** memcpy should not be lowered as TailCall in mempcpy context **"); 6440 DAG.setRoot(MC); 6441 6442 // Check if Size needs to be truncated or extended. 6443 Size = DAG.getSExtOrTrunc(Size, sdl, Dst.getValueType()); 6444 6445 // Adjust return pointer to point just past the last dst byte. 6446 SDValue DstPlusSize = DAG.getNode(ISD::ADD, sdl, Dst.getValueType(), 6447 Dst, Size); 6448 setValue(&I, DstPlusSize); 6449 return true; 6450 } 6451 6452 /// See if we can lower a strcpy call into an optimized form. If so, return 6453 /// true and lower it, otherwise return false and it will be lowered like a 6454 /// normal call. 6455 /// The caller already checked that \p I calls the appropriate LibFunc with a 6456 /// correct prototype. 6457 bool SelectionDAGBuilder::visitStrCpyCall(const CallInst &I, bool isStpcpy) { 6458 const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1); 6459 6460 const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo(); 6461 std::pair<SDValue, SDValue> Res = 6462 TSI.EmitTargetCodeForStrcpy(DAG, getCurSDLoc(), getRoot(), 6463 getValue(Arg0), getValue(Arg1), 6464 MachinePointerInfo(Arg0), 6465 MachinePointerInfo(Arg1), isStpcpy); 6466 if (Res.first.getNode()) { 6467 setValue(&I, Res.first); 6468 DAG.setRoot(Res.second); 6469 return true; 6470 } 6471 6472 return false; 6473 } 6474 6475 /// See if we can lower a strcmp call into an optimized form. If so, return 6476 /// true and lower it, otherwise return false and it will be lowered like a 6477 /// normal call. 6478 /// The caller already checked that \p I calls the appropriate LibFunc with a 6479 /// correct prototype. 6480 bool SelectionDAGBuilder::visitStrCmpCall(const CallInst &I) { 6481 const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1); 6482 6483 const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo(); 6484 std::pair<SDValue, SDValue> Res = 6485 TSI.EmitTargetCodeForStrcmp(DAG, getCurSDLoc(), DAG.getRoot(), 6486 getValue(Arg0), getValue(Arg1), 6487 MachinePointerInfo(Arg0), 6488 MachinePointerInfo(Arg1)); 6489 if (Res.first.getNode()) { 6490 processIntegerCallValue(I, Res.first, true); 6491 PendingLoads.push_back(Res.second); 6492 return true; 6493 } 6494 6495 return false; 6496 } 6497 6498 /// See if we can lower a strlen call into an optimized form. If so, return 6499 /// true and lower it, otherwise return false and it will be lowered like a 6500 /// normal call. 6501 /// The caller already checked that \p I calls the appropriate LibFunc with a 6502 /// correct prototype. 
6503 bool SelectionDAGBuilder::visitStrLenCall(const CallInst &I) { 6504 const Value *Arg0 = I.getArgOperand(0); 6505 6506 const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo(); 6507 std::pair<SDValue, SDValue> Res = 6508 TSI.EmitTargetCodeForStrlen(DAG, getCurSDLoc(), DAG.getRoot(), 6509 getValue(Arg0), MachinePointerInfo(Arg0)); 6510 if (Res.first.getNode()) { 6511 processIntegerCallValue(I, Res.first, false); 6512 PendingLoads.push_back(Res.second); 6513 return true; 6514 } 6515 6516 return false; 6517 } 6518 6519 /// See if we can lower a strnlen call into an optimized form. If so, return 6520 /// true and lower it, otherwise return false and it will be lowered like a 6521 /// normal call. 6522 /// The caller already checked that \p I calls the appropriate LibFunc with a 6523 /// correct prototype. 6524 bool SelectionDAGBuilder::visitStrNLenCall(const CallInst &I) { 6525 const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1); 6526 6527 const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo(); 6528 std::pair<SDValue, SDValue> Res = 6529 TSI.EmitTargetCodeForStrnlen(DAG, getCurSDLoc(), DAG.getRoot(), 6530 getValue(Arg0), getValue(Arg1), 6531 MachinePointerInfo(Arg0)); 6532 if (Res.first.getNode()) { 6533 processIntegerCallValue(I, Res.first, false); 6534 PendingLoads.push_back(Res.second); 6535 return true; 6536 } 6537 6538 return false; 6539 } 6540 6541 /// See if we can lower a unary floating-point operation into an SDNode with 6542 /// the specified Opcode. If so, return true and lower it, otherwise return 6543 /// false and it will be lowered like a normal call. 6544 /// The caller already checked that \p I calls the appropriate LibFunc with a 6545 /// correct prototype. 6546 bool SelectionDAGBuilder::visitUnaryFloatCall(const CallInst &I, 6547 unsigned Opcode) { 6548 // We already checked this call's prototype; verify it doesn't modify errno. 6549 if (!I.onlyReadsMemory()) 6550 return false; 6551 6552 SDValue Tmp = getValue(I.getArgOperand(0)); 6553 setValue(&I, DAG.getNode(Opcode, getCurSDLoc(), Tmp.getValueType(), Tmp)); 6554 return true; 6555 } 6556 6557 /// See if we can lower a binary floating-point operation into an SDNode with 6558 /// the specified Opcode. If so, return true and lower it. Otherwise return 6559 /// false, and it will be lowered like a normal call. 6560 /// The caller already checked that \p I calls the appropriate LibFunc with a 6561 /// correct prototype. 6562 bool SelectionDAGBuilder::visitBinaryFloatCall(const CallInst &I, 6563 unsigned Opcode) { 6564 // We already checked this call's prototype; verify it doesn't modify errno. 6565 if (!I.onlyReadsMemory()) 6566 return false; 6567 6568 SDValue Tmp0 = getValue(I.getArgOperand(0)); 6569 SDValue Tmp1 = getValue(I.getArgOperand(1)); 6570 EVT VT = Tmp0.getValueType(); 6571 setValue(&I, DAG.getNode(Opcode, getCurSDLoc(), VT, Tmp0, Tmp1)); 6572 return true; 6573 } 6574 6575 void SelectionDAGBuilder::visitCall(const CallInst &I) { 6576 // Handle inline assembly differently. 
6577 if (isa<InlineAsm>(I.getCalledValue())) { 6578 visitInlineAsm(&I); 6579 return; 6580 } 6581 6582 MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI(); 6583 computeUsesVAFloatArgument(I, MMI); 6584 6585 const char *RenameFn = nullptr; 6586 if (Function *F = I.getCalledFunction()) { 6587 if (F->isDeclaration()) { 6588 if (const TargetIntrinsicInfo *II = TM.getIntrinsicInfo()) { 6589 if (unsigned IID = II->getIntrinsicID(F)) { 6590 RenameFn = visitIntrinsicCall(I, IID); 6591 if (!RenameFn) 6592 return; 6593 } 6594 } 6595 if (Intrinsic::ID IID = F->getIntrinsicID()) { 6596 RenameFn = visitIntrinsicCall(I, IID); 6597 if (!RenameFn) 6598 return; 6599 } 6600 } 6601 6602 // Check for well-known libc/libm calls. If the function is internal, it 6603 // can't be a library call. Don't do the check if marked as nobuiltin for 6604 // some reason or the call site requires strict floating point semantics. 6605 LibFunc Func; 6606 if (!I.isNoBuiltin() && !I.isStrictFP() && !F->hasLocalLinkage() && 6607 F->hasName() && LibInfo->getLibFunc(*F, Func) && 6608 LibInfo->hasOptimizedCodeGen(Func)) { 6609 switch (Func) { 6610 default: break; 6611 case LibFunc_copysign: 6612 case LibFunc_copysignf: 6613 case LibFunc_copysignl: 6614 // We already checked this call's prototype; verify it doesn't modify 6615 // errno. 6616 if (I.onlyReadsMemory()) { 6617 SDValue LHS = getValue(I.getArgOperand(0)); 6618 SDValue RHS = getValue(I.getArgOperand(1)); 6619 setValue(&I, DAG.getNode(ISD::FCOPYSIGN, getCurSDLoc(), 6620 LHS.getValueType(), LHS, RHS)); 6621 return; 6622 } 6623 break; 6624 case LibFunc_fabs: 6625 case LibFunc_fabsf: 6626 case LibFunc_fabsl: 6627 if (visitUnaryFloatCall(I, ISD::FABS)) 6628 return; 6629 break; 6630 case LibFunc_fmin: 6631 case LibFunc_fminf: 6632 case LibFunc_fminl: 6633 if (visitBinaryFloatCall(I, ISD::FMINNUM)) 6634 return; 6635 break; 6636 case LibFunc_fmax: 6637 case LibFunc_fmaxf: 6638 case LibFunc_fmaxl: 6639 if (visitBinaryFloatCall(I, ISD::FMAXNUM)) 6640 return; 6641 break; 6642 case LibFunc_sin: 6643 case LibFunc_sinf: 6644 case LibFunc_sinl: 6645 if (visitUnaryFloatCall(I, ISD::FSIN)) 6646 return; 6647 break; 6648 case LibFunc_cos: 6649 case LibFunc_cosf: 6650 case LibFunc_cosl: 6651 if (visitUnaryFloatCall(I, ISD::FCOS)) 6652 return; 6653 break; 6654 case LibFunc_sqrt: 6655 case LibFunc_sqrtf: 6656 case LibFunc_sqrtl: 6657 case LibFunc_sqrt_finite: 6658 case LibFunc_sqrtf_finite: 6659 case LibFunc_sqrtl_finite: 6660 if (visitUnaryFloatCall(I, ISD::FSQRT)) 6661 return; 6662 break; 6663 case LibFunc_floor: 6664 case LibFunc_floorf: 6665 case LibFunc_floorl: 6666 if (visitUnaryFloatCall(I, ISD::FFLOOR)) 6667 return; 6668 break; 6669 case LibFunc_nearbyint: 6670 case LibFunc_nearbyintf: 6671 case LibFunc_nearbyintl: 6672 if (visitUnaryFloatCall(I, ISD::FNEARBYINT)) 6673 return; 6674 break; 6675 case LibFunc_ceil: 6676 case LibFunc_ceilf: 6677 case LibFunc_ceill: 6678 if (visitUnaryFloatCall(I, ISD::FCEIL)) 6679 return; 6680 break; 6681 case LibFunc_rint: 6682 case LibFunc_rintf: 6683 case LibFunc_rintl: 6684 if (visitUnaryFloatCall(I, ISD::FRINT)) 6685 return; 6686 break; 6687 case LibFunc_round: 6688 case LibFunc_roundf: 6689 case LibFunc_roundl: 6690 if (visitUnaryFloatCall(I, ISD::FROUND)) 6691 return; 6692 break; 6693 case LibFunc_trunc: 6694 case LibFunc_truncf: 6695 case LibFunc_truncl: 6696 if (visitUnaryFloatCall(I, ISD::FTRUNC)) 6697 return; 6698 break; 6699 case LibFunc_log2: 6700 case LibFunc_log2f: 6701 case LibFunc_log2l: 6702 if (visitUnaryFloatCall(I, ISD::FLOG2)) 
6703 return;
6704 break;
6705 case LibFunc_exp2:
6706 case LibFunc_exp2f:
6707 case LibFunc_exp2l:
6708 if (visitUnaryFloatCall(I, ISD::FEXP2))
6709 return;
6710 break;
6711 case LibFunc_memcmp:
6712 if (visitMemCmpCall(I))
6713 return;
6714 break;
6715 case LibFunc_mempcpy:
6716 if (visitMemPCpyCall(I))
6717 return;
6718 break;
6719 case LibFunc_memchr:
6720 if (visitMemChrCall(I))
6721 return;
6722 break;
6723 case LibFunc_strcpy:
6724 if (visitStrCpyCall(I, false))
6725 return;
6726 break;
6727 case LibFunc_stpcpy:
6728 if (visitStrCpyCall(I, true))
6729 return;
6730 break;
6731 case LibFunc_strcmp:
6732 if (visitStrCmpCall(I))
6733 return;
6734 break;
6735 case LibFunc_strlen:
6736 if (visitStrLenCall(I))
6737 return;
6738 break;
6739 case LibFunc_strnlen:
6740 if (visitStrNLenCall(I))
6741 return;
6742 break;
6743 }
6744 }
6745 }
6746 
6747 SDValue Callee;
6748 if (!RenameFn)
6749 Callee = getValue(I.getCalledValue());
6750 else
6751 Callee = DAG.getExternalSymbol(
6752 RenameFn,
6753 DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()));
6754 
6755 // Deopt bundles are lowered in LowerCallSiteWithDeoptBundle, and we don't
6756 // have to do anything here to lower funclet bundles.
6757 assert(!I.hasOperandBundlesOtherThan(
6758 {LLVMContext::OB_deopt, LLVMContext::OB_funclet}) &&
6759 "Cannot lower calls with arbitrary operand bundles!");
6760 
6761 if (I.countOperandBundlesOfType(LLVMContext::OB_deopt))
6762 LowerCallSiteWithDeoptBundle(&I, Callee, nullptr);
6763 else
6764 // Check if we can potentially perform a tail call. More detailed checking
6765 // is done within LowerCallTo, after more information about the call is
6766 // known.
6767 LowerCallTo(&I, Callee, I.isTailCall());
6768 }
6769 
6770 namespace {
6771 
6772 /// AsmOperandInfo - This contains information for each constraint that we are
6773 /// lowering.
6774 class SDISelAsmOperandInfo : public TargetLowering::AsmOperandInfo {
6775 public:
6776 /// CallOperand - If this is the result output operand or a clobber,
6777 /// this is null; otherwise it is the incoming operand to the CallInst.
6778 /// This gets modified as the asm is processed.
6779 SDValue CallOperand;
6780 
6781 /// AssignedRegs - If this is a register or register class operand, this
6782 /// contains the set of registers corresponding to the operand.
6783 RegsForValue AssignedRegs;
6784 
6785 explicit SDISelAsmOperandInfo(const TargetLowering::AsmOperandInfo &info)
6786 : TargetLowering::AsmOperandInfo(info), CallOperand(nullptr,0) {
6787 }
6788 
6789 /// Whether or not this operand accesses memory.
6790 bool hasMemory(const TargetLowering &TLI) const {
6791 // Indirect operand accesses access memory.
6792 if (isIndirect)
6793 return true;
6794 
6795 for (const auto &Code : Codes)
6796 if (TLI.getConstraintType(Code) == TargetLowering::C_Memory)
6797 return true;
6798 
6799 return false;
6800 }
6801 
6802 /// getCallOperandValEVT - Return the EVT of the Value* that this operand
6803 /// corresponds to. If there is no Value* for this operand, it returns
6804 /// MVT::Other.
6805 EVT getCallOperandValEVT(LLVMContext &Context, const TargetLowering &TLI,
6806 const DataLayout &DL) const {
6807 if (!CallOperandVal) return MVT::Other;
6808 
6809 if (isa<BasicBlock>(CallOperandVal))
6810 return TLI.getPointerTy(DL);
6811 
6812 llvm::Type *OpTy = CallOperandVal->getType();
6813 
6814 // FIXME: code duplicated from TargetLowering::ParseConstraints().
6815 // If this is an indirect operand, the operand is a pointer to the
6816 // accessed type.
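// e.g. for an "=*m" output whose operand has IR type i32*, the type
// reported here is i32, the type actually accessed through the pointer
// (an illustrative case, not an exhaustive rule).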
6817 if (isIndirect) { 6818 llvm::PointerType *PtrTy = dyn_cast<PointerType>(OpTy); 6819 if (!PtrTy) 6820 report_fatal_error("Indirect operand for inline asm not a pointer!"); 6821 OpTy = PtrTy->getElementType(); 6822 } 6823 6824 // Look for vector wrapped in a struct. e.g. { <16 x i8> }. 6825 if (StructType *STy = dyn_cast<StructType>(OpTy)) 6826 if (STy->getNumElements() == 1) 6827 OpTy = STy->getElementType(0); 6828 6829 // If OpTy is not a single value, it may be a struct/union that we 6830 // can tile with integers. 6831 if (!OpTy->isSingleValueType() && OpTy->isSized()) { 6832 unsigned BitSize = DL.getTypeSizeInBits(OpTy); 6833 switch (BitSize) { 6834 default: break; 6835 case 1: 6836 case 8: 6837 case 16: 6838 case 32: 6839 case 64: 6840 case 128: 6841 OpTy = IntegerType::get(Context, BitSize); 6842 break; 6843 } 6844 } 6845 6846 return TLI.getValueType(DL, OpTy, true); 6847 } 6848 }; 6849 6850 typedef SmallVector<SDISelAsmOperandInfo,16> SDISelAsmOperandInfoVector; 6851 6852 } // end anonymous namespace 6853 6854 /// Make sure that the output operand \p OpInfo and its corresponding input 6855 /// operand \p MatchingOpInfo have compatible constraint types (otherwise error 6856 /// out). 6857 static void patchMatchingInput(const SDISelAsmOperandInfo &OpInfo, 6858 SDISelAsmOperandInfo &MatchingOpInfo, 6859 SelectionDAG &DAG) { 6860 if (OpInfo.ConstraintVT == MatchingOpInfo.ConstraintVT) 6861 return; 6862 6863 const TargetRegisterInfo *TRI = DAG.getSubtarget().getRegisterInfo(); 6864 const auto &TLI = DAG.getTargetLoweringInfo(); 6865 6866 std::pair<unsigned, const TargetRegisterClass *> MatchRC = 6867 TLI.getRegForInlineAsmConstraint(TRI, OpInfo.ConstraintCode, 6868 OpInfo.ConstraintVT); 6869 std::pair<unsigned, const TargetRegisterClass *> InputRC = 6870 TLI.getRegForInlineAsmConstraint(TRI, MatchingOpInfo.ConstraintCode, 6871 MatchingOpInfo.ConstraintVT); 6872 if ((OpInfo.ConstraintVT.isInteger() != 6873 MatchingOpInfo.ConstraintVT.isInteger()) || 6874 (MatchRC.second != InputRC.second)) { 6875 // FIXME: error out in a more elegant fashion 6876 report_fatal_error("Unsupported asm: input constraint" 6877 " with a matching output constraint of" 6878 " incompatible type!"); 6879 } 6880 MatchingOpInfo.ConstraintVT = OpInfo.ConstraintVT; 6881 } 6882 6883 /// Get a direct memory input to behave well as an indirect operand. 6884 /// This may introduce stores, hence the need for a \p Chain. 6885 /// \return The (possibly updated) chain. 6886 static SDValue getAddressForMemoryInput(SDValue Chain, const SDLoc &Location, 6887 SDISelAsmOperandInfo &OpInfo, 6888 SelectionDAG &DAG) { 6889 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 6890 6891 // If we don't have an indirect input, put it in the constpool if we can, 6892 // otherwise spill it to a stack slot. 6893 // TODO: This isn't quite right. We need to handle these according to 6894 // the addressing mode that the constraint wants. Also, this may take 6895 // an additional register for the computation and we don't want that 6896 // either. 6897 6898 // If the operand is a float, integer, or vector constant, spill to a 6899 // constant pool entry to get its address. 
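// Sketch (hypothetical source): for asm("fldl %0" :: "m"(2.5)), the f64
// constant is given a constant-pool slot and its address becomes the memory
// operand; non-constant values instead take the stack-slot path below.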
6900 const Value *OpVal = OpInfo.CallOperandVal;
6901 if (isa<ConstantFP>(OpVal) || isa<ConstantInt>(OpVal) ||
6902 isa<ConstantVector>(OpVal) || isa<ConstantDataVector>(OpVal)) {
6903 OpInfo.CallOperand = DAG.getConstantPool(
6904 cast<Constant>(OpVal), TLI.getPointerTy(DAG.getDataLayout()));
6905 return Chain;
6906 }
6907 
6908 // Otherwise, create a stack slot and emit a store to it before the asm.
6909 Type *Ty = OpVal->getType();
6910 auto &DL = DAG.getDataLayout();
6911 uint64_t TySize = DL.getTypeAllocSize(Ty);
6912 unsigned Align = DL.getPrefTypeAlignment(Ty);
6913 MachineFunction &MF = DAG.getMachineFunction();
6914 int SSFI = MF.getFrameInfo().CreateStackObject(TySize, Align, false);
6915 SDValue StackSlot = DAG.getFrameIndex(SSFI, TLI.getFrameIndexTy(DL));
6916 Chain = DAG.getStore(Chain, Location, OpInfo.CallOperand, StackSlot,
6917 MachinePointerInfo::getFixedStack(MF, SSFI));
6918 OpInfo.CallOperand = StackSlot;
6919 
6920 return Chain;
6921 }
6922 
6923 /// GetRegistersForValue - Assign registers (virtual or physical) for the
6924 /// specified operand. We prefer to assign virtual registers, to allow the
6925 /// register allocator to handle the assignment process. However, if the asm
6926 /// uses features that we can't model on machineinstrs, we have SDISel do the
6927 /// allocation. This produces generally horrible, but correct, code.
6928 ///
6929 /// OpInfo describes the operand.
6930 ///
6931 static void GetRegistersForValue(SelectionDAG &DAG, const TargetLowering &TLI,
6932 const SDLoc &DL,
6933 SDISelAsmOperandInfo &OpInfo) {
6934 LLVMContext &Context = *DAG.getContext();
6935 
6936 MachineFunction &MF = DAG.getMachineFunction();
6937 SmallVector<unsigned, 4> Regs;
6938 const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
6939 
6940 // If this is a constraint for a single physreg, or a constraint for a
6941 // register class, find it.
6942 std::pair<unsigned, const TargetRegisterClass *> PhysReg =
6943 TLI.getRegForInlineAsmConstraint(&TRI, OpInfo.ConstraintCode,
6944 OpInfo.ConstraintVT);
6945 
6946 unsigned NumRegs = 1;
6947 if (OpInfo.ConstraintVT != MVT::Other) {
6948 // If this is an FP input in an integer register (or vice versa), insert a
6949 // bitcast of the input value. More generally, handle any case where the
6950 // input value disagrees with the register class we plan to stick it in.
6951 if (OpInfo.Type == InlineAsm::isInput && PhysReg.second &&
6952 !TRI.isTypeLegalForClass(*PhysReg.second, OpInfo.ConstraintVT)) {
6953 // Try to convert to the first EVT that the reg class contains. If the
6954 // types are identical size, use a bitcast to convert (e.g. two differing
6955 // vector types).
6956 MVT RegVT = *TRI.legalclasstypes_begin(*PhysReg.second);
6957 if (RegVT.getSizeInBits() == OpInfo.CallOperand.getValueSizeInBits()) {
6958 OpInfo.CallOperand = DAG.getNode(ISD::BITCAST, DL,
6959 RegVT, OpInfo.CallOperand);
6960 OpInfo.ConstraintVT = RegVT;
6961 } else if (RegVT.isInteger() && OpInfo.ConstraintVT.isFloatingPoint()) {
6962 // If the input is an FP value and we want it in integer registers, do a
6963 // bitcast to the corresponding integer type. This turns an f64 value
6964 // into i64, which can be passed with two i32 values on a 32-bit
6965 // machine.
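// Concretely (an illustrative case): an f64 operand constrained to 32-bit
// integer registers is bitcast to i64 here and then split into two i32
// register parts by the usual value-splitting logic.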
6966 RegVT = MVT::getIntegerVT(OpInfo.ConstraintVT.getSizeInBits());
6967 OpInfo.CallOperand = DAG.getNode(ISD::BITCAST, DL,
6968 RegVT, OpInfo.CallOperand);
6969 OpInfo.ConstraintVT = RegVT;
6970 }
6971 }
6972 
6973 NumRegs = TLI.getNumRegisters(Context, OpInfo.ConstraintVT);
6974 }
6975 
6976 MVT RegVT;
6977 EVT ValueVT = OpInfo.ConstraintVT;
6978 
6979 // If this is a constraint for a specific physical register, like {r17},
6980 // assign it now.
6981 if (unsigned AssignedReg = PhysReg.first) {
6982 const TargetRegisterClass *RC = PhysReg.second;
6983 if (OpInfo.ConstraintVT == MVT::Other)
6984 ValueVT = *TRI.legalclasstypes_begin(*RC);
6985 
6986 // Get the actual register value type. This is important, because the user
6987 // may have asked for (e.g.) the AX register in i32 type. We need to
6988 // remember that AX is actually i16 to get the right extension.
6989 RegVT = *TRI.legalclasstypes_begin(*RC);
6990 
6991 // This is an explicit reference to a physical register.
6992 Regs.push_back(AssignedReg);
6993 
6994 // If this is an expanded reference, add the rest of the regs to Regs.
6995 if (NumRegs != 1) {
6996 TargetRegisterClass::iterator I = RC->begin();
6997 for (; *I != AssignedReg; ++I)
6998 assert(I != RC->end() && "Didn't find reg!");
6999 
7000 // Already added the first reg.
7001 --NumRegs; ++I;
7002 for (; NumRegs; --NumRegs, ++I) {
7003 assert(I != RC->end() && "Ran out of registers to allocate!");
7004 Regs.push_back(*I);
7005 }
7006 }
7007 
7008 OpInfo.AssignedRegs = RegsForValue(Regs, RegVT, ValueVT);
7009 return;
7010 }
7011 
7012 // Otherwise, if this was a reference to an LLVM register class, create vregs
7013 // for this reference.
7014 if (const TargetRegisterClass *RC = PhysReg.second) {
7015 RegVT = *TRI.legalclasstypes_begin(*RC);
7016 if (OpInfo.ConstraintVT == MVT::Other)
7017 ValueVT = RegVT;
7018 
7019 // Create the appropriate number of virtual registers.
7020 MachineRegisterInfo &RegInfo = MF.getRegInfo();
7021 for (; NumRegs; --NumRegs)
7022 Regs.push_back(RegInfo.createVirtualRegister(RC));
7023 
7024 OpInfo.AssignedRegs = RegsForValue(Regs, RegVT, ValueVT);
7025 return;
7026 }
7027 
7028 // Otherwise, we couldn't allocate enough registers for this.
7029 }
7030 
7031 static unsigned
7032 findMatchingInlineAsmOperand(unsigned OperandNo,
7033 const std::vector<SDValue> &AsmNodeOperands) {
7034 // Scan until we find the definition we already emitted of this operand.
7035 unsigned CurOp = InlineAsm::Op_FirstOperand;
7036 for (; OperandNo; --OperandNo) {
7037 // Advance to the next operand.
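// Operand layout being walked (sketch): after the fixed operands
//   [chain, asm string, srcloc MD, extra-flags]
// the list is a series of groups, each a flag word followed by that group's
// registers (or its one memory operand), so we hop from flag word to flag
// word.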
7038 unsigned OpFlag = 7039 cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue(); 7040 assert((InlineAsm::isRegDefKind(OpFlag) || 7041 InlineAsm::isRegDefEarlyClobberKind(OpFlag) || 7042 InlineAsm::isMemKind(OpFlag)) && 7043 "Skipped past definitions?"); 7044 CurOp += InlineAsm::getNumOperandRegisters(OpFlag) + 1; 7045 } 7046 return CurOp; 7047 } 7048 7049 /// Fill \p Regs with \p NumRegs new virtual registers of type \p RegVT 7050 /// \return true if it has succeeded, false otherwise 7051 static bool createVirtualRegs(SmallVector<unsigned, 4> &Regs, unsigned NumRegs, 7052 MVT RegVT, SelectionDAG &DAG) { 7053 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 7054 MachineRegisterInfo &RegInfo = DAG.getMachineFunction().getRegInfo(); 7055 for (unsigned i = 0, e = NumRegs; i != e; ++i) { 7056 if (const TargetRegisterClass *RC = TLI.getRegClassFor(RegVT)) 7057 Regs.push_back(RegInfo.createVirtualRegister(RC)); 7058 else 7059 return false; 7060 } 7061 return true; 7062 } 7063 7064 namespace { 7065 class ExtraFlags { 7066 unsigned Flags = 0; 7067 7068 public: 7069 explicit ExtraFlags(ImmutableCallSite CS) { 7070 const InlineAsm *IA = cast<InlineAsm>(CS.getCalledValue()); 7071 if (IA->hasSideEffects()) 7072 Flags |= InlineAsm::Extra_HasSideEffects; 7073 if (IA->isAlignStack()) 7074 Flags |= InlineAsm::Extra_IsAlignStack; 7075 if (CS.isConvergent()) 7076 Flags |= InlineAsm::Extra_IsConvergent; 7077 Flags |= IA->getDialect() * InlineAsm::Extra_AsmDialect; 7078 } 7079 7080 void update(const llvm::TargetLowering::AsmOperandInfo &OpInfo) { 7081 // Ideally, we would only check against memory constraints. However, the 7082 // meaning of an Other constraint can be target-specific and we can't easily 7083 // reason about it. Therefore, be conservative and set MayLoad/MayStore 7084 // for Other constraints as well. 7085 if (OpInfo.ConstraintType == TargetLowering::C_Memory || 7086 OpInfo.ConstraintType == TargetLowering::C_Other) { 7087 if (OpInfo.Type == InlineAsm::isInput) 7088 Flags |= InlineAsm::Extra_MayLoad; 7089 else if (OpInfo.Type == InlineAsm::isOutput) 7090 Flags |= InlineAsm::Extra_MayStore; 7091 else if (OpInfo.Type == InlineAsm::isClobber) 7092 Flags |= (InlineAsm::Extra_MayLoad | InlineAsm::Extra_MayStore); 7093 } 7094 } 7095 7096 unsigned get() const { return Flags; } 7097 }; 7098 } // namespace 7099 7100 /// visitInlineAsm - Handle a call to an InlineAsm object. 7101 /// 7102 void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) { 7103 const InlineAsm *IA = cast<InlineAsm>(CS.getCalledValue()); 7104 7105 /// ConstraintOperands - Information about all of the constraints. 7106 SDISelAsmOperandInfoVector ConstraintOperands; 7107 7108 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 7109 TargetLowering::AsmOperandInfoVector TargetConstraints = TLI.ParseConstraints( 7110 DAG.getDataLayout(), DAG.getSubtarget().getRegisterInfo(), CS); 7111 7112 bool hasMemory = false; 7113 7114 // Remember the HasSideEffect, AlignStack, AsmDialect, MayLoad and MayStore 7115 ExtraFlags ExtraInfo(CS); 7116 7117 unsigned ArgNo = 0; // ArgNo - The argument of the CallInst. 7118 unsigned ResNo = 0; // ResNo - The result number of the next output. 7119 for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) { 7120 ConstraintOperands.push_back(SDISelAsmOperandInfo(TargetConstraints[i])); 7121 SDISelAsmOperandInfo &OpInfo = ConstraintOperands.back(); 7122 7123 MVT OpVT = MVT::Other; 7124 7125 // Compute the value type for each operand. 
7126 if (OpInfo.Type == InlineAsm::isInput ||
7127 (OpInfo.Type == InlineAsm::isOutput && OpInfo.isIndirect)) {
7128 OpInfo.CallOperandVal = const_cast<Value *>(CS.getArgument(ArgNo++));
7129 
7130 // Process the call argument. BasicBlocks are labels, currently appearing
7131 // only in asms.
7132 if (const BasicBlock *BB = dyn_cast<BasicBlock>(OpInfo.CallOperandVal)) {
7133 OpInfo.CallOperand = DAG.getBasicBlock(FuncInfo.MBBMap[BB]);
7134 } else {
7135 OpInfo.CallOperand = getValue(OpInfo.CallOperandVal);
7136 }
7137 
7138 OpVT =
7139 OpInfo
7140 .getCallOperandValEVT(*DAG.getContext(), TLI, DAG.getDataLayout())
7141 .getSimpleVT();
7142 }
7143 
7144 if (OpInfo.Type == InlineAsm::isOutput && !OpInfo.isIndirect) {
7145 // The return value of the call is this value. As such, there is no
7146 // corresponding argument.
7147 assert(!CS.getType()->isVoidTy() && "Bad inline asm!");
7148 if (StructType *STy = dyn_cast<StructType>(CS.getType())) {
7149 OpVT = TLI.getSimpleValueType(DAG.getDataLayout(),
7150 STy->getElementType(ResNo));
7151 } else {
7152 assert(ResNo == 0 && "Asm only has one result!");
7153 OpVT = TLI.getSimpleValueType(DAG.getDataLayout(), CS.getType());
7154 }
7155 ++ResNo;
7156 }
7157 
7158 OpInfo.ConstraintVT = OpVT;
7159 
7160 if (!hasMemory)
7161 hasMemory = OpInfo.hasMemory(TLI);
7162 
7163 // Determine if this InlineAsm MayLoad or MayStore based on the constraints.
7164 // FIXME: Could we compute this on OpInfo rather than TargetConstraints[i]?
7165 auto TargetConstraint = TargetConstraints[i];
7166 
7167 // Compute the constraint code and ConstraintType to use.
7168 TLI.ComputeConstraintToUse(TargetConstraint, SDValue());
7169 
7170 ExtraInfo.update(TargetConstraint);
7171 }
7172 
7173 SDValue Chain, Flag;
7174 
7175 // We won't need to flush pending loads if this asm doesn't touch
7176 // memory and is nonvolatile.
7177 if (hasMemory || IA->hasSideEffects())
7178 Chain = getRoot();
7179 else
7180 Chain = DAG.getRoot();
7181 
7182 // Second pass over the constraints: compute which constraint option to use
7183 // and assign registers to constraints that want a specific physreg.
7184 for (unsigned i = 0, e = ConstraintOperands.size(); i != e; ++i) {
7185 SDISelAsmOperandInfo &OpInfo = ConstraintOperands[i];
7186 
7187 // If this is an output operand with a matching input operand, look up the
7188 // matching input. If their types mismatch, e.g. one is an integer, the
7189 // other is floating point, or their sizes are different, flag it as an
7190 // error.
7191 if (OpInfo.hasMatchingInput()) {
7192 SDISelAsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];
7193 patchMatchingInput(OpInfo, Input, DAG);
7194 }
7195 
7196 // Compute the constraint code and ConstraintType to use.
7197 TLI.ComputeConstraintToUse(OpInfo, OpInfo.CallOperand, &DAG);
7198 
7199 if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
7200 OpInfo.Type == InlineAsm::isClobber)
7201 continue;
7202 
7203 // If this is a memory input, and if the operand is not indirect, do what
7204 // we need to in order to provide an address for the memory input.
7205 if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
7206 !OpInfo.isIndirect) {
7207 assert((OpInfo.isMultipleAlternative ||
7208 (OpInfo.Type == InlineAsm::isInput)) &&
7209 "Can only indirectify direct input operands!");
7210 
7211 // Memory operands really want the address of the value.
7212 Chain = getAddressForMemoryInput(Chain, getCurSDLoc(), OpInfo, DAG);
7213 
7214 // There is no longer a Value* corresponding to this operand.
7215 OpInfo.CallOperandVal = nullptr; 7216 7217 // It is now an indirect operand. 7218 OpInfo.isIndirect = true; 7219 } 7220 7221 // If this constraint is for a specific register, allocate it before 7222 // anything else. 7223 if (OpInfo.ConstraintType == TargetLowering::C_Register) 7224 GetRegistersForValue(DAG, TLI, getCurSDLoc(), OpInfo); 7225 } 7226 7227 // Third pass - Loop over all of the operands, assigning virtual or physregs 7228 // to register class operands. 7229 for (unsigned i = 0, e = ConstraintOperands.size(); i != e; ++i) { 7230 SDISelAsmOperandInfo &OpInfo = ConstraintOperands[i]; 7231 7232 // C_Register operands have already been allocated, Other/Memory don't need 7233 // to be. 7234 if (OpInfo.ConstraintType == TargetLowering::C_RegisterClass) 7235 GetRegistersForValue(DAG, TLI, getCurSDLoc(), OpInfo); 7236 } 7237 7238 // AsmNodeOperands - The operands for the ISD::INLINEASM node. 7239 std::vector<SDValue> AsmNodeOperands; 7240 AsmNodeOperands.push_back(SDValue()); // reserve space for input chain 7241 AsmNodeOperands.push_back(DAG.getTargetExternalSymbol( 7242 IA->getAsmString().c_str(), TLI.getPointerTy(DAG.getDataLayout()))); 7243 7244 // If we have a !srcloc metadata node associated with it, we want to attach 7245 // this to the ultimately generated inline asm machineinstr. To do this, we 7246 // pass in the third operand as this (potentially null) inline asm MDNode. 7247 const MDNode *SrcLoc = CS.getInstruction()->getMetadata("srcloc"); 7248 AsmNodeOperands.push_back(DAG.getMDNode(SrcLoc)); 7249 7250 // Remember the HasSideEffect, AlignStack, AsmDialect, MayLoad and MayStore 7251 // bits as operand 3. 7252 AsmNodeOperands.push_back(DAG.getTargetConstant( 7253 ExtraInfo.get(), getCurSDLoc(), TLI.getPointerTy(DAG.getDataLayout()))); 7254 7255 // Loop over all of the inputs, copying the operand values into the 7256 // appropriate registers and processing the output regs. 7257 RegsForValue RetValRegs; 7258 7259 // IndirectStoresToEmit - The set of stores to emit after the inline asm node. 7260 std::vector<std::pair<RegsForValue, Value*> > IndirectStoresToEmit; 7261 7262 for (unsigned i = 0, e = ConstraintOperands.size(); i != e; ++i) { 7263 SDISelAsmOperandInfo &OpInfo = ConstraintOperands[i]; 7264 7265 switch (OpInfo.Type) { 7266 case InlineAsm::isOutput: { 7267 if (OpInfo.ConstraintType != TargetLowering::C_RegisterClass && 7268 OpInfo.ConstraintType != TargetLowering::C_Register) { 7269 // Memory output, or 'other' output (e.g. 'X' constraint). 7270 assert(OpInfo.isIndirect && "Memory output must be indirect operand"); 7271 7272 unsigned ConstraintID = 7273 TLI.getInlineAsmMemConstraint(OpInfo.ConstraintCode); 7274 assert(ConstraintID != InlineAsm::Constraint_Unknown && 7275 "Failed to convert memory constraint code to constraint id."); 7276 7277 // Add information to the INLINEASM node to know about this output. 7278 unsigned OpFlags = InlineAsm::getFlagWord(InlineAsm::Kind_Mem, 1); 7279 OpFlags = InlineAsm::getFlagWordForMem(OpFlags, ConstraintID); 7280 AsmNodeOperands.push_back(DAG.getTargetConstant(OpFlags, getCurSDLoc(), 7281 MVT::i32)); 7282 AsmNodeOperands.push_back(OpInfo.CallOperand); 7283 break; 7284 } 7285 7286 // Otherwise, this is a register or register class output. 7287 7288 // Copy the output from the appropriate register. Find a register that 7289 // we can use. 
7290 if (OpInfo.AssignedRegs.Regs.empty()) {
7291 emitInlineAsmError(
7292 CS, "couldn't allocate output register for constraint '" +
7293 Twine(OpInfo.ConstraintCode) + "'");
7294 return;
7295 }
7296 
7297 // If this is an indirect operand, store through the pointer after the
7298 // asm.
7299 if (OpInfo.isIndirect) {
7300 IndirectStoresToEmit.push_back(std::make_pair(OpInfo.AssignedRegs,
7301 OpInfo.CallOperandVal));
7302 } else {
7303 // This is the result value of the call.
7304 assert(!CS.getType()->isVoidTy() && "Bad inline asm!");
7305 // Concatenate this output onto the outputs list.
7306 RetValRegs.append(OpInfo.AssignedRegs);
7307 }
7308 
7309 // Add information to the INLINEASM node to know that this register is
7310 // set.
7311 OpInfo.AssignedRegs
7312 .AddInlineAsmOperands(OpInfo.isEarlyClobber
7313 ? InlineAsm::Kind_RegDefEarlyClobber
7314 : InlineAsm::Kind_RegDef,
7315 false, 0, getCurSDLoc(), DAG, AsmNodeOperands);
7316 break;
7317 }
7318 case InlineAsm::isInput: {
7319 SDValue InOperandVal = OpInfo.CallOperand;
7320 
7321 if (OpInfo.isMatchingInputConstraint()) {
7322 // If this is required to match an output register we have already set,
7323 // just use its register.
7324 auto CurOp = findMatchingInlineAsmOperand(OpInfo.getMatchedOperand(),
7325 AsmNodeOperands);
7326 unsigned OpFlag =
7327 cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue();
7328 if (InlineAsm::isRegDefKind(OpFlag) ||
7329 InlineAsm::isRegDefEarlyClobberKind(OpFlag)) {
7330 // Add (OpFlag&0xffff)>>3 registers to MatchedRegs.
7331 if (OpInfo.isIndirect) {
7332 // This happens on gcc/testsuite/gcc.dg/pr8788-1.c
7333 emitInlineAsmError(CS, "inline asm not supported yet:"
7334 " don't know how to handle tied "
7335 "indirect register inputs");
7336 return;
7337 }
7338 
7339 MVT RegVT = AsmNodeOperands[CurOp+1].getSimpleValueType();
7340 SmallVector<unsigned, 4> Regs;
7341 
7342 if (!createVirtualRegs(Regs,
7343 InlineAsm::getNumOperandRegisters(OpFlag),
7344 RegVT, DAG)) {
7345 emitInlineAsmError(CS, "inline asm error: This value type register "
7346 "class is not natively supported!");
7347 return;
7348 }
7349 
7350 RegsForValue MatchedRegs(Regs, RegVT, InOperandVal.getValueType());
7351 
7352 SDLoc dl = getCurSDLoc();
7353 // Use the produced MatchedRegs object to copy the value into the regs.
7354 MatchedRegs.getCopyToRegs(InOperandVal, DAG, dl, Chain, &Flag,
7355 CS.getInstruction());
7356 MatchedRegs.AddInlineAsmOperands(InlineAsm::Kind_RegUse,
7357 true, OpInfo.getMatchedOperand(), dl,
7358 DAG, AsmNodeOperands);
7359 break;
7360 }
7361 
7362 assert(InlineAsm::isMemKind(OpFlag) && "Unknown matching constraint!");
7363 assert(InlineAsm::getNumOperandRegisters(OpFlag) == 1 &&
7364 "Unexpected number of operands");
7365 // Add information to the INLINEASM node to know about this input.
7366 // See InlineAsm.h isUseOperandTiedToDef.
7367 OpFlag = InlineAsm::convertMemFlagWordToMatchingFlagWord(OpFlag);
7368 OpFlag = InlineAsm::getFlagWordForMatchingOp(OpFlag,
7369 OpInfo.getMatchedOperand());
7370 AsmNodeOperands.push_back(DAG.getTargetConstant(
7371 OpFlag, getCurSDLoc(), TLI.getPointerTy(DAG.getDataLayout())));
7372 AsmNodeOperands.push_back(AsmNodeOperands[CurOp+1]);
7373 break;
7374 }
7375 
7376 // Treat indirect 'X' constraint as memory.
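// An indirect 'X' (e.g. a "*X" input in hypothetical source) already
// carries an address rather than a value, so funneling it through the
// memory path is the conservative choice.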
7377 if (OpInfo.ConstraintType == TargetLowering::C_Other && 7378 OpInfo.isIndirect) 7379 OpInfo.ConstraintType = TargetLowering::C_Memory; 7380 7381 if (OpInfo.ConstraintType == TargetLowering::C_Other) { 7382 std::vector<SDValue> Ops; 7383 TLI.LowerAsmOperandForConstraint(InOperandVal, OpInfo.ConstraintCode, 7384 Ops, DAG); 7385 if (Ops.empty()) { 7386 emitInlineAsmError(CS, "invalid operand for inline asm constraint '" + 7387 Twine(OpInfo.ConstraintCode) + "'"); 7388 return; 7389 } 7390 7391 // Add information to the INLINEASM node to know about this input. 7392 unsigned ResOpType = 7393 InlineAsm::getFlagWord(InlineAsm::Kind_Imm, Ops.size()); 7394 AsmNodeOperands.push_back(DAG.getTargetConstant( 7395 ResOpType, getCurSDLoc(), TLI.getPointerTy(DAG.getDataLayout()))); 7396 AsmNodeOperands.insert(AsmNodeOperands.end(), Ops.begin(), Ops.end()); 7397 break; 7398 } 7399 7400 if (OpInfo.ConstraintType == TargetLowering::C_Memory) { 7401 assert(OpInfo.isIndirect && "Operand must be indirect to be a mem!"); 7402 assert(InOperandVal.getValueType() == 7403 TLI.getPointerTy(DAG.getDataLayout()) && 7404 "Memory operands expect pointer values"); 7405 7406 unsigned ConstraintID = 7407 TLI.getInlineAsmMemConstraint(OpInfo.ConstraintCode); 7408 assert(ConstraintID != InlineAsm::Constraint_Unknown && 7409 "Failed to convert memory constraint code to constraint id."); 7410 7411 // Add information to the INLINEASM node to know about this input. 7412 unsigned ResOpType = InlineAsm::getFlagWord(InlineAsm::Kind_Mem, 1); 7413 ResOpType = InlineAsm::getFlagWordForMem(ResOpType, ConstraintID); 7414 AsmNodeOperands.push_back(DAG.getTargetConstant(ResOpType, 7415 getCurSDLoc(), 7416 MVT::i32)); 7417 AsmNodeOperands.push_back(InOperandVal); 7418 break; 7419 } 7420 7421 assert((OpInfo.ConstraintType == TargetLowering::C_RegisterClass || 7422 OpInfo.ConstraintType == TargetLowering::C_Register) && 7423 "Unknown constraint type!"); 7424 7425 // TODO: Support this. 7426 if (OpInfo.isIndirect) { 7427 emitInlineAsmError( 7428 CS, "Don't know how to handle indirect register inputs yet " 7429 "for constraint '" + 7430 Twine(OpInfo.ConstraintCode) + "'"); 7431 return; 7432 } 7433 7434 // Copy the input into the appropriate registers. 7435 if (OpInfo.AssignedRegs.Regs.empty()) { 7436 emitInlineAsmError(CS, "couldn't allocate input reg for constraint '" + 7437 Twine(OpInfo.ConstraintCode) + "'"); 7438 return; 7439 } 7440 7441 SDLoc dl = getCurSDLoc(); 7442 7443 OpInfo.AssignedRegs.getCopyToRegs(InOperandVal, DAG, dl, 7444 Chain, &Flag, CS.getInstruction()); 7445 7446 OpInfo.AssignedRegs.AddInlineAsmOperands(InlineAsm::Kind_RegUse, false, 0, 7447 dl, DAG, AsmNodeOperands); 7448 break; 7449 } 7450 case InlineAsm::isClobber: { 7451 // Add the clobbered value to the operand list, so that the register 7452 // allocator is aware that the physreg got clobbered. 7453 if (!OpInfo.AssignedRegs.Regs.empty()) 7454 OpInfo.AssignedRegs.AddInlineAsmOperands(InlineAsm::Kind_Clobber, 7455 false, 0, getCurSDLoc(), DAG, 7456 AsmNodeOperands); 7457 break; 7458 } 7459 } 7460 } 7461 7462 // Finish up input operands. Set the input chain and add the flag last. 
7463 AsmNodeOperands[InlineAsm::Op_InputChain] = Chain; 7464 if (Flag.getNode()) AsmNodeOperands.push_back(Flag); 7465 7466 Chain = DAG.getNode(ISD::INLINEASM, getCurSDLoc(), 7467 DAG.getVTList(MVT::Other, MVT::Glue), AsmNodeOperands); 7468 Flag = Chain.getValue(1); 7469 7470 // If this asm returns a register value, copy the result from that register 7471 // and set it as the value of the call. 7472 if (!RetValRegs.Regs.empty()) { 7473 SDValue Val = RetValRegs.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), 7474 Chain, &Flag, CS.getInstruction()); 7475 7476 // FIXME: Why don't we do this for inline asms with MRVs? 7477 if (CS.getType()->isSingleValueType() && CS.getType()->isSized()) { 7478 EVT ResultType = TLI.getValueType(DAG.getDataLayout(), CS.getType()); 7479 7480 // If any of the results of the inline asm is a vector, it may have the 7481 // wrong width/num elts. This can happen for register classes that can 7482 // contain multiple different value types. The preg or vreg allocated may 7483 // not have the same VT as was expected. Convert it to the right type 7484 // with bit_convert. 7485 if (ResultType != Val.getValueType() && Val.getValueType().isVector()) { 7486 Val = DAG.getNode(ISD::BITCAST, getCurSDLoc(), 7487 ResultType, Val); 7488 7489 } else if (ResultType != Val.getValueType() && 7490 ResultType.isInteger() && Val.getValueType().isInteger()) { 7491 // If a result value was tied to an input value, the computed result may 7492 // have a wider width than the expected result. Extract the relevant 7493 // portion. 7494 Val = DAG.getNode(ISD::TRUNCATE, getCurSDLoc(), ResultType, Val); 7495 } 7496 7497 assert(ResultType == Val.getValueType() && "Asm result value mismatch!"); 7498 } 7499 7500 setValue(CS.getInstruction(), Val); 7501 // Don't need to use this as a chain in this case. 7502 if (!IA->hasSideEffects() && !hasMemory && IndirectStoresToEmit.empty()) 7503 return; 7504 } 7505 7506 std::vector<std::pair<SDValue, const Value *> > StoresToEmit; 7507 7508 // Process indirect outputs, first output all of the flagged copies out of 7509 // physregs. 7510 for (unsigned i = 0, e = IndirectStoresToEmit.size(); i != e; ++i) { 7511 RegsForValue &OutRegs = IndirectStoresToEmit[i].first; 7512 const Value *Ptr = IndirectStoresToEmit[i].second; 7513 SDValue OutVal = OutRegs.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), 7514 Chain, &Flag, IA); 7515 StoresToEmit.push_back(std::make_pair(OutVal, Ptr)); 7516 } 7517 7518 // Emit the non-flagged stores from the physregs. 
7519   SmallVector<SDValue, 8> OutChains;
7520   for (unsigned i = 0, e = StoresToEmit.size(); i != e; ++i) {
7521     SDValue Val = DAG.getStore(Chain, getCurSDLoc(), StoresToEmit[i].first,
7522                                getValue(StoresToEmit[i].second),
7523                                MachinePointerInfo(StoresToEmit[i].second));
7524     OutChains.push_back(Val);
7525   }
7526
7527   if (!OutChains.empty())
7528     Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), MVT::Other, OutChains);
7529
7530   DAG.setRoot(Chain);
7531 }
7532
7533 void SelectionDAGBuilder::emitInlineAsmError(ImmutableCallSite CS,
7534                                              const Twine &Message) {
7535   LLVMContext &Ctx = *DAG.getContext();
7536   Ctx.emitError(CS.getInstruction(), Message);
7537
7538   // Make sure we leave the DAG in a valid state.
7539   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7540   auto VT = TLI.getValueType(DAG.getDataLayout(), CS.getType());
7541   setValue(CS.getInstruction(), DAG.getUNDEF(VT));
7542 }
7543
7544 void SelectionDAGBuilder::visitVAStart(const CallInst &I) {
7545   DAG.setRoot(DAG.getNode(ISD::VASTART, getCurSDLoc(),
7546                           MVT::Other, getRoot(),
7547                           getValue(I.getArgOperand(0)),
7548                           DAG.getSrcValue(I.getArgOperand(0))));
7549 }
7550
7551 void SelectionDAGBuilder::visitVAArg(const VAArgInst &I) {
7552   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7553   const DataLayout &DL = DAG.getDataLayout();
7554   SDValue V = DAG.getVAArg(TLI.getValueType(DAG.getDataLayout(), I.getType()),
7555                            getCurSDLoc(), getRoot(), getValue(I.getOperand(0)),
7556                            DAG.getSrcValue(I.getOperand(0)),
7557                            DL.getABITypeAlignment(I.getType()));
7558   setValue(&I, V);
7559   DAG.setRoot(V.getValue(1));
7560 }
7561
7562 void SelectionDAGBuilder::visitVAEnd(const CallInst &I) {
7563   DAG.setRoot(DAG.getNode(ISD::VAEND, getCurSDLoc(),
7564                           MVT::Other, getRoot(),
7565                           getValue(I.getArgOperand(0)),
7566                           DAG.getSrcValue(I.getArgOperand(0))));
7567 }
7568
7569 void SelectionDAGBuilder::visitVACopy(const CallInst &I) {
7570   DAG.setRoot(DAG.getNode(ISD::VACOPY, getCurSDLoc(),
7571                           MVT::Other, getRoot(),
7572                           getValue(I.getArgOperand(0)),
7573                           getValue(I.getArgOperand(1)),
7574                           DAG.getSrcValue(I.getArgOperand(0)),
7575                           DAG.getSrcValue(I.getArgOperand(1))));
7576 }
7577
7578 SDValue SelectionDAGBuilder::lowerRangeToAssertZExt(SelectionDAG &DAG,
7579                                                     const Instruction &I,
7580                                                     SDValue Op) {
7581   const MDNode *Range = I.getMetadata(LLVMContext::MD_range);
7582   if (!Range)
7583     return Op;
7584
7585   ConstantRange CR = getConstantRangeFromMetadata(*Range);
7586   if (CR.isFullSet() || CR.isEmptySet() || CR.isWrappedSet())
7587     return Op;
7588
7589   APInt Lo = CR.getUnsignedMin();
7590   if (!Lo.isMinValue())
7591     return Op;
7592
7593   APInt Hi = CR.getUnsignedMax();
7594   unsigned Bits = Hi.getActiveBits();
7595
7596   EVT SmallVT = EVT::getIntegerVT(*DAG.getContext(), Bits);
7597
7598   SDLoc SL = getCurSDLoc();
7599
7600   SDValue ZExt = DAG.getNode(ISD::AssertZext, SL, Op.getValueType(), Op,
7601                              DAG.getValueType(SmallVT));
7602   unsigned NumVals = Op.getNode()->getNumValues();
7603   if (NumVals == 1)
7604     return ZExt;
7605
7606   SmallVector<SDValue, 4> Ops;
7607
7608   Ops.push_back(ZExt);
7609   for (unsigned I = 1; I != NumVals; ++I)
7610     Ops.push_back(Op.getValue(I));
7611
7612   return DAG.getMergeValues(Ops, SL);
7613 }
7614
7615 /// \brief Populate a CallLoweringInfo (into \p CLI) based on the properties of
7616 /// the call being lowered.
7617 ///
7618 /// This is a helper for lowering intrinsics that follow a target calling
7619 /// convention or require stack pointer adjustment. Only a subset of the
7620 /// intrinsic's operands need to participate in the calling convention.
7621 void SelectionDAGBuilder::populateCallLoweringInfo(
7622     TargetLowering::CallLoweringInfo &CLI, ImmutableCallSite CS,
7623     unsigned ArgIdx, unsigned NumArgs, SDValue Callee, Type *ReturnTy,
7624     bool IsPatchPoint) {
7625   TargetLowering::ArgListTy Args;
7626   Args.reserve(NumArgs);
7627
7628   // Populate the argument list.
7629   // Attributes for args start at offset 1, after the return attribute.
7630   for (unsigned ArgI = ArgIdx, ArgE = ArgIdx + NumArgs;
7631        ArgI != ArgE; ++ArgI) {
7632     const Value *V = CS->getOperand(ArgI);
7633
7634     assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");
7635
7636     TargetLowering::ArgListEntry Entry;
7637     Entry.Node = getValue(V);
7638     Entry.Ty = V->getType();
7639     Entry.setAttributes(&CS, ArgIdx);
7640     Args.push_back(Entry);
7641   }
7642
7643   CLI.setDebugLoc(getCurSDLoc())
7644       .setChain(getRoot())
7645       .setCallee(CS.getCallingConv(), ReturnTy, Callee, std::move(Args))
7646       .setDiscardResult(CS->use_empty())
7647       .setIsPatchPoint(IsPatchPoint);
7648 }
7649
7650 /// \brief Add a stack map intrinsic call's live variable operands to a stackmap
7651 /// or patchpoint target node's operand list.
7652 ///
7653 /// Constants are converted to TargetConstants purely as an optimization to
7654 /// avoid constant materialization and register allocation.
7655 ///
7656 /// FrameIndex operands are converted to TargetFrameIndex so that ISEL does not
7657 /// generate address computation nodes, and so ExpandISelPseudo can convert the
7658 /// TargetFrameIndex into a DirectMemRefOp StackMap location. This avoids
7659 /// address materialization and register allocation, but may also be required
7660 /// for correctness. If a StackMap (or PatchPoint) intrinsic directly uses an
7661 /// alloca in the entry block, then the runtime may assume that the alloca's
7662 /// StackMap location can be read immediately after compilation and that the
7663 /// location is valid at any point during execution (this is similar to the
7664 /// assumption made by the llvm.gcroot intrinsic). If the alloca's location were
7665 /// only available in a register, then the runtime would need to trap when
7666 /// execution reaches the StackMap in order to read the alloca's location.
7667 static void addStackMapLiveVars(ImmutableCallSite CS, unsigned StartIdx,
7668                                 const SDLoc &DL, SmallVectorImpl<SDValue> &Ops,
7669                                 SelectionDAGBuilder &Builder) {
7670   for (unsigned i = StartIdx, e = CS.arg_size(); i != e; ++i) {
7671     SDValue OpVal = Builder.getValue(CS.getArgument(i));
7672     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(OpVal)) {
7673       Ops.push_back(
7674           Builder.DAG.getTargetConstant(StackMaps::ConstantOp, DL, MVT::i64));
7675       Ops.push_back(
7676           Builder.DAG.getTargetConstant(C->getSExtValue(), DL, MVT::i64));
7677     } else if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(OpVal)) {
7678       const TargetLowering &TLI = Builder.DAG.getTargetLoweringInfo();
7679       Ops.push_back(Builder.DAG.getTargetFrameIndex(
7680           FI->getIndex(), TLI.getFrameIndexTy(Builder.DAG.getDataLayout())));
7681     } else
7682       Ops.push_back(OpVal);
7683   }
7684 }
7685
7686 /// \brief Lower llvm.experimental.stackmap directly to its target opcode.
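///
/// For example (illustrative IR):
///   call void @llvm.experimental.stackmap(i64 42, i32 0, i32 %x)
/// records where %x lives at this point without emitting a real call; only
/// the requested <numShadowBytes> bytes of nop padding are reserved.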
7687 void SelectionDAGBuilder::visitStackmap(const CallInst &CI) {
7688   // void @llvm.experimental.stackmap(i64 <id>, i32 <numShadowBytes>,
7689   //                                  [live variables...])
7690
7691   assert(CI.getType()->isVoidTy() && "Stackmap cannot return a value.");
7692
7693   SDValue Chain, InFlag, Callee, NullPtr;
7694   SmallVector<SDValue, 32> Ops;
7695
7696   SDLoc DL = getCurSDLoc();
7697   Callee = getValue(CI.getCalledValue());
7698   NullPtr = DAG.getIntPtrConstant(0, DL, true);
7699
7700   // The stackmap intrinsic only records the live variables (the arguments
7701   // passed to it) and emits nops (if requested). Unlike the patchpoint
7702   // intrinsic, this won't be lowered to a function call. This means we don't
7703   // have to worry about calling conventions and target-specific lowering code.
7704   // Instead we perform the call lowering right here.
7705   //
7706   // chain, flag = CALLSEQ_START(chain, 0, 0)
7707   // chain, flag = STACKMAP(id, nbytes, ..., chain, flag)
7708   // chain, flag = CALLSEQ_END(chain, 0, 0, flag)
7709   //
7710   Chain = DAG.getCALLSEQ_START(getRoot(), 0, 0, DL);
7711   InFlag = Chain.getValue(1);
7712
7713   // Add the <id> and <numBytes> constants.
7714   SDValue IDVal = getValue(CI.getOperand(PatchPointOpers::IDPos));
7715   Ops.push_back(DAG.getTargetConstant(
7716                   cast<ConstantSDNode>(IDVal)->getZExtValue(), DL, MVT::i64));
7717   SDValue NBytesVal = getValue(CI.getOperand(PatchPointOpers::NBytesPos));
7718   Ops.push_back(DAG.getTargetConstant(
7719                   cast<ConstantSDNode>(NBytesVal)->getZExtValue(), DL,
7720                   MVT::i32));
7721
7722   // Push live variables for the stack map.
7723   addStackMapLiveVars(&CI, 2, DL, Ops, *this);
7724
7725   // We are not pushing any register mask info here on the operands list,
7726   // because the stackmap doesn't clobber anything.
7727
7728   // Push the chain and the glue flag.
7729   Ops.push_back(Chain);
7730   Ops.push_back(InFlag);
7731
7732   // Create the STACKMAP node.
7733   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
7734   SDNode *SM = DAG.getMachineNode(TargetOpcode::STACKMAP, DL, NodeTys, Ops);
7735   Chain = SDValue(SM, 0);
7736   InFlag = Chain.getValue(1);
7737
7738   Chain = DAG.getCALLSEQ_END(Chain, NullPtr, NullPtr, InFlag, DL);
7739
7740   // Stackmaps don't generate values, so nothing goes into the NodeMap.
7741
7742   // Set the root to the target-lowered call chain.
7743   DAG.setRoot(Chain);
7744
7745   // Inform the Frame Information that we have a stackmap in this function.
7746   FuncInfo.MF->getFrameInfo().setHasStackMap();
7747 }
7748
7749 /// \brief Lower llvm.experimental.patchpoint directly to its target opcode.
7750 void SelectionDAGBuilder::visitPatchpoint(ImmutableCallSite CS,
7751                                           const BasicBlock *EHPadBB) {
7752   // void|i64 @llvm.experimental.patchpoint.void|i64(i64 <id>,
7753   //                                                 i32 <numBytes>,
7754   //                                                 i8* <target>,
7755   //                                                 i32 <numArgs>,
7756   //                                                 [Args...],
7757   //                                                 [live variables...])
7758
7759   CallingConv::ID CC = CS.getCallingConv();
7760   bool IsAnyRegCC = CC == CallingConv::AnyReg;
7761   bool HasDef = !CS->getType()->isVoidTy();
7762   SDLoc dl = getCurSDLoc();
7763   SDValue Callee = getValue(CS->getOperand(PatchPointOpers::TargetPos));
7764
7765   // Handle immediate and symbolic callees.
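  // An immediate callee (e.g. a constant address) becomes a target constant,
  // and a symbolic callee becomes a TargetGlobalAddress, so no callee
  // materialization code is emitted for the patchpoint itself.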
7766 if (auto* ConstCallee = dyn_cast<ConstantSDNode>(Callee)) 7767 Callee = DAG.getIntPtrConstant(ConstCallee->getZExtValue(), dl, 7768 /*isTarget=*/true); 7769 else if (auto* SymbolicCallee = dyn_cast<GlobalAddressSDNode>(Callee)) 7770 Callee = DAG.getTargetGlobalAddress(SymbolicCallee->getGlobal(), 7771 SDLoc(SymbolicCallee), 7772 SymbolicCallee->getValueType(0)); 7773 7774 // Get the real number of arguments participating in the call <numArgs> 7775 SDValue NArgVal = getValue(CS.getArgument(PatchPointOpers::NArgPos)); 7776 unsigned NumArgs = cast<ConstantSDNode>(NArgVal)->getZExtValue(); 7777 7778 // Skip the four meta args: <id>, <numNopBytes>, <target>, <numArgs> 7779 // Intrinsics include all meta-operands up to but not including CC. 7780 unsigned NumMetaOpers = PatchPointOpers::CCPos; 7781 assert(CS.arg_size() >= NumMetaOpers + NumArgs && 7782 "Not enough arguments provided to the patchpoint intrinsic"); 7783 7784 // For AnyRegCC the arguments are lowered later on manually. 7785 unsigned NumCallArgs = IsAnyRegCC ? 0 : NumArgs; 7786 Type *ReturnTy = 7787 IsAnyRegCC ? Type::getVoidTy(*DAG.getContext()) : CS->getType(); 7788 7789 TargetLowering::CallLoweringInfo CLI(DAG); 7790 populateCallLoweringInfo(CLI, CS, NumMetaOpers, NumCallArgs, Callee, ReturnTy, 7791 true); 7792 std::pair<SDValue, SDValue> Result = lowerInvokable(CLI, EHPadBB); 7793 7794 SDNode *CallEnd = Result.second.getNode(); 7795 if (HasDef && (CallEnd->getOpcode() == ISD::CopyFromReg)) 7796 CallEnd = CallEnd->getOperand(0).getNode(); 7797 7798 /// Get a call instruction from the call sequence chain. 7799 /// Tail calls are not allowed. 7800 assert(CallEnd->getOpcode() == ISD::CALLSEQ_END && 7801 "Expected a callseq node."); 7802 SDNode *Call = CallEnd->getOperand(0).getNode(); 7803 bool HasGlue = Call->getGluedNode(); 7804 7805 // Replace the target specific call node with the patchable intrinsic. 7806 SmallVector<SDValue, 8> Ops; 7807 7808 // Add the <id> and <numBytes> constants. 7809 SDValue IDVal = getValue(CS->getOperand(PatchPointOpers::IDPos)); 7810 Ops.push_back(DAG.getTargetConstant( 7811 cast<ConstantSDNode>(IDVal)->getZExtValue(), dl, MVT::i64)); 7812 SDValue NBytesVal = getValue(CS->getOperand(PatchPointOpers::NBytesPos)); 7813 Ops.push_back(DAG.getTargetConstant( 7814 cast<ConstantSDNode>(NBytesVal)->getZExtValue(), dl, 7815 MVT::i32)); 7816 7817 // Add the callee. 7818 Ops.push_back(Callee); 7819 7820 // Adjust <numArgs> to account for any arguments that have been passed on the 7821 // stack instead. 7822 // Call Node: Chain, Target, {Args}, RegMask, [Glue] 7823 unsigned NumCallRegArgs = Call->getNumOperands() - (HasGlue ? 4 : 3); 7824 NumCallRegArgs = IsAnyRegCC ? NumArgs : NumCallRegArgs; 7825 Ops.push_back(DAG.getTargetConstant(NumCallRegArgs, dl, MVT::i32)); 7826 7827 // Add the calling convention 7828 Ops.push_back(DAG.getTargetConstant((unsigned)CC, dl, MVT::i32)); 7829 7830 // Add the arguments we omitted previously. The register allocator should 7831 // place these in any free register. 7832 if (IsAnyRegCC) 7833 for (unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i != e; ++i) 7834 Ops.push_back(getValue(CS.getArgument(i))); 7835 7836 // Push the arguments from the call instruction up to the register mask. 7837 SDNode::op_iterator e = HasGlue ? Call->op_end()-2 : Call->op_end()-1; 7838 Ops.append(Call->op_begin() + 2, e); 7839 7840 // Push live variables for the stack map. 7841 addStackMapLiveVars(CS, NumMetaOpers + NumArgs, dl, Ops, *this); 7842 7843 // Push the register mask info. 
7844   if (HasGlue)
7845     Ops.push_back(*(Call->op_end()-2));
7846   else
7847     Ops.push_back(*(Call->op_end()-1));
7848
7849   // Push the chain (this is originally the first operand of the call, but
7850   // now becomes the last or second-to-last operand).
7851   Ops.push_back(*(Call->op_begin()));
7852
7853   // Push the glue flag (last operand).
7854   if (HasGlue)
7855     Ops.push_back(*(Call->op_end()-1));
7856
7857   SDVTList NodeTys;
7858   if (IsAnyRegCC && HasDef) {
7859     // Create the return types based on the intrinsic definition.
7860     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7861     SmallVector<EVT, 3> ValueVTs;
7862     ComputeValueVTs(TLI, DAG.getDataLayout(), CS->getType(), ValueVTs);
7863     assert(ValueVTs.size() == 1 && "Expected only one return value type.");
7864
7865     // There is always a chain and a glue type at the end.
7866     ValueVTs.push_back(MVT::Other);
7867     ValueVTs.push_back(MVT::Glue);
7868     NodeTys = DAG.getVTList(ValueVTs);
7869   } else
7870     NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
7871
7872   // Replace the target specific call node with a PATCHPOINT node.
7873   MachineSDNode *MN = DAG.getMachineNode(TargetOpcode::PATCHPOINT,
7874                                          dl, NodeTys, Ops);
7875
7876   // Update the NodeMap.
7877   if (HasDef) {
7878     if (IsAnyRegCC)
7879       setValue(CS.getInstruction(), SDValue(MN, 0));
7880     else
7881       setValue(CS.getInstruction(), Result.first);
7882   }
7883
7884   // Fix up the consumers of the intrinsic. The chain and glue may be used in
7885   // the call sequence. Furthermore, the location of the chain and glue can
7886   // change when the AnyReg calling convention is used and the intrinsic
7887   // returns a value.
7888   if (IsAnyRegCC && HasDef) {
7889     SDValue From[] = {SDValue(Call, 0), SDValue(Call, 1)};
7890     SDValue To[] = {SDValue(MN, 1), SDValue(MN, 2)};
7891     DAG.ReplaceAllUsesOfValuesWith(From, To, 2);
7892   } else
7893     DAG.ReplaceAllUsesWith(Call, MN);
7894   DAG.DeleteNode(Call);
7895
7896   // Inform the Frame Information that we have a patchpoint in this function.
7897 FuncInfo.MF->getFrameInfo().setHasPatchPoint(); 7898 } 7899 7900 void SelectionDAGBuilder::visitVectorReduce(const CallInst &I, 7901 unsigned Intrinsic) { 7902 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 7903 SDValue Op1 = getValue(I.getArgOperand(0)); 7904 SDValue Op2; 7905 if (I.getNumArgOperands() > 1) 7906 Op2 = getValue(I.getArgOperand(1)); 7907 SDLoc dl = getCurSDLoc(); 7908 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType()); 7909 SDValue Res; 7910 FastMathFlags FMF; 7911 if (isa<FPMathOperator>(I)) 7912 FMF = I.getFastMathFlags(); 7913 SDNodeFlags SDFlags; 7914 SDFlags.setNoNaNs(FMF.noNaNs()); 7915 7916 switch (Intrinsic) { 7917 case Intrinsic::experimental_vector_reduce_fadd: 7918 if (FMF.unsafeAlgebra()) 7919 Res = DAG.getNode(ISD::VECREDUCE_FADD, dl, VT, Op2); 7920 else 7921 Res = DAG.getNode(ISD::VECREDUCE_STRICT_FADD, dl, VT, Op1, Op2); 7922 break; 7923 case Intrinsic::experimental_vector_reduce_fmul: 7924 if (FMF.unsafeAlgebra()) 7925 Res = DAG.getNode(ISD::VECREDUCE_FMUL, dl, VT, Op2); 7926 else 7927 Res = DAG.getNode(ISD::VECREDUCE_STRICT_FMUL, dl, VT, Op1, Op2); 7928 break; 7929 case Intrinsic::experimental_vector_reduce_add: 7930 Res = DAG.getNode(ISD::VECREDUCE_ADD, dl, VT, Op1); 7931 break; 7932 case Intrinsic::experimental_vector_reduce_mul: 7933 Res = DAG.getNode(ISD::VECREDUCE_MUL, dl, VT, Op1); 7934 break; 7935 case Intrinsic::experimental_vector_reduce_and: 7936 Res = DAG.getNode(ISD::VECREDUCE_AND, dl, VT, Op1); 7937 break; 7938 case Intrinsic::experimental_vector_reduce_or: 7939 Res = DAG.getNode(ISD::VECREDUCE_OR, dl, VT, Op1); 7940 break; 7941 case Intrinsic::experimental_vector_reduce_xor: 7942 Res = DAG.getNode(ISD::VECREDUCE_XOR, dl, VT, Op1); 7943 break; 7944 case Intrinsic::experimental_vector_reduce_smax: 7945 Res = DAG.getNode(ISD::VECREDUCE_SMAX, dl, VT, Op1); 7946 break; 7947 case Intrinsic::experimental_vector_reduce_smin: 7948 Res = DAG.getNode(ISD::VECREDUCE_SMIN, dl, VT, Op1); 7949 break; 7950 case Intrinsic::experimental_vector_reduce_umax: 7951 Res = DAG.getNode(ISD::VECREDUCE_UMAX, dl, VT, Op1); 7952 break; 7953 case Intrinsic::experimental_vector_reduce_umin: 7954 Res = DAG.getNode(ISD::VECREDUCE_UMIN, dl, VT, Op1); 7955 break; 7956 case Intrinsic::experimental_vector_reduce_fmax: { 7957 Res = DAG.getNode(ISD::VECREDUCE_FMAX, dl, VT, Op1, SDFlags); 7958 break; 7959 } 7960 case Intrinsic::experimental_vector_reduce_fmin: { 7961 Res = DAG.getNode(ISD::VECREDUCE_FMIN, dl, VT, Op1, SDFlags); 7962 break; 7963 } 7964 default: 7965 llvm_unreachable("Unhandled vector reduce intrinsic"); 7966 } 7967 setValue(&I, Res); 7968 } 7969 7970 /// Returns an AttributeList representing the attributes applied to the return 7971 /// value of the given call. 7972 static AttributeList getReturnAttrs(TargetLowering::CallLoweringInfo &CLI) { 7973 SmallVector<Attribute::AttrKind, 2> Attrs; 7974 if (CLI.RetSExt) 7975 Attrs.push_back(Attribute::SExt); 7976 if (CLI.RetZExt) 7977 Attrs.push_back(Attribute::ZExt); 7978 if (CLI.IsInReg) 7979 Attrs.push_back(Attribute::InReg); 7980 7981 return AttributeList::get(CLI.RetTy->getContext(), AttributeList::ReturnIndex, 7982 Attrs); 7983 } 7984 7985 /// TargetLowering::LowerCallTo - This is the default LowerCallTo 7986 /// implementation, which just calls LowerCall. 7987 /// FIXME: When all targets are 7988 /// migrated to using LowerCall, this hook should be integrated into SDISel. 
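///
/// In outline, the code below computes the value types for the return value,
/// demotes the return to a hidden sret argument if the target cannot lower it
/// in registers, flattens each argument into legal register-sized parts,
/// invokes the target's LowerCall, and then reassembles the returned parts
/// (or loads from the sret slot) into the original return type.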
7989 std::pair<SDValue, SDValue> 7990 TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const { 7991 // Handle the incoming return values from the call. 7992 CLI.Ins.clear(); 7993 Type *OrigRetTy = CLI.RetTy; 7994 SmallVector<EVT, 4> RetTys; 7995 SmallVector<uint64_t, 4> Offsets; 7996 auto &DL = CLI.DAG.getDataLayout(); 7997 ComputeValueVTs(*this, DL, CLI.RetTy, RetTys, &Offsets); 7998 7999 if (CLI.IsPostTypeLegalization) { 8000 // If we are lowering a libcall after legalization, split the return type. 8001 SmallVector<EVT, 4> OldRetTys = std::move(RetTys); 8002 SmallVector<uint64_t, 4> OldOffsets = std::move(Offsets); 8003 for (size_t i = 0, e = OldRetTys.size(); i != e; ++i) { 8004 EVT RetVT = OldRetTys[i]; 8005 uint64_t Offset = OldOffsets[i]; 8006 MVT RegisterVT = getRegisterType(CLI.RetTy->getContext(), RetVT); 8007 unsigned NumRegs = getNumRegisters(CLI.RetTy->getContext(), RetVT); 8008 unsigned RegisterVTSize = RegisterVT.getSizeInBits(); 8009 RetTys.append(NumRegs, RegisterVT); 8010 for (unsigned j = 0; j != NumRegs; ++j) 8011 Offsets.push_back(Offset + j * RegisterVTSize); 8012 } 8013 } 8014 8015 SmallVector<ISD::OutputArg, 4> Outs; 8016 GetReturnInfo(CLI.RetTy, getReturnAttrs(CLI), Outs, *this, DL); 8017 8018 bool CanLowerReturn = 8019 this->CanLowerReturn(CLI.CallConv, CLI.DAG.getMachineFunction(), 8020 CLI.IsVarArg, Outs, CLI.RetTy->getContext()); 8021 8022 SDValue DemoteStackSlot; 8023 int DemoteStackIdx = -100; 8024 if (!CanLowerReturn) { 8025 // FIXME: equivalent assert? 8026 // assert(!CS.hasInAllocaArgument() && 8027 // "sret demotion is incompatible with inalloca"); 8028 uint64_t TySize = DL.getTypeAllocSize(CLI.RetTy); 8029 unsigned Align = DL.getPrefTypeAlignment(CLI.RetTy); 8030 MachineFunction &MF = CLI.DAG.getMachineFunction(); 8031 DemoteStackIdx = MF.getFrameInfo().CreateStackObject(TySize, Align, false); 8032 Type *StackSlotPtrType = PointerType::getUnqual(CLI.RetTy); 8033 8034 DemoteStackSlot = CLI.DAG.getFrameIndex(DemoteStackIdx, getFrameIndexTy(DL)); 8035 ArgListEntry Entry; 8036 Entry.Node = DemoteStackSlot; 8037 Entry.Ty = StackSlotPtrType; 8038 Entry.IsSExt = false; 8039 Entry.IsZExt = false; 8040 Entry.IsInReg = false; 8041 Entry.IsSRet = true; 8042 Entry.IsNest = false; 8043 Entry.IsByVal = false; 8044 Entry.IsReturned = false; 8045 Entry.IsSwiftSelf = false; 8046 Entry.IsSwiftError = false; 8047 Entry.Alignment = Align; 8048 CLI.getArgs().insert(CLI.getArgs().begin(), Entry); 8049 CLI.RetTy = Type::getVoidTy(CLI.RetTy->getContext()); 8050 8051 // sret demotion isn't compatible with tail-calls, since the sret argument 8052 // points into the callers stack frame. 8053 CLI.IsTailCall = false; 8054 } else { 8055 for (unsigned I = 0, E = RetTys.size(); I != E; ++I) { 8056 EVT VT = RetTys[I]; 8057 MVT RegisterVT = 8058 getRegisterTypeForCallingConv(CLI.RetTy->getContext(), VT); 8059 unsigned NumRegs = 8060 getNumRegistersForCallingConv(CLI.RetTy->getContext(), VT); 8061 for (unsigned i = 0; i != NumRegs; ++i) { 8062 ISD::InputArg MyFlags; 8063 MyFlags.VT = RegisterVT; 8064 MyFlags.ArgVT = VT; 8065 MyFlags.Used = CLI.IsReturnValueUsed; 8066 if (CLI.RetSExt) 8067 MyFlags.Flags.setSExt(); 8068 if (CLI.RetZExt) 8069 MyFlags.Flags.setZExt(); 8070 if (CLI.IsInReg) 8071 MyFlags.Flags.setInReg(); 8072 CLI.Ins.push_back(MyFlags); 8073 } 8074 } 8075 } 8076 8077 // We push in swifterror return as the last element of CLI.Ins. 
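  // (On targets that support swifterror, the error value travels in a
  // register in addition to the ordinary return values, so it is modeled
  // as one more incoming value here.)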
8078   ArgListTy &Args = CLI.getArgs();
8079   if (supportSwiftError()) {
8080     for (unsigned i = 0, e = Args.size(); i != e; ++i) {
8081       if (Args[i].IsSwiftError) {
8082         ISD::InputArg MyFlags;
8083         MyFlags.VT = getPointerTy(DL);
8084         MyFlags.ArgVT = EVT(getPointerTy(DL));
8085         MyFlags.Flags.setSwiftError();
8086         CLI.Ins.push_back(MyFlags);
8087       }
8088     }
8089   }
8090
8091   // Handle all of the outgoing arguments.
8092   CLI.Outs.clear();
8093   CLI.OutVals.clear();
8094   for (unsigned i = 0, e = Args.size(); i != e; ++i) {
8095     SmallVector<EVT, 4> ValueVTs;
8096     ComputeValueVTs(*this, DL, Args[i].Ty, ValueVTs);
8097     // FIXME: Split arguments if CLI.IsPostTypeLegalization
8098     Type *FinalType = Args[i].Ty;
8099     if (Args[i].IsByVal)
8100       FinalType = cast<PointerType>(Args[i].Ty)->getElementType();
8101     bool NeedsRegBlock = functionArgumentNeedsConsecutiveRegisters(
8102         FinalType, CLI.CallConv, CLI.IsVarArg);
8103     for (unsigned Value = 0, NumValues = ValueVTs.size(); Value != NumValues;
8104          ++Value) {
8105       EVT VT = ValueVTs[Value];
8106       Type *ArgTy = VT.getTypeForEVT(CLI.RetTy->getContext());
8107       SDValue Op = SDValue(Args[i].Node.getNode(),
8108                            Args[i].Node.getResNo() + Value);
8109       ISD::ArgFlagsTy Flags;
8110
8111       // Certain targets (such as MIPS) may have a different ABI alignment
8112       // for a type depending on the context. Give the target a chance to
8113       // specify the alignment it wants.
8114       unsigned OriginalAlignment = getABIAlignmentForCallingConv(ArgTy, DL);
8115
8116       if (Args[i].IsZExt)
8117         Flags.setZExt();
8118       if (Args[i].IsSExt)
8119         Flags.setSExt();
8120       if (Args[i].IsInReg) {
8121         // If we are using the vectorcall calling convention, a structure that
8122         // is passed InReg is surely an HVA (homogeneous vector aggregate).
8123         if (CLI.CallConv == CallingConv::X86_VectorCall &&
8124             isa<StructType>(FinalType)) {
8125           // The first value of a structure is marked as HvaStart.
8126           if (0 == Value)
8127             Flags.setHvaStart();
8128           Flags.setHva();
8129         }
8130         // Set the InReg flag.
8131         Flags.setInReg();
8132       }
8133       if (Args[i].IsSRet)
8134         Flags.setSRet();
8135       if (Args[i].IsSwiftSelf)
8136         Flags.setSwiftSelf();
8137       if (Args[i].IsSwiftError)
8138         Flags.setSwiftError();
8139       if (Args[i].IsByVal)
8140         Flags.setByVal();
8141       if (Args[i].IsInAlloca) {
8142         Flags.setInAlloca();
8143         // Set the byval flag for CCAssignFn callbacks that don't know about
8144         // inalloca. This way we can know how many bytes we should've allocated
8145         // and how many bytes a callee cleanup function will pop. If we port
8146         // inalloca to more targets, we'll have to add custom inalloca handling
8147         // in the various CC lowering callbacks.
8148         Flags.setByVal();
8149       }
8150       if (Args[i].IsByVal || Args[i].IsInAlloca) {
8151         PointerType *Ty = cast<PointerType>(Args[i].Ty);
8152         Type *ElementTy = Ty->getElementType();
8153         Flags.setByValSize(DL.getTypeAllocSize(ElementTy));
8154         // For ByVal, the alignment should come from the frontend. The backend
8155         // will guess if this info is not there, but there are cases it cannot get right.
8156 unsigned FrameAlign; 8157 if (Args[i].Alignment) 8158 FrameAlign = Args[i].Alignment; 8159 else 8160 FrameAlign = getByValTypeAlignment(ElementTy, DL); 8161 Flags.setByValAlign(FrameAlign); 8162 } 8163 if (Args[i].IsNest) 8164 Flags.setNest(); 8165 if (NeedsRegBlock) 8166 Flags.setInConsecutiveRegs(); 8167 Flags.setOrigAlign(OriginalAlignment); 8168 8169 MVT PartVT = getRegisterTypeForCallingConv(CLI.RetTy->getContext(), VT); 8170 unsigned NumParts = 8171 getNumRegistersForCallingConv(CLI.RetTy->getContext(), VT); 8172 SmallVector<SDValue, 4> Parts(NumParts); 8173 ISD::NodeType ExtendKind = ISD::ANY_EXTEND; 8174 8175 if (Args[i].IsSExt) 8176 ExtendKind = ISD::SIGN_EXTEND; 8177 else if (Args[i].IsZExt) 8178 ExtendKind = ISD::ZERO_EXTEND; 8179 8180 // Conservatively only handle 'returned' on non-vectors for now 8181 if (Args[i].IsReturned && !Op.getValueType().isVector()) { 8182 assert(CLI.RetTy == Args[i].Ty && RetTys.size() == NumValues && 8183 "unexpected use of 'returned'"); 8184 // Before passing 'returned' to the target lowering code, ensure that 8185 // either the register MVT and the actual EVT are the same size or that 8186 // the return value and argument are extended in the same way; in these 8187 // cases it's safe to pass the argument register value unchanged as the 8188 // return register value (although it's at the target's option whether 8189 // to do so) 8190 // TODO: allow code generation to take advantage of partially preserved 8191 // registers rather than clobbering the entire register when the 8192 // parameter extension method is not compatible with the return 8193 // extension method 8194 if ((NumParts * PartVT.getSizeInBits() == VT.getSizeInBits()) || 8195 (ExtendKind != ISD::ANY_EXTEND && CLI.RetSExt == Args[i].IsSExt && 8196 CLI.RetZExt == Args[i].IsZExt)) 8197 Flags.setReturned(); 8198 } 8199 8200 getCopyToParts(CLI.DAG, CLI.DL, Op, &Parts[0], NumParts, PartVT, 8201 CLI.CS.getInstruction(), ExtendKind, true); 8202 8203 for (unsigned j = 0; j != NumParts; ++j) { 8204 // if it isn't first piece, alignment must be 1 8205 ISD::OutputArg MyFlags(Flags, Parts[j].getValueType(), VT, 8206 i < CLI.NumFixedArgs, 8207 i, j*Parts[j].getValueType().getStoreSize()); 8208 if (NumParts > 1 && j == 0) 8209 MyFlags.Flags.setSplit(); 8210 else if (j != 0) { 8211 MyFlags.Flags.setOrigAlign(1); 8212 if (j == NumParts - 1) 8213 MyFlags.Flags.setSplitEnd(); 8214 } 8215 8216 CLI.Outs.push_back(MyFlags); 8217 CLI.OutVals.push_back(Parts[j]); 8218 } 8219 8220 if (NeedsRegBlock && Value == NumValues - 1) 8221 CLI.Outs[CLI.Outs.size() - 1].Flags.setInConsecutiveRegsLast(); 8222 } 8223 } 8224 8225 SmallVector<SDValue, 4> InVals; 8226 CLI.Chain = LowerCall(CLI, InVals); 8227 8228 // Update CLI.InVals to use outside of this function. 8229 CLI.InVals = InVals; 8230 8231 // Verify that the target's LowerCall behaved as expected. 8232 assert(CLI.Chain.getNode() && CLI.Chain.getValueType() == MVT::Other && 8233 "LowerCall didn't return a valid chain!"); 8234 assert((!CLI.IsTailCall || InVals.empty()) && 8235 "LowerCall emitted a return value for a tail call!"); 8236 assert((CLI.IsTailCall || InVals.size() == CLI.Ins.size()) && 8237 "LowerCall didn't emit the correct number of values!"); 8238 8239 // For a tail call, the return value is merely live-out and there aren't 8240 // any nodes in the DAG representing it. Return a special value to 8241 // indicate that a tail call has been emitted and no more Instructions 8242 // should be processed in the current block. 
8243 if (CLI.IsTailCall) { 8244 CLI.DAG.setRoot(CLI.Chain); 8245 return std::make_pair(SDValue(), SDValue()); 8246 } 8247 8248 #ifndef NDEBUG 8249 for (unsigned i = 0, e = CLI.Ins.size(); i != e; ++i) { 8250 assert(InVals[i].getNode() && "LowerCall emitted a null value!"); 8251 assert(EVT(CLI.Ins[i].VT) == InVals[i].getValueType() && 8252 "LowerCall emitted a value with the wrong type!"); 8253 } 8254 #endif 8255 8256 SmallVector<SDValue, 4> ReturnValues; 8257 if (!CanLowerReturn) { 8258 // The instruction result is the result of loading from the 8259 // hidden sret parameter. 8260 SmallVector<EVT, 1> PVTs; 8261 Type *PtrRetTy = PointerType::getUnqual(OrigRetTy); 8262 8263 ComputeValueVTs(*this, DL, PtrRetTy, PVTs); 8264 assert(PVTs.size() == 1 && "Pointers should fit in one register"); 8265 EVT PtrVT = PVTs[0]; 8266 8267 unsigned NumValues = RetTys.size(); 8268 ReturnValues.resize(NumValues); 8269 SmallVector<SDValue, 4> Chains(NumValues); 8270 8271 // An aggregate return value cannot wrap around the address space, so 8272 // offsets to its parts don't wrap either. 8273 SDNodeFlags Flags; 8274 Flags.setNoUnsignedWrap(true); 8275 8276 for (unsigned i = 0; i < NumValues; ++i) { 8277 SDValue Add = CLI.DAG.getNode(ISD::ADD, CLI.DL, PtrVT, DemoteStackSlot, 8278 CLI.DAG.getConstant(Offsets[i], CLI.DL, 8279 PtrVT), Flags); 8280 SDValue L = CLI.DAG.getLoad( 8281 RetTys[i], CLI.DL, CLI.Chain, Add, 8282 MachinePointerInfo::getFixedStack(CLI.DAG.getMachineFunction(), 8283 DemoteStackIdx, Offsets[i]), 8284 /* Alignment = */ 1); 8285 ReturnValues[i] = L; 8286 Chains[i] = L.getValue(1); 8287 } 8288 8289 CLI.Chain = CLI.DAG.getNode(ISD::TokenFactor, CLI.DL, MVT::Other, Chains); 8290 } else { 8291 // Collect the legal value parts into potentially illegal values 8292 // that correspond to the original function's return values. 8293 Optional<ISD::NodeType> AssertOp; 8294 if (CLI.RetSExt) 8295 AssertOp = ISD::AssertSext; 8296 else if (CLI.RetZExt) 8297 AssertOp = ISD::AssertZext; 8298 unsigned CurReg = 0; 8299 for (unsigned I = 0, E = RetTys.size(); I != E; ++I) { 8300 EVT VT = RetTys[I]; 8301 MVT RegisterVT = 8302 getRegisterTypeForCallingConv(CLI.RetTy->getContext(), VT); 8303 unsigned NumRegs = 8304 getNumRegistersForCallingConv(CLI.RetTy->getContext(), VT); 8305 8306 ReturnValues.push_back(getCopyFromParts(CLI.DAG, CLI.DL, &InVals[CurReg], 8307 NumRegs, RegisterVT, VT, nullptr, 8308 AssertOp, true)); 8309 CurReg += NumRegs; 8310 } 8311 8312 // For a function returning void, there is no return value. We can't create 8313 // such a node, so we just return a null return value in that case. In 8314 // that case, nothing will actually look at the value. 
8315 if (ReturnValues.empty()) 8316 return std::make_pair(SDValue(), CLI.Chain); 8317 } 8318 8319 SDValue Res = CLI.DAG.getNode(ISD::MERGE_VALUES, CLI.DL, 8320 CLI.DAG.getVTList(RetTys), ReturnValues); 8321 return std::make_pair(Res, CLI.Chain); 8322 } 8323 8324 void TargetLowering::LowerOperationWrapper(SDNode *N, 8325 SmallVectorImpl<SDValue> &Results, 8326 SelectionDAG &DAG) const { 8327 if (SDValue Res = LowerOperation(SDValue(N, 0), DAG)) 8328 Results.push_back(Res); 8329 } 8330 8331 SDValue TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { 8332 llvm_unreachable("LowerOperation not implemented for this target!"); 8333 } 8334 8335 void 8336 SelectionDAGBuilder::CopyValueToVirtualRegister(const Value *V, unsigned Reg) { 8337 SDValue Op = getNonRegisterValue(V); 8338 assert((Op.getOpcode() != ISD::CopyFromReg || 8339 cast<RegisterSDNode>(Op.getOperand(1))->getReg() != Reg) && 8340 "Copy from a reg to the same reg!"); 8341 assert(!TargetRegisterInfo::isPhysicalRegister(Reg) && "Is a physreg"); 8342 8343 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 8344 // If this is an InlineAsm we have to match the registers required, not the 8345 // notional registers required by the type. 8346 8347 RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), Reg, 8348 V->getType(), isABIRegCopy(V)); 8349 SDValue Chain = DAG.getEntryNode(); 8350 8351 ISD::NodeType ExtendType = (FuncInfo.PreferredExtendType.find(V) == 8352 FuncInfo.PreferredExtendType.end()) 8353 ? ISD::ANY_EXTEND 8354 : FuncInfo.PreferredExtendType[V]; 8355 RFV.getCopyToRegs(Op, DAG, getCurSDLoc(), Chain, nullptr, V, ExtendType); 8356 PendingExports.push_back(Chain); 8357 } 8358 8359 #include "llvm/CodeGen/SelectionDAGISel.h" 8360 8361 /// isOnlyUsedInEntryBlock - If the specified argument is only used in the 8362 /// entry block, return true. This includes arguments used by switches, since 8363 /// the switch may expand into multiple basic blocks. 8364 static bool isOnlyUsedInEntryBlock(const Argument *A, bool FastISel) { 8365 // With FastISel active, we may be splitting blocks, so force creation 8366 // of virtual registers for all non-dead arguments. 8367 if (FastISel) 8368 return A->use_empty(); 8369 8370 const BasicBlock &Entry = A->getParent()->front(); 8371 for (const User *U : A->users()) 8372 if (cast<Instruction>(U)->getParent() != &Entry || isa<SwitchInst>(U)) 8373 return false; // Use not in entry block. 8374 8375 return true; 8376 } 8377 8378 typedef DenseMap<const Argument *, 8379 std::pair<const AllocaInst *, const StoreInst *>> 8380 ArgCopyElisionMapTy; 8381 8382 /// Scan the entry block of the function in FuncInfo for arguments that look 8383 /// like copies into a local alloca. Record any copied arguments in 8384 /// ArgCopyElisionCandidates. 8385 static void 8386 findArgumentCopyElisionCandidates(const DataLayout &DL, 8387 FunctionLoweringInfo *FuncInfo, 8388 ArgCopyElisionMapTy &ArgCopyElisionCandidates) { 8389 // Record the state of every static alloca used in the entry block. Argument 8390 // allocas are all used in the entry block, so we need approximately as many 8391 // entries as we have arguments. 
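  // Each alloca starts out Unknown; a store that fully initializes it from an
  // argument promotes it to Elidable, while any escaping use or unanalyzable
  // store demotes it to Clobbered.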
8392 enum StaticAllocaInfo { Unknown, Clobbered, Elidable }; 8393 SmallDenseMap<const AllocaInst *, StaticAllocaInfo, 8> StaticAllocas; 8394 unsigned NumArgs = FuncInfo->Fn->arg_size(); 8395 StaticAllocas.reserve(NumArgs * 2); 8396 8397 auto GetInfoIfStaticAlloca = [&](const Value *V) -> StaticAllocaInfo * { 8398 if (!V) 8399 return nullptr; 8400 V = V->stripPointerCasts(); 8401 const auto *AI = dyn_cast<AllocaInst>(V); 8402 if (!AI || !AI->isStaticAlloca() || !FuncInfo->StaticAllocaMap.count(AI)) 8403 return nullptr; 8404 auto Iter = StaticAllocas.insert({AI, Unknown}); 8405 return &Iter.first->second; 8406 }; 8407 8408 // Look for stores of arguments to static allocas. Look through bitcasts and 8409 // GEPs to handle type coercions, as long as the alloca is fully initialized 8410 // by the store. Any non-store use of an alloca escapes it and any subsequent 8411 // unanalyzed store might write it. 8412 // FIXME: Handle structs initialized with multiple stores. 8413 for (const Instruction &I : FuncInfo->Fn->getEntryBlock()) { 8414 // Look for stores, and handle non-store uses conservatively. 8415 const auto *SI = dyn_cast<StoreInst>(&I); 8416 if (!SI) { 8417 // We will look through cast uses, so ignore them completely. 8418 if (I.isCast()) 8419 continue; 8420 // Ignore debug info intrinsics, they don't escape or store to allocas. 8421 if (isa<DbgInfoIntrinsic>(I)) 8422 continue; 8423 // This is an unknown instruction. Assume it escapes or writes to all 8424 // static alloca operands. 8425 for (const Use &U : I.operands()) { 8426 if (StaticAllocaInfo *Info = GetInfoIfStaticAlloca(U)) 8427 *Info = StaticAllocaInfo::Clobbered; 8428 } 8429 continue; 8430 } 8431 8432 // If the stored value is a static alloca, mark it as escaped. 8433 if (StaticAllocaInfo *Info = GetInfoIfStaticAlloca(SI->getValueOperand())) 8434 *Info = StaticAllocaInfo::Clobbered; 8435 8436 // Check if the destination is a static alloca. 8437 const Value *Dst = SI->getPointerOperand()->stripPointerCasts(); 8438 StaticAllocaInfo *Info = GetInfoIfStaticAlloca(Dst); 8439 if (!Info) 8440 continue; 8441 const AllocaInst *AI = cast<AllocaInst>(Dst); 8442 8443 // Skip allocas that have been initialized or clobbered. 8444 if (*Info != StaticAllocaInfo::Unknown) 8445 continue; 8446 8447 // Check if the stored value is an argument, and that this store fully 8448 // initializes the alloca. Don't elide copies from the same argument twice. 8449 const Value *Val = SI->getValueOperand()->stripPointerCasts(); 8450 const auto *Arg = dyn_cast<Argument>(Val); 8451 if (!Arg || Arg->hasInAllocaAttr() || Arg->hasByValAttr() || 8452 Arg->getType()->isEmptyTy() || 8453 DL.getTypeStoreSize(Arg->getType()) != 8454 DL.getTypeAllocSize(AI->getAllocatedType()) || 8455 ArgCopyElisionCandidates.count(Arg)) { 8456 *Info = StaticAllocaInfo::Clobbered; 8457 continue; 8458 } 8459 8460 DEBUG(dbgs() << "Found argument copy elision candidate: " << *AI << '\n'); 8461 8462 // Mark this alloca and store for argument copy elision. 8463 *Info = StaticAllocaInfo::Elidable; 8464 ArgCopyElisionCandidates.insert({Arg, {AI, SI}}); 8465 8466 // Stop scanning if we've seen all arguments. This will happen early in -O0 8467 // builds, which is useful, because -O0 builds have large entry blocks and 8468 // many allocas. 8469 if (ArgCopyElisionCandidates.size() == NumArgs) 8470 break; 8471 } 8472 } 8473 8474 /// Try to elide argument copies from memory into a local alloca. Succeeds if 8475 /// ArgVal is a load from a suitable fixed stack object. 
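///
/// For example (illustrative IR), the elidable entry-block pattern is:
///   %x.addr = alloca i32
///   store i32 %x, i32* %x.addr
/// When %x arrives on the stack, the alloca can reuse the argument's fixed
/// stack object instead of copying it into a fresh slot.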
8476 static void tryToElideArgumentCopy(
8477     FunctionLoweringInfo *FuncInfo, SmallVectorImpl<SDValue> &Chains,
8478     DenseMap<int, int> &ArgCopyElisionFrameIndexMap,
8479     SmallPtrSetImpl<const Instruction *> &ElidedArgCopyInstrs,
8480     ArgCopyElisionMapTy &ArgCopyElisionCandidates, const Argument &Arg,
8481     SDValue ArgVal, bool &ArgHasUses) {
8482   // Check if this is a load from a fixed stack object.
8483   auto *LNode = dyn_cast<LoadSDNode>(ArgVal);
8484   if (!LNode)
8485     return;
8486   auto *FINode = dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode());
8487   if (!FINode)
8488     return;
8489
8490   // Check that the fixed stack object is the right size and alignment.
8491   // Look at the alignment that the user wrote on the alloca instead of looking
8492   // at the stack object.
8493   auto ArgCopyIter = ArgCopyElisionCandidates.find(&Arg);
8494   assert(ArgCopyIter != ArgCopyElisionCandidates.end());
8495   const AllocaInst *AI = ArgCopyIter->second.first;
8496   int FixedIndex = FINode->getIndex();
8497   int &AllocaIndex = FuncInfo->StaticAllocaMap[AI];
8498   int OldIndex = AllocaIndex;
8499   MachineFrameInfo &MFI = FuncInfo->MF->getFrameInfo();
8500   if (MFI.getObjectSize(FixedIndex) != MFI.getObjectSize(OldIndex)) {
8501     DEBUG(dbgs() << "  argument copy elision failed due to bad fixed stack "
8502                     "object size\n");
8503     return;
8504   }
8505   unsigned RequiredAlignment = AI->getAlignment();
8506   if (!RequiredAlignment) {
8507     RequiredAlignment = FuncInfo->MF->getDataLayout().getABITypeAlignment(
8508         AI->getAllocatedType());
8509   }
8510   if (MFI.getObjectAlignment(FixedIndex) < RequiredAlignment) {
8511     DEBUG(dbgs() << "  argument copy elision failed: alignment of alloca "
8512                     "greater than stack argument alignment ("
8513                  << RequiredAlignment << " vs "
8514                  << MFI.getObjectAlignment(FixedIndex) << ")\n");
8515     return;
8516   }
8517
8518   // Perform the elision. Delete the old stack object and replace its only use
8519   // in the variable info map. Mark the stack object as mutable.
8520   DEBUG({
8521     dbgs() << "Eliding argument copy from " << Arg << " to " << *AI << '\n'
8522            << "  Replacing frame index " << OldIndex << " with " << FixedIndex
8523            << '\n';
8524   });
8525   MFI.RemoveStackObject(OldIndex);
8526   MFI.setIsImmutableObjectIndex(FixedIndex, false);
8527   AllocaIndex = FixedIndex;
8528   ArgCopyElisionFrameIndexMap.insert({OldIndex, FixedIndex});
8529   Chains.push_back(ArgVal.getValue(1));
8530
8531   // Avoid emitting code for the store implementing the copy.
8532   const StoreInst *SI = ArgCopyIter->second.second;
8533   ElidedArgCopyInstrs.insert(SI);
8534
8535   // Check for uses of the argument again so that we can avoid exporting ArgVal
8536   // if it isn't used by anything other than the store.
8537   for (const Value *U : Arg.users()) {
8538     if (U != SI) {
8539       ArgHasUses = true;
8540       break;
8541     }
8542   }
8543 }
8544
8545 void SelectionDAGISel::LowerArguments(const Function &F) {
8546   SelectionDAG &DAG = SDB->DAG;
8547   SDLoc dl = SDB->getCurSDLoc();
8548   const DataLayout &DL = DAG.getDataLayout();
8549   SmallVector<ISD::InputArg, 16> Ins;
8550
8551   if (!FuncInfo->CanLowerReturn) {
8552     // Put in an sret pointer parameter before all the other parameters.
8553     SmallVector<EVT, 1> ValueVTs;
8554     ComputeValueVTs(*TLI, DAG.getDataLayout(),
8555                     PointerType::getUnqual(F.getReturnType()), ValueVTs);
8556
8557     // NOTE: Assuming that a pointer will never break down to more than one VT
8558     // or one register.
8559     ISD::ArgFlagsTy Flags;
8560     Flags.setSRet();
8561     MVT RegisterVT = TLI->getRegisterType(*DAG.getContext(), ValueVTs[0]);
8562     ISD::InputArg RetArg(Flags, RegisterVT, ValueVTs[0], true,
8563                          ISD::InputArg::NoArgIndex, 0);
8564     Ins.push_back(RetArg);
8565   }
8566
8567   // Look for stores of arguments to static allocas. Mark such arguments with a
8568   // flag to ask the target to give us the memory location of that argument if
8569   // available.
8570   ArgCopyElisionMapTy ArgCopyElisionCandidates;
8571   findArgumentCopyElisionCandidates(DL, FuncInfo, ArgCopyElisionCandidates);
8572
8573   // Set up the incoming argument description vector.
8574   for (const Argument &Arg : F.args()) {
8575     unsigned ArgNo = Arg.getArgNo();
8576     SmallVector<EVT, 4> ValueVTs;
8577     ComputeValueVTs(*TLI, DAG.getDataLayout(), Arg.getType(), ValueVTs);
8578     bool isArgValueUsed = !Arg.use_empty();
8579     unsigned PartBase = 0;
8580     Type *FinalType = Arg.getType();
8581     if (Arg.hasAttribute(Attribute::ByVal))
8582       FinalType = cast<PointerType>(FinalType)->getElementType();
8583     bool NeedsRegBlock = TLI->functionArgumentNeedsConsecutiveRegisters(
8584         FinalType, F.getCallingConv(), F.isVarArg());
8585     for (unsigned Value = 0, NumValues = ValueVTs.size();
8586          Value != NumValues; ++Value) {
8587       EVT VT = ValueVTs[Value];
8588       Type *ArgTy = VT.getTypeForEVT(*DAG.getContext());
8589       ISD::ArgFlagsTy Flags;
8590
8591       // Certain targets (such as MIPS) may have a different ABI alignment
8592       // for a type depending on the context. Give the target a chance to
8593       // specify the alignment it wants.
8594       unsigned OriginalAlignment =
8595           TLI->getABIAlignmentForCallingConv(ArgTy, DL);
8596
8597       if (Arg.hasAttribute(Attribute::ZExt))
8598         Flags.setZExt();
8599       if (Arg.hasAttribute(Attribute::SExt))
8600         Flags.setSExt();
8601       if (Arg.hasAttribute(Attribute::InReg)) {
8602         // If we are using the vectorcall calling convention, a structure that
8603         // is passed InReg is surely an HVA (homogeneous vector aggregate).
8604         if (F.getCallingConv() == CallingConv::X86_VectorCall &&
8605             isa<StructType>(Arg.getType())) {
8606           // The first value of a structure is marked as HvaStart.
8607           if (0 == Value)
8608             Flags.setHvaStart();
8609           Flags.setHva();
8610         }
8611         // Set the InReg flag.
8612         Flags.setInReg();
8613       }
8614       if (Arg.hasAttribute(Attribute::StructRet))
8615         Flags.setSRet();
8616       if (Arg.hasAttribute(Attribute::SwiftSelf))
8617         Flags.setSwiftSelf();
8618       if (Arg.hasAttribute(Attribute::SwiftError))
8619         Flags.setSwiftError();
8620       if (Arg.hasAttribute(Attribute::ByVal))
8621         Flags.setByVal();
8622       if (Arg.hasAttribute(Attribute::InAlloca)) {
8623         Flags.setInAlloca();
8624         // Set the byval flag for CCAssignFn callbacks that don't know about
8625         // inalloca. This way we can know how many bytes we should've allocated
8626         // and how many bytes a callee cleanup function will pop. If we port
8627         // inalloca to more targets, we'll have to add custom inalloca handling
8628         // in the various CC lowering callbacks.
8629         Flags.setByVal();
8630       }
8631       if (F.getCallingConv() == CallingConv::X86_INTR) {
8632         // x86 interrupts pass the frame (the 1st parameter) by value on the stack.
8633         if (ArgNo == 0)
8634           Flags.setByVal();
8635       }
8636       if (Flags.isByVal() || Flags.isInAlloca()) {
8637         PointerType *Ty = cast<PointerType>(Arg.getType());
8638         Type *ElementTy = Ty->getElementType();
8639         Flags.setByValSize(DL.getTypeAllocSize(ElementTy));
8640         // For ByVal, the alignment should be passed from the frontend. The
8641         // backend will guess if this info is not there, but there are cases it cannot get right.
8642 unsigned FrameAlign; 8643 if (Arg.getParamAlignment()) 8644 FrameAlign = Arg.getParamAlignment(); 8645 else 8646 FrameAlign = TLI->getByValTypeAlignment(ElementTy, DL); 8647 Flags.setByValAlign(FrameAlign); 8648 } 8649 if (Arg.hasAttribute(Attribute::Nest)) 8650 Flags.setNest(); 8651 if (NeedsRegBlock) 8652 Flags.setInConsecutiveRegs(); 8653 Flags.setOrigAlign(OriginalAlignment); 8654 if (ArgCopyElisionCandidates.count(&Arg)) 8655 Flags.setCopyElisionCandidate(); 8656 8657 MVT RegisterVT = 8658 TLI->getRegisterTypeForCallingConv(*CurDAG->getContext(), VT); 8659 unsigned NumRegs = 8660 TLI->getNumRegistersForCallingConv(*CurDAG->getContext(), VT); 8661 for (unsigned i = 0; i != NumRegs; ++i) { 8662 ISD::InputArg MyFlags(Flags, RegisterVT, VT, isArgValueUsed, 8663 ArgNo, PartBase+i*RegisterVT.getStoreSize()); 8664 if (NumRegs > 1 && i == 0) 8665 MyFlags.Flags.setSplit(); 8666 // if it isn't first piece, alignment must be 1 8667 else if (i > 0) { 8668 MyFlags.Flags.setOrigAlign(1); 8669 if (i == NumRegs - 1) 8670 MyFlags.Flags.setSplitEnd(); 8671 } 8672 Ins.push_back(MyFlags); 8673 } 8674 if (NeedsRegBlock && Value == NumValues - 1) 8675 Ins[Ins.size() - 1].Flags.setInConsecutiveRegsLast(); 8676 PartBase += VT.getStoreSize(); 8677 } 8678 } 8679 8680 // Call the target to set up the argument values. 8681 SmallVector<SDValue, 8> InVals; 8682 SDValue NewRoot = TLI->LowerFormalArguments( 8683 DAG.getRoot(), F.getCallingConv(), F.isVarArg(), Ins, dl, DAG, InVals); 8684 8685 // Verify that the target's LowerFormalArguments behaved as expected. 8686 assert(NewRoot.getNode() && NewRoot.getValueType() == MVT::Other && 8687 "LowerFormalArguments didn't return a valid chain!"); 8688 assert(InVals.size() == Ins.size() && 8689 "LowerFormalArguments didn't emit the correct number of values!"); 8690 DEBUG({ 8691 for (unsigned i = 0, e = Ins.size(); i != e; ++i) { 8692 assert(InVals[i].getNode() && 8693 "LowerFormalArguments emitted a null value!"); 8694 assert(EVT(Ins[i].VT) == InVals[i].getValueType() && 8695 "LowerFormalArguments emitted a value with the wrong type!"); 8696 } 8697 }); 8698 8699 // Update the DAG with the new chain value resulting from argument lowering. 8700 DAG.setRoot(NewRoot); 8701 8702 // Set up the argument values. 8703 unsigned i = 0; 8704 if (!FuncInfo->CanLowerReturn) { 8705 // Create a virtual register for the sret pointer, and put in a copy 8706 // from the sret argument into it. 8707 SmallVector<EVT, 1> ValueVTs; 8708 ComputeValueVTs(*TLI, DAG.getDataLayout(), 8709 PointerType::getUnqual(F.getReturnType()), ValueVTs); 8710 MVT VT = ValueVTs[0].getSimpleVT(); 8711 MVT RegVT = TLI->getRegisterType(*CurDAG->getContext(), VT); 8712 Optional<ISD::NodeType> AssertOp = None; 8713 SDValue ArgValue = getCopyFromParts(DAG, dl, &InVals[0], 1, 8714 RegVT, VT, nullptr, AssertOp); 8715 8716 MachineFunction& MF = SDB->DAG.getMachineFunction(); 8717 MachineRegisterInfo& RegInfo = MF.getRegInfo(); 8718 unsigned SRetReg = RegInfo.createVirtualRegister(TLI->getRegClassFor(RegVT)); 8719 FuncInfo->DemoteRegister = SRetReg; 8720 NewRoot = 8721 SDB->DAG.getCopyToReg(NewRoot, SDB->getCurSDLoc(), SRetReg, ArgValue); 8722 DAG.setRoot(NewRoot); 8723 8724 // i indexes lowered arguments. Bump it past the hidden sret argument. 
8725     ++i;
8726   }
8727
8728   SmallVector<SDValue, 4> Chains;
8729   DenseMap<int, int> ArgCopyElisionFrameIndexMap;
8730   for (const Argument &Arg : F.args()) {
8731     SmallVector<SDValue, 4> ArgValues;
8732     SmallVector<EVT, 4> ValueVTs;
8733     ComputeValueVTs(*TLI, DAG.getDataLayout(), Arg.getType(), ValueVTs);
8734     unsigned NumValues = ValueVTs.size();
8735     if (NumValues == 0)
8736       continue;
8737
8738     bool ArgHasUses = !Arg.use_empty();
8739
8740     // Elide the copying store if the target loaded this argument from a
8741     // suitable fixed stack object.
8742     if (Ins[i].Flags.isCopyElisionCandidate()) {
8743       tryToElideArgumentCopy(FuncInfo, Chains, ArgCopyElisionFrameIndexMap,
8744                              ElidedArgCopyInstrs, ArgCopyElisionCandidates, Arg,
8745                              InVals[i], ArgHasUses);
8746     }
8747
8748     // If this argument is unused, then remember its value. It is used to
8749     // generate debugging information.
8750     bool isSwiftErrorArg =
8751         TLI->supportSwiftError() &&
8752         Arg.hasAttribute(Attribute::SwiftError);
8753     if (!ArgHasUses && !isSwiftErrorArg) {
8754       SDB->setUnusedArgValue(&Arg, InVals[i]);
8755
8756       // Also remember any frame index for use in FastISel.
8757       if (FrameIndexSDNode *FI =
8758               dyn_cast<FrameIndexSDNode>(InVals[i].getNode()))
8759         FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
8760     }
8761
8762     for (unsigned Val = 0; Val != NumValues; ++Val) {
8763       EVT VT = ValueVTs[Val];
8764       MVT PartVT =
8765           TLI->getRegisterTypeForCallingConv(*CurDAG->getContext(), VT);
8766       unsigned NumParts =
8767           TLI->getNumRegistersForCallingConv(*CurDAG->getContext(), VT);
8768
8769       // Even an apparently 'unused' swifterror argument needs to be
8770       // returned, so we generate a copy for it that can be used on return
8771       // from the function.
8772       if (ArgHasUses || isSwiftErrorArg) {
8773         Optional<ISD::NodeType> AssertOp;
8774         if (Arg.hasAttribute(Attribute::SExt))
8775           AssertOp = ISD::AssertSext;
8776         else if (Arg.hasAttribute(Attribute::ZExt))
8777           AssertOp = ISD::AssertZext;
8778
8779         ArgValues.push_back(getCopyFromParts(DAG, dl, &InVals[i], NumParts,
8780                                              PartVT, VT, nullptr, AssertOp,
8781                                              true));
8782       }
8783
8784       i += NumParts;
8785     }
8786
8787     // We don't need to do anything else for unused arguments.
8788     if (ArgValues.empty())
8789       continue;
8790
8791     // Note down the frame index.
8792     if (FrameIndexSDNode *FI =
8793             dyn_cast<FrameIndexSDNode>(ArgValues[0].getNode()))
8794       FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
8795
8796     SDValue Res = DAG.getMergeValues(makeArrayRef(ArgValues.data(), NumValues),
8797                                      SDB->getCurSDLoc());
8798
8799     SDB->setValue(&Arg, Res);
8800     if (!TM.Options.EnableFastISel && Res.getOpcode() == ISD::BUILD_PAIR) {
8801       // We want to associate the argument with the frame index, among
8802       // involved operands, that corresponds to the lowest address. The
8803       // getCopyFromParts function, called earlier, swaps the order of the
8804       // operands to BUILD_PAIR depending on endianness. The result of that
8805       // swapping is that the least significant bits of the argument will
8806       // be in the first operand of the BUILD_PAIR node, and the most
8807       // significant bits will be in the second operand.
8808       unsigned LowAddressOp = DAG.getDataLayout().isBigEndian() ? 1 : 0;
8809       if (LoadSDNode *LNode =
8810               dyn_cast<LoadSDNode>(Res.getOperand(LowAddressOp).getNode()))
8811         if (FrameIndexSDNode *FI =
8812                 dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode()))
8813           FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
8814     }
8815
8816     // Update the SwiftErrorVRegDefMap.
8817 if (Res.getOpcode() == ISD::CopyFromReg && isSwiftErrorArg) { 8818 unsigned Reg = cast<RegisterSDNode>(Res.getOperand(1))->getReg(); 8819 if (TargetRegisterInfo::isVirtualRegister(Reg)) 8820 FuncInfo->setCurrentSwiftErrorVReg(FuncInfo->MBB, 8821 FuncInfo->SwiftErrorArg, Reg); 8822 } 8823 8824 // If this argument is live outside of the entry block, insert a copy from 8825 // wherever we got it to the vreg that other BB's will reference it as. 8826 if (!TM.Options.EnableFastISel && Res.getOpcode() == ISD::CopyFromReg) { 8827 // If we can, though, try to skip creating an unnecessary vreg. 8828 // FIXME: This isn't very clean... it would be nice to make this more 8829 // general. It's also subtly incompatible with the hacks FastISel 8830 // uses with vregs. 8831 unsigned Reg = cast<RegisterSDNode>(Res.getOperand(1))->getReg(); 8832 if (TargetRegisterInfo::isVirtualRegister(Reg)) { 8833 FuncInfo->ValueMap[&Arg] = Reg; 8834 continue; 8835 } 8836 } 8837 if (!isOnlyUsedInEntryBlock(&Arg, TM.Options.EnableFastISel)) { 8838 FuncInfo->InitializeRegForValue(&Arg); 8839 SDB->CopyToExportRegsIfNeeded(&Arg); 8840 } 8841 } 8842 8843 if (!Chains.empty()) { 8844 Chains.push_back(NewRoot); 8845 NewRoot = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains); 8846 } 8847 8848 DAG.setRoot(NewRoot); 8849 8850 assert(i == InVals.size() && "Argument register count mismatch!"); 8851 8852 // If any argument copy elisions occurred and we have debug info, update the 8853 // stale frame indices used in the dbg.declare variable info table. 8854 MachineFunction::VariableDbgInfoMapTy &DbgDeclareInfo = MF->getVariableDbgInfo(); 8855 if (!DbgDeclareInfo.empty() && !ArgCopyElisionFrameIndexMap.empty()) { 8856 for (MachineFunction::VariableDbgInfo &VI : DbgDeclareInfo) { 8857 auto I = ArgCopyElisionFrameIndexMap.find(VI.Slot); 8858 if (I != ArgCopyElisionFrameIndexMap.end()) 8859 VI.Slot = I->second; 8860 } 8861 } 8862 8863 // Finally, if the target has anything special to do, allow it to do so. 8864 EmitFunctionEntryCode(); 8865 } 8866 8867 /// Handle PHI nodes in successor blocks. Emit code into the SelectionDAG to 8868 /// ensure constants are generated when needed. Remember the virtual registers 8869 /// that need to be added to the Machine PHI nodes as input. We cannot just 8870 /// directly add them, because expansion might result in multiple MBB's for one 8871 /// BB. As such, the start of the BB might correspond to a different MBB than 8872 /// the end. 8873 /// 8874 void 8875 SelectionDAGBuilder::HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) { 8876 const TerminatorInst *TI = LLVMBB->getTerminator(); 8877 8878 SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled; 8879 8880 // Check PHI nodes in successors that expect a value to be available from this 8881 // block. 8882 for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) { 8883 const BasicBlock *SuccBB = TI->getSuccessor(succ); 8884 if (!isa<PHINode>(SuccBB->begin())) continue; 8885 MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB]; 8886 8887 // If this terminator has multiple identical successors (common for 8888 // switches), only handle each succ once. 8889 if (!SuccsHandled.insert(SuccMBB).second) 8890 continue; 8891 8892 MachineBasicBlock::iterator MBBI = SuccMBB->begin(); 8893 8894 // At this point we know that there is a 1-1 correspondence between LLVM PHI 8895 // nodes and Machine PHI nodes, but the incoming operands have not been 8896 // emitted yet. 
8897     for (BasicBlock::const_iterator I = SuccBB->begin();
8898          const PHINode *PN = dyn_cast<PHINode>(I); ++I) {
8899       // Ignore dead PHIs.
8900       if (PN->use_empty()) continue;
8901
8902       // Skip empty types.
8903       if (PN->getType()->isEmptyTy())
8904         continue;
8905
8906       unsigned Reg;
8907       const Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);
8908
8909       if (const Constant *C = dyn_cast<Constant>(PHIOp)) {
8910         unsigned &RegOut = ConstantsOut[C];
8911         if (RegOut == 0) {
8912           RegOut = FuncInfo.CreateRegs(C->getType());
8913           CopyValueToVirtualRegister(C, RegOut);
8914         }
8915         Reg = RegOut;
8916       } else {
8917         DenseMap<const Value *, unsigned>::iterator I =
8918             FuncInfo.ValueMap.find(PHIOp);
8919         if (I != FuncInfo.ValueMap.end())
8920           Reg = I->second;
8921         else {
8922           assert(isa<AllocaInst>(PHIOp) &&
8923                  FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(PHIOp)) &&
8924                  "Didn't codegen value into a register!??");
8925           Reg = FuncInfo.CreateRegs(PHIOp->getType());
8926           CopyValueToVirtualRegister(PHIOp, Reg);
8927         }
8928       }
8929
8930       // Remember that this register needs to be added to the machine PHI node
8931       // as the input for this MBB.
8932       SmallVector<EVT, 4> ValueVTs;
8933       const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8934       ComputeValueVTs(TLI, DAG.getDataLayout(), PN->getType(), ValueVTs);
8935       for (unsigned vti = 0, vte = ValueVTs.size(); vti != vte; ++vti) {
8936         EVT VT = ValueVTs[vti];
8937         unsigned NumRegisters = TLI.getNumRegisters(*DAG.getContext(), VT);
8938         for (unsigned i = 0, e = NumRegisters; i != e; ++i)
8939           FuncInfo.PHINodesToUpdate.push_back(
8940               std::make_pair(&*MBBI++, Reg + i));
8941         Reg += NumRegisters;
8942       }
8943     }
8944   }
8945
8946   ConstantsOut.clear();
8947 }
8948
8949 /// Add a successor MBB to ParentMBB, creating a new MachineBB for BB if
8950 /// SuccMBB is 0.
8951 MachineBasicBlock *
8952 SelectionDAGBuilder::StackProtectorDescriptor::
8953 AddSuccessorMBB(const BasicBlock *BB,
8954                 MachineBasicBlock *ParentMBB,
8955                 bool IsLikely,
8956                 MachineBasicBlock *SuccMBB) {
8957   // If SuccBB has not been created yet, create it.
8958   if (!SuccMBB) {
8959     MachineFunction *MF = ParentMBB->getParent();
8960     MachineFunction::iterator BBI(ParentMBB);
8961     SuccMBB = MF->CreateMachineBasicBlock(BB);
8962     MF->insert(++BBI, SuccMBB);
8963   }
8964   // Add it as a successor of ParentMBB.
8965   ParentMBB->addSuccessor(
8966       SuccMBB, BranchProbabilityInfo::getBranchProbStackProtector(IsLikely));
8967   return SuccMBB;
8968 }
8969
8970 MachineBasicBlock *SelectionDAGBuilder::NextBlock(MachineBasicBlock *MBB) {
8971   MachineFunction::iterator I(MBB);
8972   if (++I == FuncInfo.MF->end())
8973     return nullptr;
8974   return &*I;
8975 }
8976
8977 /// During lowering, new call nodes can be created (such as memset, etc.).
8978 /// Those will become new roots of the current DAG, but complications arise
8979 /// when they are tail calls. In such cases, the call lowering will update
8980 /// the root, but the builder still needs to know that a tail call has been
8981 /// lowered in order to avoid generating an additional return.
8982 void SelectionDAGBuilder::updateDAGForMaybeTailCall(SDValue MaybeTC) {
8983   // If the node is null, we do have a tail call.
8984 if (MaybeTC.getNode() != nullptr) 8985 DAG.setRoot(MaybeTC); 8986 else 8987 HasTailCall = true; 8988 } 8989 8990 uint64_t 8991 SelectionDAGBuilder::getJumpTableRange(const CaseClusterVector &Clusters, 8992 unsigned First, unsigned Last) const { 8993 assert(Last >= First); 8994 const APInt &LowCase = Clusters[First].Low->getValue(); 8995 const APInt &HighCase = Clusters[Last].High->getValue(); 8996 assert(LowCase.getBitWidth() == HighCase.getBitWidth()); 8997 8998 // FIXME: A range of consecutive cases has 100% density, but only requires one 8999 // comparison to lower. We should discriminate against such consecutive ranges 9000 // in jump tables. 9001 9002 return (HighCase - LowCase).getLimitedValue((UINT64_MAX - 1) / 100) + 1; 9003 } 9004 9005 uint64_t SelectionDAGBuilder::getJumpTableNumCases( 9006 const SmallVectorImpl<unsigned> &TotalCases, unsigned First, 9007 unsigned Last) const { 9008 assert(Last >= First); 9009 assert(TotalCases[Last] >= TotalCases[First]); 9010 uint64_t NumCases = 9011 TotalCases[Last] - (First == 0 ? 0 : TotalCases[First - 1]); 9012 return NumCases; 9013 } 9014 9015 bool SelectionDAGBuilder::buildJumpTable(const CaseClusterVector &Clusters, 9016 unsigned First, unsigned Last, 9017 const SwitchInst *SI, 9018 MachineBasicBlock *DefaultMBB, 9019 CaseCluster &JTCluster) { 9020 assert(First <= Last); 9021 9022 auto Prob = BranchProbability::getZero(); 9023 unsigned NumCmps = 0; 9024 std::vector<MachineBasicBlock*> Table; 9025 DenseMap<MachineBasicBlock*, BranchProbability> JTProbs; 9026 9027 // Initialize probabilities in JTProbs. 9028 for (unsigned I = First; I <= Last; ++I) 9029 JTProbs[Clusters[I].MBB] = BranchProbability::getZero(); 9030 9031 for (unsigned I = First; I <= Last; ++I) { 9032 assert(Clusters[I].Kind == CC_Range); 9033 Prob += Clusters[I].Prob; 9034 const APInt &Low = Clusters[I].Low->getValue(); 9035 const APInt &High = Clusters[I].High->getValue(); 9036 NumCmps += (Low == High) ? 1 : 2; 9037 if (I != First) { 9038 // Fill the gap between this and the previous cluster. 9039 const APInt &PreviousHigh = Clusters[I - 1].High->getValue(); 9040 assert(PreviousHigh.slt(Low)); 9041 uint64_t Gap = (Low - PreviousHigh).getLimitedValue() - 1; 9042 for (uint64_t J = 0; J < Gap; J++) 9043 Table.push_back(DefaultMBB); 9044 } 9045 uint64_t ClusterSize = (High - Low).getLimitedValue() + 1; 9046 for (uint64_t J = 0; J < ClusterSize; ++J) 9047 Table.push_back(Clusters[I].MBB); 9048 JTProbs[Clusters[I].MBB] += Clusters[I].Prob; 9049 } 9050 9051 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 9052 unsigned NumDests = JTProbs.size(); 9053 if (TLI.isSuitableForBitTests( 9054 NumDests, NumCmps, Clusters[First].Low->getValue(), 9055 Clusters[Last].High->getValue(), DAG.getDataLayout())) { 9056 // Clusters[First..Last] should be lowered as bit tests instead. 9057 return false; 9058 } 9059 9060 // Create the MBB that will load from and jump through the table. 9061 // Note: We create it here, but it's not inserted into the function yet. 9062 MachineFunction *CurMF = FuncInfo.MF; 9063 MachineBasicBlock *JumpTableMBB = 9064 CurMF->CreateMachineBasicBlock(SI->getParent()); 9065 9066 // Add successors. Note: use table order for determinism. 
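  // (Walking Table rather than the JTProbs DenseMap keeps the successor order
  // independent of pointer hashing, so the emitted code is deterministic.)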
9067 SmallPtrSet<MachineBasicBlock *, 8> Done;
9068 for (MachineBasicBlock *Succ : Table) {
9069 if (Done.count(Succ))
9070 continue;
9071 addSuccessorWithProb(JumpTableMBB, Succ, JTProbs[Succ]);
9072 Done.insert(Succ);
9073 }
9074 JumpTableMBB->normalizeSuccProbs();
9075 
9076 unsigned JTI = CurMF->getOrCreateJumpTableInfo(TLI.getJumpTableEncoding())
9077 ->createJumpTableIndex(Table);
9078 
9079 // Set up the jump table info.
9080 JumpTable JT(-1U, JTI, JumpTableMBB, nullptr);
9081 JumpTableHeader JTH(Clusters[First].Low->getValue(),
9082 Clusters[Last].High->getValue(), SI->getCondition(),
9083 nullptr, false);
9084 JTCases.emplace_back(std::move(JTH), std::move(JT));
9085 
9086 JTCluster = CaseCluster::jumpTable(Clusters[First].Low, Clusters[Last].High,
9087 JTCases.size() - 1, Prob);
9088 return true;
9089 }
9090 
9091 void SelectionDAGBuilder::findJumpTables(CaseClusterVector &Clusters,
9092 const SwitchInst *SI,
9093 MachineBasicBlock *DefaultMBB) {
9094 #ifndef NDEBUG
9095 // Clusters must be non-empty, sorted, and only contain Range clusters.
9096 assert(!Clusters.empty());
9097 for (CaseCluster &C : Clusters)
9098 assert(C.Kind == CC_Range);
9099 for (unsigned i = 1, e = Clusters.size(); i < e; ++i)
9100 assert(Clusters[i - 1].High->getValue().slt(Clusters[i].Low->getValue()));
9101 #endif
9102 
9103 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9104 if (!TLI.areJTsAllowed(SI->getParent()->getParent()))
9105 return;
9106 
9107 const int64_t N = Clusters.size();
9108 const unsigned MinJumpTableEntries = TLI.getMinimumJumpTableEntries();
9109 const unsigned SmallNumberOfEntries = MinJumpTableEntries / 2;
9110 
9111 if (N < 2 || N < MinJumpTableEntries)
9112 return;
9113 
9114 // TotalCases[i]: Total nbr of cases in Clusters[0..i].
9115 SmallVector<unsigned, 8> TotalCases(N);
9116 for (unsigned i = 0; i < N; ++i) {
9117 const APInt &Hi = Clusters[i].High->getValue();
9118 const APInt &Lo = Clusters[i].Low->getValue();
9119 TotalCases[i] = (Hi - Lo).getLimitedValue() + 1;
9120 if (i != 0)
9121 TotalCases[i] += TotalCases[i - 1];
9122 }
9123 
9124 // Cheap case: the whole range may be suitable for a single jump table.
9125 uint64_t Range = getJumpTableRange(Clusters, 0, N - 1);
9126 uint64_t NumCases = getJumpTableNumCases(TotalCases, 0, N - 1);
9127 assert(NumCases < UINT64_MAX / 100);
9128 assert(Range >= NumCases);
9129 if (TLI.isSuitableForJumpTable(SI, NumCases, Range)) {
9130 CaseCluster JTCluster;
9131 if (buildJumpTable(Clusters, 0, N - 1, SI, DefaultMBB, JTCluster)) {
9132 Clusters[0] = JTCluster;
9133 Clusters.resize(1);
9134 return;
9135 }
9136 }
9137 
9138 // The algorithm below is not suitable for -O0.
9139 if (TM.getOptLevel() == CodeGenOpt::None)
9140 return;
9141 
9142 // Split Clusters into the minimum number of dense partitions. The algorithm
9143 // uses the same idea as Kannan & Proebsting "Correction to 'Producing Good
9144 // Code for the Case Statement'" (1994), but builds the MinPartitions array in
9145 // reverse order to make it easier to reconstruct the partitions in ascending
9146 // order. In the choice between two optimal partitionings, it picks the one
9147 // which yields more jump tables.
9148 
9149 // MinPartitions[i] is the minimum nbr of partitions of Clusters[i..N-1].
9150 SmallVector<unsigned, 8> MinPartitions(N);
9151 // LastElement[i] is the last element of the partition starting at i.
9152 SmallVector<unsigned, 8> LastElement(N); 9153 // PartitionsScore[i] is used to break ties when choosing between two 9154 // partitionings resulting in the same number of partitions. 9155 SmallVector<unsigned, 8> PartitionsScore(N); 9156 // For PartitionsScore, a small number of comparisons is considered as good as 9157 // a jump table and a single comparison is considered better than a jump 9158 // table. 9159 enum PartitionScores : unsigned { 9160 NoTable = 0, 9161 Table = 1, 9162 FewCases = 1, 9163 SingleCase = 2 9164 }; 9165 9166 // Base case: There is only one way to partition Clusters[N-1]. 9167 MinPartitions[N - 1] = 1; 9168 LastElement[N - 1] = N - 1; 9169 PartitionsScore[N - 1] = PartitionScores::SingleCase; 9170 9171 // Note: loop indexes are signed to avoid underflow. 9172 for (int64_t i = N - 2; i >= 0; i--) { 9173 // Find optimal partitioning of Clusters[i..N-1]. 9174 // Baseline: Put Clusters[i] into a partition on its own. 9175 MinPartitions[i] = MinPartitions[i + 1] + 1; 9176 LastElement[i] = i; 9177 PartitionsScore[i] = PartitionsScore[i + 1] + PartitionScores::SingleCase; 9178 9179 // Search for a solution that results in fewer partitions. 9180 for (int64_t j = N - 1; j > i; j--) { 9181 // Try building a partition from Clusters[i..j]. 9182 uint64_t Range = getJumpTableRange(Clusters, i, j); 9183 uint64_t NumCases = getJumpTableNumCases(TotalCases, i, j); 9184 assert(NumCases < UINT64_MAX / 100); 9185 assert(Range >= NumCases); 9186 if (TLI.isSuitableForJumpTable(SI, NumCases, Range)) { 9187 unsigned NumPartitions = 1 + (j == N - 1 ? 0 : MinPartitions[j + 1]); 9188 unsigned Score = j == N - 1 ? 0 : PartitionsScore[j + 1]; 9189 int64_t NumEntries = j - i + 1; 9190 9191 if (NumEntries == 1) 9192 Score += PartitionScores::SingleCase; 9193 else if (NumEntries <= SmallNumberOfEntries) 9194 Score += PartitionScores::FewCases; 9195 else if (NumEntries >= MinJumpTableEntries) 9196 Score += PartitionScores::Table; 9197 9198 // If this leads to fewer partitions, or to the same number of 9199 // partitions with better score, it is a better partitioning. 9200 if (NumPartitions < MinPartitions[i] || 9201 (NumPartitions == MinPartitions[i] && Score > PartitionsScore[i])) { 9202 MinPartitions[i] = NumPartitions; 9203 LastElement[i] = j; 9204 PartitionsScore[i] = Score; 9205 } 9206 } 9207 } 9208 } 9209 9210 // Iterate over the partitions, replacing some with jump tables in-place. 
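// The partitions are read off front to back: the partition that starts at
// cluster First ends at LastElement[First], and the next one starts right
// after it. For example, if LastElement[0] == 3, Clusters[0..3] form the
// first partition and the scan resumes at index 4.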
9211 unsigned DstIndex = 0; 9212 for (unsigned First = 0, Last; First < N; First = Last + 1) { 9213 Last = LastElement[First]; 9214 assert(Last >= First); 9215 assert(DstIndex <= First); 9216 unsigned NumClusters = Last - First + 1; 9217 9218 CaseCluster JTCluster; 9219 if (NumClusters >= MinJumpTableEntries && 9220 buildJumpTable(Clusters, First, Last, SI, DefaultMBB, JTCluster)) { 9221 Clusters[DstIndex++] = JTCluster; 9222 } else { 9223 for (unsigned I = First; I <= Last; ++I) 9224 std::memmove(&Clusters[DstIndex++], &Clusters[I], sizeof(Clusters[I])); 9225 } 9226 } 9227 Clusters.resize(DstIndex); 9228 } 9229 9230 bool SelectionDAGBuilder::buildBitTests(CaseClusterVector &Clusters, 9231 unsigned First, unsigned Last, 9232 const SwitchInst *SI, 9233 CaseCluster &BTCluster) { 9234 assert(First <= Last); 9235 if (First == Last) 9236 return false; 9237 9238 BitVector Dests(FuncInfo.MF->getNumBlockIDs()); 9239 unsigned NumCmps = 0; 9240 for (int64_t I = First; I <= Last; ++I) { 9241 assert(Clusters[I].Kind == CC_Range); 9242 Dests.set(Clusters[I].MBB->getNumber()); 9243 NumCmps += (Clusters[I].Low == Clusters[I].High) ? 1 : 2; 9244 } 9245 unsigned NumDests = Dests.count(); 9246 9247 APInt Low = Clusters[First].Low->getValue(); 9248 APInt High = Clusters[Last].High->getValue(); 9249 assert(Low.slt(High)); 9250 9251 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 9252 const DataLayout &DL = DAG.getDataLayout(); 9253 if (!TLI.isSuitableForBitTests(NumDests, NumCmps, Low, High, DL)) 9254 return false; 9255 9256 APInt LowBound; 9257 APInt CmpRange; 9258 9259 const int BitWidth = TLI.getPointerTy(DL).getSizeInBits(); 9260 assert(TLI.rangeFitsInWord(Low, High, DL) && 9261 "Case range must fit in bit mask!"); 9262 9263 // Check if the clusters cover a contiguous range such that no value in the 9264 // range will jump to the default statement. 9265 bool ContiguousRange = true; 9266 for (int64_t I = First + 1; I <= Last; ++I) { 9267 if (Clusters[I].Low->getValue() != Clusters[I - 1].High->getValue() + 1) { 9268 ContiguousRange = false; 9269 break; 9270 } 9271 } 9272 9273 if (Low.isStrictlyPositive() && High.slt(BitWidth)) { 9274 // Optimize the case where all the case values fit in a word without having 9275 // to subtract minValue. In this case, we can optimize away the subtraction. 9276 LowBound = APInt::getNullValue(Low.getBitWidth()); 9277 CmpRange = High; 9278 ContiguousRange = false; 9279 } else { 9280 LowBound = Low; 9281 CmpRange = High - Low; 9282 } 9283 9284 CaseBitsVector CBV; 9285 auto TotalProb = BranchProbability::getZero(); 9286 for (unsigned i = First; i <= Last; ++i) { 9287 // Find the CaseBits for this destination. 9288 unsigned j; 9289 for (j = 0; j < CBV.size(); ++j) 9290 if (CBV[j].BB == Clusters[i].MBB) 9291 break; 9292 if (j == CBV.size()) 9293 CBV.push_back( 9294 CaseBits(0, Clusters[i].MBB, 0, BranchProbability::getZero())); 9295 CaseBits *CB = &CBV[j]; 9296 9297 // Update Mask, Bits and ExtraProb. 9298 uint64_t Lo = (Clusters[i].Low->getValue() - LowBound).getZExtValue(); 9299 uint64_t Hi = (Clusters[i].High->getValue() - LowBound).getZExtValue(); 9300 assert(Hi >= Lo && Hi < 64 && "Invalid bit case!"); 9301 CB->Mask |= (-1ULL >> (63 - (Hi - Lo))) << Lo; 9302 CB->Bits += Hi - Lo + 1; 9303 CB->ExtraProb += Clusters[i].Prob; 9304 TotalProb += Clusters[i].Prob; 9305 } 9306 9307 BitTestInfo BTI; 9308 std::sort(CBV.begin(), CBV.end(), [](const CaseBits &a, const CaseBits &b) { 9309 // Sort by probability first, number of bits second. 
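// (Emitting the most probable, and then widest, destination first lets the
// hottest cases leave the bit-test chain after the fewest tests.)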
9310 if (a.ExtraProb != b.ExtraProb) 9311 return a.ExtraProb > b.ExtraProb; 9312 return a.Bits > b.Bits; 9313 }); 9314 9315 for (auto &CB : CBV) { 9316 MachineBasicBlock *BitTestBB = 9317 FuncInfo.MF->CreateMachineBasicBlock(SI->getParent()); 9318 BTI.push_back(BitTestCase(CB.Mask, BitTestBB, CB.BB, CB.ExtraProb)); 9319 } 9320 BitTestCases.emplace_back(std::move(LowBound), std::move(CmpRange), 9321 SI->getCondition(), -1U, MVT::Other, false, 9322 ContiguousRange, nullptr, nullptr, std::move(BTI), 9323 TotalProb); 9324 9325 BTCluster = CaseCluster::bitTests(Clusters[First].Low, Clusters[Last].High, 9326 BitTestCases.size() - 1, TotalProb); 9327 return true; 9328 } 9329 9330 void SelectionDAGBuilder::findBitTestClusters(CaseClusterVector &Clusters, 9331 const SwitchInst *SI) { 9332 // Partition Clusters into as few subsets as possible, where each subset has a 9333 // range that fits in a machine word and has <= 3 unique destinations. 9334 9335 #ifndef NDEBUG 9336 // Clusters must be sorted and contain Range or JumpTable clusters. 9337 assert(!Clusters.empty()); 9338 assert(Clusters[0].Kind == CC_Range || Clusters[0].Kind == CC_JumpTable); 9339 for (const CaseCluster &C : Clusters) 9340 assert(C.Kind == CC_Range || C.Kind == CC_JumpTable); 9341 for (unsigned i = 1; i < Clusters.size(); ++i) 9342 assert(Clusters[i-1].High->getValue().slt(Clusters[i].Low->getValue())); 9343 #endif 9344 9345 // The algorithm below is not suitable for -O0. 9346 if (TM.getOptLevel() == CodeGenOpt::None) 9347 return; 9348 9349 // If target does not have legal shift left, do not emit bit tests at all. 9350 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 9351 const DataLayout &DL = DAG.getDataLayout(); 9352 9353 EVT PTy = TLI.getPointerTy(DL); 9354 if (!TLI.isOperationLegal(ISD::SHL, PTy)) 9355 return; 9356 9357 int BitWidth = PTy.getSizeInBits(); 9358 const int64_t N = Clusters.size(); 9359 9360 // MinPartitions[i] is the minimum nbr of partitions of Clusters[i..N-1]. 9361 SmallVector<unsigned, 8> MinPartitions(N); 9362 // LastElement[i] is the last element of the partition starting at i. 9363 SmallVector<unsigned, 8> LastElement(N); 9364 9365 // FIXME: This might not be the best algorithm for finding bit test clusters. 9366 9367 // Base case: There is only one way to partition Clusters[N-1]. 9368 MinPartitions[N - 1] = 1; 9369 LastElement[N - 1] = N - 1; 9370 9371 // Note: loop indexes are signed to avoid underflow. 9372 for (int64_t i = N - 2; i >= 0; --i) { 9373 // Find optimal partitioning of Clusters[i..N-1]. 9374 // Baseline: Put Clusters[i] into a partition on its own. 9375 MinPartitions[i] = MinPartitions[i + 1] + 1; 9376 LastElement[i] = i; 9377 9378 // Search for a solution that results in fewer partitions. 9379 // Note: the search is limited by BitWidth, reducing time complexity. 9380 for (int64_t j = std::min(N - 1, i + BitWidth - 1); j > i; --j) { 9381 // Try building a partition from Clusters[i..j]. 9382 9383 // Check the range. 9384 if (!TLI.rangeFitsInWord(Clusters[i].Low->getValue(), 9385 Clusters[j].High->getValue(), DL)) 9386 continue; 9387 9388 // Check nbr of destinations and cluster types. 9389 // FIXME: This works, but doesn't seem very efficient. 
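// (Each candidate j rescans all of Clusters[i..j]; one possible improvement
// would be to maintain Dests incrementally as j moves.)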
9390 bool RangesOnly = true;
9391 BitVector Dests(FuncInfo.MF->getNumBlockIDs());
9392 for (int64_t k = i; k <= j; k++) {
9393 if (Clusters[k].Kind != CC_Range) {
9394 RangesOnly = false;
9395 break;
9396 }
9397 Dests.set(Clusters[k].MBB->getNumber());
9398 }
9399 if (!RangesOnly || Dests.count() > 3)
9400 break;
9401 
9402 // Check if it's a better partition.
9403 unsigned NumPartitions = 1 + (j == N - 1 ? 0 : MinPartitions[j + 1]);
9404 if (NumPartitions < MinPartitions[i]) {
9405 // Found a better partition.
9406 MinPartitions[i] = NumPartitions;
9407 LastElement[i] = j;
9408 }
9409 }
9410 }
9411 
9412 // Iterate over the partitions, replacing with bit-test clusters in-place.
9413 unsigned DstIndex = 0;
9414 for (unsigned First = 0, Last; First < N; First = Last + 1) {
9415 Last = LastElement[First];
9416 assert(First <= Last);
9417 assert(DstIndex <= First);
9418 
9419 CaseCluster BitTestCluster;
9420 if (buildBitTests(Clusters, First, Last, SI, BitTestCluster)) {
9421 Clusters[DstIndex++] = BitTestCluster;
9422 } else {
9423 size_t NumClusters = Last - First + 1;
9424 std::memmove(&Clusters[DstIndex], &Clusters[First],
9425 sizeof(Clusters[0]) * NumClusters);
9426 DstIndex += NumClusters;
9427 }
9428 }
9429 Clusters.resize(DstIndex);
9430 }
9431 
9432 void SelectionDAGBuilder::lowerWorkItem(SwitchWorkListItem W, Value *Cond,
9433 MachineBasicBlock *SwitchMBB,
9434 MachineBasicBlock *DefaultMBB) {
9435 MachineFunction *CurMF = FuncInfo.MF;
9436 MachineBasicBlock *NextMBB = nullptr;
9437 MachineFunction::iterator BBI(W.MBB);
9438 if (++BBI != FuncInfo.MF->end())
9439 NextMBB = &*BBI;
9440 
9441 unsigned Size = W.LastCluster - W.FirstCluster + 1;
9442 
9443 BranchProbabilityInfo *BPI = FuncInfo.BPI;
9444 
9445 if (Size == 2 && W.MBB == SwitchMBB) {
9446 // If any two of the cases have the same destination, and if one value
9447 // is the same as the other, but has one bit unset that the other has set,
9448 // use bit manipulation to do two compares at once. For example:
9449 // "if (X == 6 || X == 4)" -> "if ((X|2) == 6)"
9450 // TODO: This could be extended to merge any 2 cases in switches with 3
9451 // cases.
9452 // TODO: Handle cases where W.CaseBB != SwitchBB.
9453 CaseCluster &Small = *W.FirstCluster;
9454 CaseCluster &Big = *W.LastCluster;
9455 
9456 if (Small.Low == Small.High && Big.Low == Big.High &&
9457 Small.MBB == Big.MBB) {
9458 const APInt &SmallValue = Small.Low->getValue();
9459 const APInt &BigValue = Big.Low->getValue();
9460 
9461 // Check that there is only one bit different.
9462 APInt CommonBit = BigValue ^ SmallValue;
9463 if (CommonBit.isPowerOf2()) {
9464 SDValue CondLHS = getValue(Cond);
9465 EVT VT = CondLHS.getValueType();
9466 SDLoc DL = getCurSDLoc();
9467 
9468 SDValue Or = DAG.getNode(ISD::OR, DL, VT, CondLHS,
9469 DAG.getConstant(CommonBit, DL, VT));
9470 SDValue Cond = DAG.getSetCC(
9471 DL, MVT::i1, Or, DAG.getConstant(BigValue | SmallValue, DL, VT),
9472 ISD::SETEQ);
9473 
9474 // Update successor info.
9475 // Both Small and Big will jump to Small.BB, so we sum up the
9476 // probabilities.
9477 addSuccessorWithProb(SwitchMBB, Small.MBB, Small.Prob + Big.Prob);
9478 if (BPI)
9479 addSuccessorWithProb(
9480 SwitchMBB, DefaultMBB,
9481 // The default destination is the first successor in IR.
9482 BPI->getEdgeProbability(SwitchMBB->getBasicBlock(), (unsigned)0));
9483 else
9484 addSuccessorWithProb(SwitchMBB, DefaultMBB);
9485 
9486 // Insert the true branch.
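// (In the example above, the true branch tests (X|2) == 6 and jumps to the
// shared destination; every other value falls to the default block.)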
9487 SDValue BrCond =
9488 DAG.getNode(ISD::BRCOND, DL, MVT::Other, getControlRoot(), Cond,
9489 DAG.getBasicBlock(Small.MBB));
9490 // Insert the false branch.
9491 BrCond = DAG.getNode(ISD::BR, DL, MVT::Other, BrCond,
9492 DAG.getBasicBlock(DefaultMBB));
9493 
9494 DAG.setRoot(BrCond);
9495 return;
9496 }
9497 }
9498 }
9499 
9500 if (TM.getOptLevel() != CodeGenOpt::None) {
9501 // Order cases by probability so the most likely case will be checked first.
9502 std::sort(W.FirstCluster, W.LastCluster + 1,
9503 [](const CaseCluster &a, const CaseCluster &b) {
9504 return a.Prob > b.Prob;
9505 });
9506 
9507 // Rearrange the case blocks so that the last one falls through if possible
9508 // without changing the order of probabilities.
9509 for (CaseClusterIt I = W.LastCluster; I > W.FirstCluster; ) {
9510 --I;
9511 if (I->Prob > W.LastCluster->Prob)
9512 break;
9513 if (I->Kind == CC_Range && I->MBB == NextMBB) {
9514 std::swap(*I, *W.LastCluster);
9515 break;
9516 }
9517 }
9518 }
9519 
9520 // Compute total probability.
9521 BranchProbability DefaultProb = W.DefaultProb;
9522 BranchProbability UnhandledProbs = DefaultProb;
9523 for (CaseClusterIt I = W.FirstCluster; I <= W.LastCluster; ++I)
9524 UnhandledProbs += I->Prob;
9525 
9526 MachineBasicBlock *CurMBB = W.MBB;
9527 for (CaseClusterIt I = W.FirstCluster, E = W.LastCluster; I <= E; ++I) {
9528 MachineBasicBlock *Fallthrough;
9529 if (I == W.LastCluster) {
9530 // For the last cluster, fall through to the default destination.
9531 Fallthrough = DefaultMBB;
9532 } else {
9533 Fallthrough = CurMF->CreateMachineBasicBlock(CurMBB->getBasicBlock());
9534 CurMF->insert(BBI, Fallthrough);
9535 // Put Cond in a virtual register to make it available from the new blocks.
9536 ExportFromCurrentBlock(Cond);
9537 }
9538 UnhandledProbs -= I->Prob;
9539 
9540 switch (I->Kind) {
9541 case CC_JumpTable: {
9542 // FIXME: Optimize away range check based on pivot comparisons.
9543 JumpTableHeader *JTH = &JTCases[I->JTCasesIndex].first;
9544 JumpTable *JT = &JTCases[I->JTCasesIndex].second;
9545 
9546 // The jump block hasn't been inserted yet; insert it here.
9547 MachineBasicBlock *JumpMBB = JT->MBB;
9548 CurMF->insert(BBI, JumpMBB);
9549 
9550 auto JumpProb = I->Prob;
9551 auto FallthroughProb = UnhandledProbs;
9552 
9553 // If the default statement is a target of the jump table, we evenly
9554 // distribute the default probability to successors of CurMBB. Also
9555 // update the probability on the edge from JumpMBB to Fallthrough.
9556 for (MachineBasicBlock::succ_iterator SI = JumpMBB->succ_begin(),
9557 SE = JumpMBB->succ_end();
9558 SI != SE; ++SI) {
9559 if (*SI == DefaultMBB) {
9560 JumpProb += DefaultProb / 2;
9561 FallthroughProb -= DefaultProb / 2;
9562 JumpMBB->setSuccProbability(SI, DefaultProb / 2);
9563 JumpMBB->normalizeSuccProbs();
9564 break;
9565 }
9566 }
9567 
9568 addSuccessorWithProb(CurMBB, Fallthrough, FallthroughProb);
9569 addSuccessorWithProb(CurMBB, JumpMBB, JumpProb);
9570 CurMBB->normalizeSuccProbs();
9571 
9572 // The jump table header will be inserted in our current block; it will do
9573 // the range check and fall through to our fallthrough block.
9574 JTH->HeaderBB = CurMBB;
9575 JT->Default = Fallthrough; // FIXME: Move Default to JumpTableHeader.
9576 
9577 // If we're in the right place, emit the jump table header right now.
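// (Otherwise it is emitted later by SelectionDAGISel when the header block
// itself is codegen'd; the Emitted flag prevents lowering it twice.)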
9578 if (CurMBB == SwitchMBB) {
9579 visitJumpTableHeader(*JT, *JTH, SwitchMBB);
9580 JTH->Emitted = true;
9581 }
9582 break;
9583 }
9584 case CC_BitTests: {
9585 // FIXME: Optimize away range check based on pivot comparisons.
9586 BitTestBlock *BTB = &BitTestCases[I->BTCasesIndex];
9587 
9588 // The bit test blocks haven't been inserted yet; insert them here.
9589 for (BitTestCase &BTC : BTB->Cases)
9590 CurMF->insert(BBI, BTC.ThisBB);
9591 
9592 // Fill in fields of the BitTestBlock.
9593 BTB->Parent = CurMBB;
9594 BTB->Default = Fallthrough;
9595 
9596 BTB->DefaultProb = UnhandledProbs;
9597 // If the cases in the bit test don't form a contiguous range, we evenly
9598 // distribute the probability on the edge to Fallthrough to two
9599 // successors of CurMBB.
9600 if (!BTB->ContiguousRange) {
9601 BTB->Prob += DefaultProb / 2;
9602 BTB->DefaultProb -= DefaultProb / 2;
9603 }
9604 
9605 // If we're in the right place, emit the bit test header right now.
9606 if (CurMBB == SwitchMBB) {
9607 visitBitTestHeader(*BTB, SwitchMBB);
9608 BTB->Emitted = true;
9609 }
9610 break;
9611 }
9612 case CC_Range: {
9613 const Value *RHS, *LHS, *MHS;
9614 ISD::CondCode CC;
9615 if (I->Low == I->High) {
9616 // Check Cond == I->Low.
9617 CC = ISD::SETEQ;
9618 LHS = Cond;
9619 RHS = I->Low;
9620 MHS = nullptr;
9621 } else {
9622 // Check I->Low <= Cond <= I->High.
9623 CC = ISD::SETLE;
9624 LHS = I->Low;
9625 MHS = Cond;
9626 RHS = I->High;
9627 }
9628 
9629 // The false probability is the sum of all unhandled cases.
9630 CaseBlock CB(CC, LHS, RHS, MHS, I->MBB, Fallthrough, CurMBB,
9631 getCurSDLoc(), I->Prob, UnhandledProbs);
9632 
9633 if (CurMBB == SwitchMBB)
9634 visitSwitchCase(CB, SwitchMBB);
9635 else
9636 SwitchCases.push_back(CB);
9637 
9638 break;
9639 }
9640 }
9641 CurMBB = Fallthrough;
9642 }
9643 }
9644 
9645 unsigned SelectionDAGBuilder::caseClusterRank(const CaseCluster &CC,
9646 CaseClusterIt First,
9647 CaseClusterIt Last) {
9648 return std::count_if(First, Last + 1, [&](const CaseCluster &X) {
9649 if (X.Prob != CC.Prob)
9650 return X.Prob > CC.Prob;
9651 
9652 // Ties are broken by comparing the case value.
9653 return X.Low->getValue().slt(CC.Low->getValue());
9654 });
9655 }
9656 
9657 void SelectionDAGBuilder::splitWorkItem(SwitchWorkList &WorkList,
9658 const SwitchWorkListItem &W,
9659 Value *Cond,
9660 MachineBasicBlock *SwitchMBB) {
9661 assert(W.FirstCluster->Low->getValue().slt(W.LastCluster->Low->getValue()) &&
9662 "Clusters not sorted?");
9663 
9664 assert(W.LastCluster - W.FirstCluster + 1 >= 2 && "Too small to split!");
9665 
9666 // Balance the tree based on branch probabilities to create a near-optimal (in
9667 // terms of search time given key frequency) binary search tree. See e.g. Kurt
9668 // Mehlhorn "Nearly Optimal Binary Search Trees" (1975).
9669 CaseClusterIt LastLeft = W.FirstCluster;
9670 CaseClusterIt FirstRight = W.LastCluster;
9671 auto LeftProb = LastLeft->Prob + W.DefaultProb / 2;
9672 auto RightProb = FirstRight->Prob + W.DefaultProb / 2;
9673 
9674 // Move LastLeft and FirstRight towards each other from opposite directions to
9675 // find a partitioning of the clusters which balances the probability on both
9676 // sides. If LeftProb and RightProb are equal, alternate which side is
9677 // taken to ensure 0-probability nodes are distributed evenly.
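// For example (illustrative), with cluster probabilities {4,1,1,4} the loop
// below meets in the middle, splitting into {4,1 | 1,4} so that each side
// carries roughly half of the total weight.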
9678 unsigned I = 0;
9679 while (LastLeft + 1 < FirstRight) {
9680 if (LeftProb < RightProb || (LeftProb == RightProb && (I & 1)))
9681 LeftProb += (++LastLeft)->Prob;
9682 else
9683 RightProb += (--FirstRight)->Prob;
9684 I++;
9685 }
9686 
9687 for (;;) {
9688 // Our binary search tree differs from a typical BST in that ours can have up
9689 // to three values in each leaf. The pivot selection above doesn't take that
9690 // into account, which means the tree might require more nodes and be less
9691 // efficient. We compensate for this here.
9692 
9693 unsigned NumLeft = LastLeft - W.FirstCluster + 1;
9694 unsigned NumRight = W.LastCluster - FirstRight + 1;
9695 
9696 if (std::min(NumLeft, NumRight) < 3 && std::max(NumLeft, NumRight) > 3) {
9697 // If one side has fewer than 3 clusters, and the other has more than 3,
9698 // consider taking a cluster from the other side.
9699 
9700 if (NumLeft < NumRight) {
9701 // Consider moving the first cluster on the right to the left side.
9702 CaseCluster &CC = *FirstRight;
9703 unsigned RightSideRank = caseClusterRank(CC, FirstRight, W.LastCluster);
9704 unsigned LeftSideRank = caseClusterRank(CC, W.FirstCluster, LastLeft);
9705 if (LeftSideRank <= RightSideRank) {
9706 // Moving the cluster to the left does not demote it.
9707 ++LastLeft;
9708 ++FirstRight;
9709 continue;
9710 }
9711 } else {
9712 assert(NumRight < NumLeft);
9713 // Consider moving the last element on the left to the right side.
9714 CaseCluster &CC = *LastLeft;
9715 unsigned LeftSideRank = caseClusterRank(CC, W.FirstCluster, LastLeft);
9716 unsigned RightSideRank = caseClusterRank(CC, FirstRight, W.LastCluster);
9717 if (RightSideRank <= LeftSideRank) {
9718 // Moving the cluster to the right does not demote it.
9719 --LastLeft;
9720 --FirstRight;
9721 continue;
9722 }
9723 }
9724 }
9725 break;
9726 }
9727 
9728 assert(LastLeft + 1 == FirstRight);
9729 assert(LastLeft >= W.FirstCluster);
9730 assert(FirstRight <= W.LastCluster);
9731 
9732 // Use the first element on the right as pivot since we will make less-than
9733 // comparisons against it.
9734 CaseClusterIt PivotCluster = FirstRight;
9735 assert(PivotCluster > W.FirstCluster);
9736 assert(PivotCluster <= W.LastCluster);
9737 
9738 CaseClusterIt FirstLeft = W.FirstCluster;
9739 CaseClusterIt LastRight = W.LastCluster;
9740 
9741 const ConstantInt *Pivot = PivotCluster->Low;
9742 
9743 // New blocks will be inserted immediately after the current one.
9744 MachineFunction::iterator BBI(W.MBB);
9745 ++BBI;
9746 
9747 // We will branch to the LHS if Value < Pivot. If LHS is a single cluster,
9748 // we can branch to its destination directly if it's squeezed exactly in
9749 // between the known lower bound and Pivot - 1.
9750 MachineBasicBlock *LeftMBB;
9751 if (FirstLeft == LastLeft && FirstLeft->Kind == CC_Range &&
9752 FirstLeft->Low == W.GE &&
9753 (FirstLeft->High->getValue() + 1LL) == Pivot->getValue()) {
9754 LeftMBB = FirstLeft->MBB;
9755 } else {
9756 LeftMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock());
9757 FuncInfo.MF->insert(BBI, LeftMBB);
9758 WorkList.push_back(
9759 {LeftMBB, FirstLeft, LastLeft, W.GE, Pivot, W.DefaultProb / 2});
9760 // Put Cond in a virtual register to make it available from the new blocks.
9761 ExportFromCurrentBlock(Cond);
9762 }
9763 
9764 // Similarly, we will branch to the RHS if Value >= Pivot. If RHS is a
9765 // single cluster, RHS.Low == Pivot, and we can branch to its destination
9766 // directly if RHS.High equals the current upper bound.
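// (W.LT is the exclusive upper bound inherited from the parent work item; it
// is null for the root item, in which case no direct branch is possible.)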
9767 MachineBasicBlock *RightMBB; 9768 if (FirstRight == LastRight && FirstRight->Kind == CC_Range && 9769 W.LT && (FirstRight->High->getValue() + 1ULL) == W.LT->getValue()) { 9770 RightMBB = FirstRight->MBB; 9771 } else { 9772 RightMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock()); 9773 FuncInfo.MF->insert(BBI, RightMBB); 9774 WorkList.push_back( 9775 {RightMBB, FirstRight, LastRight, Pivot, W.LT, W.DefaultProb / 2}); 9776 // Put Cond in a virtual register to make it available from the new blocks. 9777 ExportFromCurrentBlock(Cond); 9778 } 9779 9780 // Create the CaseBlock record that will be used to lower the branch. 9781 CaseBlock CB(ISD::SETLT, Cond, Pivot, nullptr, LeftMBB, RightMBB, W.MBB, 9782 getCurSDLoc(), LeftProb, RightProb); 9783 9784 if (W.MBB == SwitchMBB) 9785 visitSwitchCase(CB, SwitchMBB); 9786 else 9787 SwitchCases.push_back(CB); 9788 } 9789 9790 void SelectionDAGBuilder::visitSwitch(const SwitchInst &SI) { 9791 // Extract cases from the switch. 9792 BranchProbabilityInfo *BPI = FuncInfo.BPI; 9793 CaseClusterVector Clusters; 9794 Clusters.reserve(SI.getNumCases()); 9795 for (auto I : SI.cases()) { 9796 MachineBasicBlock *Succ = FuncInfo.MBBMap[I.getCaseSuccessor()]; 9797 const ConstantInt *CaseVal = I.getCaseValue(); 9798 BranchProbability Prob = 9799 BPI ? BPI->getEdgeProbability(SI.getParent(), I.getSuccessorIndex()) 9800 : BranchProbability(1, SI.getNumCases() + 1); 9801 Clusters.push_back(CaseCluster::range(CaseVal, CaseVal, Succ, Prob)); 9802 } 9803 9804 MachineBasicBlock *DefaultMBB = FuncInfo.MBBMap[SI.getDefaultDest()]; 9805 9806 // Cluster adjacent cases with the same destination. We do this at all 9807 // optimization levels because it's cheap to do and will make codegen faster 9808 // if there are many clusters. 9809 sortAndRangeify(Clusters); 9810 9811 if (TM.getOptLevel() != CodeGenOpt::None) { 9812 // Replace an unreachable default with the most popular destination. 9813 // FIXME: Exploit unreachable default more aggressively. 9814 bool UnreachableDefault = 9815 isa<UnreachableInst>(SI.getDefaultDest()->getFirstNonPHIOrDbg()); 9816 if (UnreachableDefault && !Clusters.empty()) { 9817 DenseMap<const BasicBlock *, unsigned> Popularity; 9818 unsigned MaxPop = 0; 9819 const BasicBlock *MaxBB = nullptr; 9820 for (auto I : SI.cases()) { 9821 const BasicBlock *BB = I.getCaseSuccessor(); 9822 if (++Popularity[BB] > MaxPop) { 9823 MaxPop = Popularity[BB]; 9824 MaxBB = BB; 9825 } 9826 } 9827 // Set new default. 9828 assert(MaxPop > 0 && MaxBB); 9829 DefaultMBB = FuncInfo.MBBMap[MaxBB]; 9830 9831 // Remove cases that were pointing to the destination that is now the 9832 // default. 9833 CaseClusterVector New; 9834 New.reserve(Clusters.size()); 9835 for (CaseCluster &CC : Clusters) { 9836 if (CC.MBB != DefaultMBB) 9837 New.push_back(CC); 9838 } 9839 Clusters = std::move(New); 9840 } 9841 } 9842 9843 // If there is only the default destination, jump there directly. 
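// (The explicit BR below is only needed when the default block is not the
// layout successor; otherwise the fallthrough suffices.)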
9844 MachineBasicBlock *SwitchMBB = FuncInfo.MBB; 9845 if (Clusters.empty()) { 9846 SwitchMBB->addSuccessor(DefaultMBB); 9847 if (DefaultMBB != NextBlock(SwitchMBB)) { 9848 DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other, 9849 getControlRoot(), DAG.getBasicBlock(DefaultMBB))); 9850 } 9851 return; 9852 } 9853 9854 findJumpTables(Clusters, &SI, DefaultMBB); 9855 findBitTestClusters(Clusters, &SI); 9856 9857 DEBUG({ 9858 dbgs() << "Case clusters: "; 9859 for (const CaseCluster &C : Clusters) { 9860 if (C.Kind == CC_JumpTable) dbgs() << "JT:"; 9861 if (C.Kind == CC_BitTests) dbgs() << "BT:"; 9862 9863 C.Low->getValue().print(dbgs(), true); 9864 if (C.Low != C.High) { 9865 dbgs() << '-'; 9866 C.High->getValue().print(dbgs(), true); 9867 } 9868 dbgs() << ' '; 9869 } 9870 dbgs() << '\n'; 9871 }); 9872 9873 assert(!Clusters.empty()); 9874 SwitchWorkList WorkList; 9875 CaseClusterIt First = Clusters.begin(); 9876 CaseClusterIt Last = Clusters.end() - 1; 9877 auto DefaultProb = getEdgeProbability(SwitchMBB, DefaultMBB); 9878 WorkList.push_back({SwitchMBB, First, Last, nullptr, nullptr, DefaultProb}); 9879 9880 while (!WorkList.empty()) { 9881 SwitchWorkListItem W = WorkList.back(); 9882 WorkList.pop_back(); 9883 unsigned NumClusters = W.LastCluster - W.FirstCluster + 1; 9884 9885 if (NumClusters > 3 && TM.getOptLevel() != CodeGenOpt::None && 9886 !DefaultMBB->getParent()->getFunction()->optForMinSize()) { 9887 // For optimized builds, lower large range as a balanced binary tree. 9888 splitWorkItem(WorkList, W, SI.getCondition(), SwitchMBB); 9889 continue; 9890 } 9891 9892 lowerWorkItem(W, SI.getCondition(), SwitchMBB, DefaultMBB); 9893 } 9894 } 9895